hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
90c2c9e300270ded316092b4ba6774c02aa8520b
| 12,912
|
py
|
Python
|
architectures/bug_arch_really_acc_final.py
|
SestoAle/Sesto_PPO
|
eda2febfcce906b8cf1742e25c06b4dfb180e9bf
|
[
"MIT"
] | null | null | null |
architectures/bug_arch_really_acc_final.py
|
SestoAle/Sesto_PPO
|
eda2febfcce906b8cf1742e25c06b4dfb180e9bf
|
[
"MIT"
] | null | null | null |
architectures/bug_arch_really_acc_final.py
|
SestoAle/Sesto_PPO
|
eda2febfcce906b8cf1742e25c06b4dfb180e9bf
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from layers.layers import *
def input_spec():
    """Build the placeholder that feeds the policy network.

    Returns a single-element list holding a float32 placeholder of shape
    [batch, 9292] named 'state' (the flattened observation vector; its
    width must match what obs_to_state produces).
    """
    state_size = 9292
    state_ph = tf.compat.v1.placeholder(tf.float32, [None, state_size], name='state')
    return [state_ph]
def obs_to_state(obs):
    """Stack the 'global_in' vectors of a batch of observation dicts.

    Each element of *obs* is a dict carrying an array-like under the
    'global_in' key; the result is a one-element list with an ndarray of
    shape (len(obs), ...).
    """
    stacked = np.stack([np.asarray(sample['global_in']) for sample in obs])
    return [stacked]
def network_spec(states):
    """Policy/value feature extractor.

    Splits the flat 9292-float observation into its named components,
    embeds the agent's discretized (x, z, jump) position, runs the
    21x21x21 voxel grid through a 3-D conv stack, and fuses everything
    into a single 1024-d feature vector.

    Parameters
    ----------
    states : list
        Output of input_spec(); states[0] is the [batch, 9292] tensor.

    Returns
    -------
    A [batch, 1024] float tensor.

    Changes vs. original: removed dead locals (`input_length`,
    `with_circular`) and commented-out dead code; behavior is unchanged.
    """
    global_state = states[0]
    # Component widths must sum to 9292 (see input_spec).
    agent_plane_x, agent_plane_z, agent_jump, is_grounded, can_double_jump, target_distances, goal, threedgrid, rotation, rays, \
    inventory, goal_weight = \
        tf.split(global_state, [1, 1, 1, 1, 1, 3, 2, 9261, 4, 12, 2, 3], axis=1)
    # Coordinates arrive normalized to [-1, 1]; map back to integer bins
    # (0..500 for x/z, 0..60 for jump) usable as embedding indices.
    agent_plane_x = ((agent_plane_x + 1) / 2) * 500
    agent_plane_x = tf.cast(agent_plane_x, tf.int32)
    agent_plane_z = ((agent_plane_z + 1) / 2) * 500
    agent_plane_z = tf.cast(agent_plane_z, tf.int32)
    agent_jump = ((agent_jump + 1) / 2) * 60
    agent_jump = tf.cast(agent_jump, tf.int32)
    agent = tf.concat([agent_plane_x, agent_plane_z, agent_jump], axis=1)
    # 501 indices cover the 0..500 coordinate range (jump's 0..60 fits too).
    agent = embedding(agent, indices=501, size=32, name='agent_embs')
    agent = tf.reshape(agent, (-1, 3 * 32))
    agent = linear(agent, 1024, name='global_embs', activation=tf.nn.relu)
    is_grounded = linear(is_grounded, 1024, name='grounded_embs', activation=tf.nn.relu)
    can_double_jump = linear(can_double_jump, 1024, name='double_embs', activation=tf.nn.relu)
    agent = tf.concat([agent, is_grounded, can_double_jump], axis=1)
    goal_weight = linear(goal_weight, 1024, name='goal_embs', activation=tf.nn.relu)
    # Voxel grid: 9261 = 21^3 categorical cells with values in {0..3}.
    threedgrid = tf.cast(tf.reshape(threedgrid, [-1, 21, 21, 21]), tf.int32)
    # NOTE(review): the name 'global_embs' is also used by the linear layer
    # above — confirm the custom embedding/linear layers scope their
    # variables so the two do not collide.
    threedgrid = embedding(threedgrid, indices=4, size=32, name='global_embs')
    threedgrid = conv_layer_3d(threedgrid, 32, [3, 3, 3], strides=(2, 2, 2), name='conv_01', activation=tf.nn.relu)
    threedgrid = conv_layer_3d(threedgrid, 32, [3, 3, 3], strides=(2, 2, 2), name='conv_02', activation=tf.nn.relu)
    threedgrid = conv_layer_3d(threedgrid, 64, [3, 3, 3], strides=(2, 2, 2), name='conv_03', activation=tf.nn.relu)
    threedgrid = conv_layer_3d(threedgrid, 64, [3, 3, 3], strides=(2, 2, 2), name='conv_04', activation=tf.nn.relu)
    # Four stride-2 convs reduce each spatial axis 21 -> 2.
    threedgrid = tf.reshape(threedgrid, [-1, 2 * 2 * 2 * 64])
    global_state = tf.concat([agent, threedgrid, goal_weight], axis=1)
    global_state = linear(global_state, 1024, name='embs', activation=tf.nn.relu)
    return global_state
def obs_to_state_rnd(obs):
    """Batch the raw 'global_in' entries for the RND networks.

    Same contract as obs_to_state, minus the explicit np.asarray
    conversion (np.stack handles array-likes directly).
    """
    batch = np.stack([sample['global_in'] for sample in obs])
    return [batch]
def network_spec_rnd_predictor(states):
    """RND predictor head.

    Embeds the agent's discretized (x, z, jump) position and maps it
    through a 5-layer MLP to a 64-d output. Trained to match
    network_spec_rnd_target; the prediction error is used as an intrinsic
    (novelty) reward signal.
    """
    with_circular = False  # NOTE(review): unused in this function
    global_state = states[0]
    # agent, goal, rays, obs = tf.split(global_state, [4, 3, 12, 21], axis=1)
    # Split the flat observation into named components; only the agent
    # position/jump slices are actually used by this network.
    agent_plane_x, agent_plane_z, agent_jump, is_grounded, can_double_jump, target_distances, goal, threedgrid, rotation, rays, \
    inventory, goal_weight = \
        tf.split(global_state, [1, 1, 1, 1, 1, 3, 2, 9261, 4, 12, 2, 3], axis=1)
    # Coordinates arrive normalized to [-1, 1]; rescale to integer bins
    # (0..500 for x/z, 0..60 for jump) usable as embedding indices.
    agent_plane_x = ((agent_plane_x + 1) / 2) * 500
    agent_plane_x = tf.cast(agent_plane_x, tf.int32)
    agent_plane_z = ((agent_plane_z + 1) / 2) * 500
    agent_plane_z = tf.cast(agent_plane_z, tf.int32)
    agent_jump = ((agent_jump + 1) / 2) * 60
    agent_jump = tf.cast(agent_jump, tf.int32)
    agent = tf.concat([agent_plane_x, agent_plane_z, agent_jump], axis=1)
    global_state = agent
    # 501 indices cover the 0..500 coordinate range.
    global_state = embedding(global_state, indices=501, size=32, name='embs')
    global_state = tf.reshape(global_state, (-1, 3 * 32))
    #global_state = linear(global_state, 1024, name='global_embs', activation=tf.nn.leaky_relu)
    global_state = linear(global_state, 1024, name='latent_1', activation=tf.nn.leaky_relu,
                          )
    global_state = linear(global_state, 512, name='latent_2', activation=tf.nn.leaky_relu,
                          )
    global_state = linear(global_state, 128, name='latent_3', activation=tf.nn.relu,
                          )
    global_state = linear(global_state, 128, name='latent_4', activation=tf.nn.relu,
                          )
    # Final 64-d projection has no activation.
    global_state = linear(global_state, 64, name='out',
                          )
    return global_state
def network_spec_rnd_target(states):
    """RND target head.

    Same preprocessing as network_spec_rnd_predictor but a shallower MLP
    (3 layers vs. 5) ending in the same 64-d output. NOTE(review): this
    network is presumably kept frozen while the predictor is trained to
    imitate it — confirm in the training code.
    """
    with_circular = False  # NOTE(review): unused in this function
    global_state = states[0]
    # agent, goal, rays, obs = tf.split(global_state, [4, 3, 12, 21], axis=1)
    # Split the flat observation; only the agent position/jump slices are used.
    agent_plane_x, agent_plane_z, agent_jump, is_grounded, can_double_jump, target_distances, goal, threedgrid, rotation, rays, \
    inventory, goal_weight = \
        tf.split(global_state, [1, 1, 1, 1, 1, 3, 2, 9261, 4, 12, 2, 3], axis=1)
    # Coordinates arrive normalized to [-1, 1]; rescale to integer bins
    # (0..500 for x/z, 0..60 for jump) usable as embedding indices.
    agent_plane_x = ((agent_plane_x + 1) / 2) * 500
    agent_plane_x = tf.cast(agent_plane_x, tf.int32)
    agent_plane_z = ((agent_plane_z + 1) / 2) * 500
    agent_plane_z = tf.cast(agent_plane_z, tf.int32)
    agent_jump = ((agent_jump + 1) / 2) * 60
    agent_jump = tf.cast(agent_jump, tf.int32)
    agent = tf.concat([agent_plane_x, agent_plane_z, agent_jump], axis=1)
    global_state = agent
    global_state = embedding(global_state, indices=501, size=32, name='embs')
    global_state = tf.reshape(global_state, (-1, 3 * 32))
    #global_state = linear(global_state, 1024, name='global_embs', activation=tf.nn.leaky_relu)
    global_state = linear(global_state, 1024, name='latent_1', activation=tf.nn.leaky_relu,
                          )
    global_state = linear(global_state, 512, name='latent_2', activation=tf.nn.leaky_relu,
                          )
    # Final 64-d projection has no activation.
    global_state = linear(global_state, 64, name='out',
                          )
    return global_state
def input_spec_irl():
    """Build the placeholders used by the IRL discriminator.

    Returns [[state], action, [next_state]]: two float32 placeholders of
    shape [batch, 9289] ('state' and 'state_n') and an int32 action
    placeholder of shape [batch, 1] ('act').

    NOTE(review): 9289 = 9292 - 3; obs_to_state_irl drops the trailing 3
    entries of the full observation — confirm those are the goal-weight
    block.
    """
    state_size = 9289
    state_ph = tf.compat.v1.placeholder(tf.float32, [None, state_size], name='state')
    next_state_ph = tf.compat.v1.placeholder(tf.float32, [None, state_size], name='state_n')
    action_ph = tf.compat.v1.placeholder(tf.int32, [None, 1], name='act')
    return [[state_ph], action_ph, [next_state_ph]]
def obs_to_state_irl(obs):
    """Batch observations for the IRL discriminator.

    If the observations carry more than 9289 values (the full policy-format
    vector), the trailing 3 entries are dropped so the output width matches
    input_spec_irl's 9289-wide placeholders. The first observation's length
    is taken as representative of the whole batch.
    """
    full_format = len(obs[0]['global_in']) > 9289
    if full_format:
        batch = np.stack([sample['global_in'][:-3] for sample in obs])
    else:
        batch = np.stack([sample['global_in'] for sample in obs])
    return [batch]
def network_spec_irl(states, states_n, act, with_action, actions_size):
    """IRL discriminator / reward network.

    Encodes the 21x21x21 voxel grid through a 3-D conv stack, embeds the
    discrete action, concatenates the two, and maps them through an MLP to
    a single scalar.

    Returns (scalar_output, threedgrid_embedding, action_state_embedding).

    NOTE(review): states_n, with_action and actions_size are currently
    unused — the next-state branch is commented out below. Confirm this
    is intentional.
    """
    global_state = states[0]
    global_state_n = states_n[0]
    action_state = tf.cast(act, tf.int32)
    # Split the 9289-float IRL observation (no goal_weight block here;
    # widths sum to 9289, matching input_spec_irl).
    agent_plane_x, agent_plane_z, agent_jump, is_grounded, can_double_jump, target_distances, goal, threedgrid, rotation, rays, \
    inventory = \
        tf.split(global_state, [1, 1, 1, 1, 1, 3, 2, 9261, 4, 12, 2], axis=1)
    # Coordinates arrive normalized to [-1, 1]; rescale to integer bins.
    agent_plane_x = ((agent_plane_x + 1) / 2) * 500
    agent_plane_x = tf.cast(agent_plane_x, tf.int32)
    agent_plane_z = ((agent_plane_z + 1) / 2) * 500
    agent_plane_z = tf.cast(agent_plane_z, tf.int32)
    # NOTE(review): jump is scaled by 40 here but by 60 in network_spec —
    # confirm which range matches this observation format.
    agent_jump = ((agent_jump + 1) / 2) * 40
    agent_jump = tf.cast(agent_jump, tf.int32)
    agent = tf.concat([agent_plane_x, agent_plane_z, agent_jump], axis=1)
    # NOTE(review): overwritten further down; the agent features do not
    # reach the output in the current configuration.
    global_state = agent
    # agent_n_plane_x, agent_n_plane_z, agent_n_jump, _, _, _, _, _, _, _, _ = \
    #     tf.split(global_state_n, [1, 1, 1, 1, 1, 3, 2, 3375, 4, 12, 2], axis=1)
    #
    # agent_n_plane_x = ((agent_n_plane_x + 1) / 2) * 220
    # agent_n_plane_x = tf.cast(agent_n_plane_x, tf.int32)
    #
    # agent_n_plane_z = ((agent_n_plane_z + 1) / 2) * 280
    # agent_n_plane_z = tf.cast(agent_n_plane_z, tf.int32)
    #
    # agent_n_jump = ((agent_n_jump + 1) / 2) * 40
    # agent_n_jump = tf.cast(agent_n_jump, tf.int32)
    #
    # agent_n = tf.concat([agent_n_plane_x, agent_n_plane_z, agent_n_jump], axis=1)
    # global_state_n = agent_n
    # global_state = tf.compat.v1.Print(global_state, [global_state], 'Global state: ', summarize=1e5)
    # global_state = embedding(global_state, indices=280, size=32, name='embs')
    # global_state = tf.reshape(global_state, (-1, 3*32))
    # global_state = linear(global_state, 64, name='latent_1', activation=tf.nn.relu,
    #                       init=tf.compat.v1.keras.initializers.Orthogonal(gain=np.sqrt(2), seed=None,
    #                                                                       dtype=tf.dtypes.float32)
    #                       )
    # Voxel grid: 9261 = 21^3 categorical cells embedded to 32 channels.
    threedgrid = tf.cast(tf.reshape(threedgrid, [-1, 21, 21, 21]), tf.int32)
    # threedgrid = tf.reshape(threedgrid, [-1, 15, 15, 15, 1])
    # The pre-conv embedding is kept and returned alongside the output.
    threedgrid_state = embedding(threedgrid, indices=4, size=32, name='global_embs')
    threedgrid = conv_layer_3d(threedgrid_state, 32, [3, 3, 3], strides=(2, 2, 2), name='conv_01', activation=tf.nn.relu)
    #threedgrid = tf.nn.max_pool3d(threedgrid, [2, 2, 2], strides=(2, 2, 2), padding="VALID")
    threedgrid = conv_layer_3d(threedgrid, 32, [3, 3, 3], strides=(2, 2, 2), name='conv_02', activation=tf.nn.relu)
    #threedgrid = tf.nn.max_pool3d(threedgrid, [2, 2, 2], strides=(2, 2, 2), padding="VALID")
    threedgrid = conv_layer_3d(threedgrid, 64, [3, 3, 3], strides=(2, 2, 2), name='conv_03', activation=tf.nn.relu)
    threedgrid = conv_layer_3d(threedgrid, 64, [3, 3, 3], strides=(2, 2, 2), name='conv_04', activation=tf.nn.relu)
    # Four stride-2 convs reduce each spatial axis 21 -> 2.
    threedgrid = tf.reshape(threedgrid, [-1, 2 * 2 * 2 * 64])
    # global_state_n = embedding(global_state_n, indices=501, size=32, name='embs')
    # global_state_n = tf.reshape(global_state_n, (-1, 3 * 32))
    # global_state_n = linear(global_state_n, 64, name='latent_1_n', activation=tf.nn.relu,
    #                         init=tf.compat.v1.keras.initializers.Orthogonal(gain=np.sqrt(2), seed=None,
    #                                                                         dtype=tf.dtypes.float32)
    #                         )
    # action_state = tf.compat.v1.Print(action_state, [action_state], 'Action state: ', summarize=1e5)
    # action_state = tf.one_hot(action_state, 10)
    # action_state = tf.reshape(action_state, [-1, 10])
    # global_state = tf.one_hot(global_state, 280)
    # global_state = tf.reshape(global_state, [-1, 3*280])
    # Discrete action index (0..9) embedded to a 512-d vector.
    action_state = embedding(action_state, indices=10, size=512, name='action_embs')
    action_state = tf.reshape(action_state, [-1, 512])
    action = action_state
    # action_state = linear(action_state, 64, name='latent_action_n', activation=tf.nn.relu,
    #                       init=tf.compat.v1.keras.initializers.Orthogonal(gain=np.sqrt(2), seed=None,
    #                                                                       dtype=tf.dtypes.float32)
    #                       )
    # action_state = tf.compat.v1.layers.dropout(action_state, rate=0.2)
    # inventory = linear(inventory, 32, name='inventory_embs', activation=tf.nn.tanh)
    # inventory = linear(inventory, 64, name='latent_inventory_n', activation=tf.nn.relu,
    #                    init=tf.compat.v1.keras.initializers.Orthogonal(gain=np.sqrt(2), seed=None,
    #                                                                    dtype=tf.dtypes.float32)
    #                    )
    # Fuse grid features with the action embedding, then reduce to a scalar.
    encoded = tf.concat([threedgrid, action], axis=1)
    global_state = linear(encoded, 1024, name='latent_1', activation=tf.nn.relu,
                          )
    global_state = linear(global_state, 512, name='latent_2', activation=tf.nn.relu,
                          )
    global_state = linear(global_state, 128, name='latent_3', activation=tf.nn.relu,
                          )
    # global_state = linear(global_state, 512, name='latent_2', activation=tf.nn.relu,
    #                       init=tf.compat.v1.keras.initializers.Orthogonal(gain=np.sqrt(2), seed=None,
    #                                                                      dtype=tf.dtypes.float32)
    #                       )
    # global_state = tf.compat.v1.layers.dropout(global_state, rate=0.2)
    # Scalar output (no activation), orthogonally initialized.
    global_state = linear(global_state, 1, name='out',
                          init=tf.compat.v1.keras.initializers.Orthogonal(gain=np.sqrt(2), seed=None,
                                                                          dtype=tf.dtypes.float32)
                          )
    # global_state = tf.compat.v1.layers.dropout(global_state, rate=0.2)
    return global_state, threedgrid_state, action_state
| 43.04
| 129
| 0.628253
| 1,856
| 12,912
| 4.128233
| 0.071659
| 0.130645
| 0.060298
| 0.054033
| 0.821196
| 0.772383
| 0.759854
| 0.705429
| 0.696554
| 0.694727
| 0
| 0.062847
| 0.232265
| 12,912
| 300
| 130
| 43.04
| 0.710078
| 0.324892
| 0
| 0.556338
| 0
| 0
| 0.034798
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06338
| false
| 0
| 0.014085
| 0
| 0.140845
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
90df82c49ed72d1a4352e491e5048aa62832324d
| 48
|
py
|
Python
|
blueque/__init__.py
|
ustudio/Blueque
|
f973c470d6558856bbd7f3bf4d6a3e42d38fce85
|
[
"Apache-2.0"
] | 5
|
2016-12-03T23:10:45.000Z
|
2018-06-06T17:06:27.000Z
|
blueque/__init__.py
|
ustudio/Blueque
|
f973c470d6558856bbd7f3bf4d6a3e42d38fce85
|
[
"Apache-2.0"
] | 8
|
2015-06-19T21:32:48.000Z
|
2021-01-08T19:27:45.000Z
|
blueque/__init__.py
|
ustudio/Blueque
|
f973c470d6558856bbd7f3bf4d6a3e42d38fce85
|
[
"Apache-2.0"
] | 1
|
2017-05-18T06:15:17.000Z
|
2017-05-18T06:15:17.000Z
|
from blueque.client import Client # noqa: F401
| 24
| 47
| 0.770833
| 7
| 48
| 5.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 0.166667
| 48
| 1
| 48
| 48
| 0.85
| 0.208333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
290177dca8fe9091364f40f03cf5a9f898861f0c
| 79
|
py
|
Python
|
das_framework/const.py
|
thinkmoore/das
|
d9faabf3de987b890a5079b914f5aba597215b14
|
[
"CC0-1.0"
] | 35
|
2019-04-16T19:37:01.000Z
|
2022-02-14T20:33:41.000Z
|
das_framework/const.py
|
thinkmoore/das
|
d9faabf3de987b890a5079b914f5aba597215b14
|
[
"CC0-1.0"
] | 6
|
2019-06-05T19:41:15.000Z
|
2020-08-19T19:04:59.000Z
|
das_framework/const.py
|
thinkmoore/das
|
d9faabf3de987b890a5079b914f5aba597215b14
|
[
"CC0-1.0"
] | 12
|
2019-05-02T19:38:06.000Z
|
2021-09-11T22:02:03.000Z
|
class Const:
    """Read-only namespace for framework-wide constant values."""

    @property
    def GUROBI(self):
        """Identifier of the Gurobi solver backend."""
        return "GUROBI"
| 11.285714
| 23
| 0.544304
| 8
| 79
| 5.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.367089
| 79
| 6
| 24
| 13.166667
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0.075949
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
2907affe391915a90c5cd0f341a40e966279758c
| 11,437
|
py
|
Python
|
final_code.py
|
sarrthak/Tensorflow-object-detection-for-cutom-dataset
|
122e628cbd5f3e98ed7548bdfec02f2df863aa9b
|
[
"MIT"
] | null | null | null |
final_code.py
|
sarrthak/Tensorflow-object-detection-for-cutom-dataset
|
122e628cbd5f3e98ed7548bdfec02f2df863aa9b
|
[
"MIT"
] | null | null | null |
final_code.py
|
sarrthak/Tensorflow-object-detection-for-cutom-dataset
|
122e628cbd5f3e98ed7548bdfec02f2df863aa9b
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import time
import serial
# Open the serial link to the LED controller board.
# NOTE(review): COM10 @ 9600 baud is machine-specific — confirm the port.
ser1=serial.Serial('COM10',9600)
# LED control: initialise by sending the five "step complete" codes
# ('b','d','f','h','j'), the same bytes each detection loop below sends
# when its step finishes.
ser1.write('b'.encode())
ser1.write('d'.encode())
ser1.write('f'.encode())
ser1.write('h'.encode())
ser1.write('j'.encode())
def ORB_detector(new_image, image_template):
    """Return the number of ORB feature matches between two images.

    Parameters
    ----------
    new_image : BGR image (e.g. a cropped webcam frame).
    image_template : grayscale reference image.

    Returns
    -------
    int: the raw count of brute-force descriptor matches (0 if either
    image yields no descriptors).
    """
    gray = cv2.cvtColor(new_image, cv2.COLOR_BGR2GRAY)
    # ORB detector with up to 1000 keypoints and a scale-pyramid factor of 1.2
    orb = cv2.ORB_create(1000, 1.2)
    (kp1, des1) = orb.detectAndCompute(gray, None)
    (kp2, des2) = orb.detectAndCompute(image_template, None)
    # A featureless crop returns des == None, which would make
    # BFMatcher.match raise; treat it as "no matches" instead of
    # crashing the capture loop.
    if des1 is None or des2 is None:
        return 0
    # Brute-force matcher with L1 norm (no cross-check, as before).
    bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=False)
    matches = bf.match(des1, des2)
    # Only the count is used by the callers, so the original
    # distance-sort of the matches was dead work and is dropped.
    return len(matches)
# Open the default webcam and request a 1920x1200 capture resolution.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1200)
# Load the grayscale reference template for each of the five assembly
# steps (flag 0 = cv2.IMREAD_GRAYSCALE). Paths are relative to the CWD.
image_template1 = cv2.imread('get_1.jpg', 0)
image_template2 = cv2.imread('get_2.jpg', 0)
image_template3 = cv2.imread('get_3_.jpg', 0)
image_template4 = cv2.imread('get_4.jpg', 0)
image_template5 = cv2.imread('get_5.jpg', 0)
# image_template = cv2.imread('images/kitkat.jpg', 0)
# FIRST STEP
# Watch ROI #1 until template 1 matches strongly enough, then signal 'b'
# over serial and fall through to step 2.
while True:
    # Grab the next webcam frame
    ret, frame = cap.read()
    # Get height and width of webcam frame
    height, width = frame.shape[:2]
    # Define ROI box dimensions (note: these are loop-invariant and could
    # be hoisted outside the loop)
    top_left_x = int(0.3*(int((width / 7) * 6)))
    top_left_y = int(0.55*(int((height / 3) + (height / 6))))
    bottom_right_x = int(0.5*(int((width / 7) * 6)))
    bottom_right_y = int(0.3*(int((height / 3) - (height / 6))))
    # Draw rectangular window for our region of interest
    cv2.rectangle(frame, (top_left_x,top_left_y), (bottom_right_x,bottom_right_y), 255, 3)
    # Crop window of observation we defined above
    cropped = frame[bottom_right_y:top_left_y , top_left_x:bottom_right_x]
    # Flip frame orientation horizontally
    #frame = cv2.flip(frame,1)
    # Get number of ORB matches against template 1
    matches1 = ORB_detector(cropped, image_template1)
    # Display status string showing the current number of matches
    output_string = "Matches = " + str(matches1)
    cv2.putText(frame, output_string, (50,450), cv2.FONT_HERSHEY_COMPLEX, 1, (250,0,150), 2)
    # Detection threshold — tune for new templates or lighting conditions.
    # NOTE(review): matches1 == 270 falls through both branches (no serial
    # command that frame) — confirm whether one comparison should be >=.
    if matches1 < 270:
        cv2.putText(frame,'Proceed 1st step',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2)
        ser1.write('a'.encode())
    # Object detected: highlight the ROI, signal completion, advance.
    if matches1 > 270:
        cv2.rectangle(frame, (top_left_x,top_left_y), (bottom_right_x,bottom_right_y), (0,255,0), 3)
        cv2.putText(frame,'Proceed 2nd step',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2)
        ser1.write('b'.encode())
        time.sleep(2)
        break
    cv2.imshow('Object Detector using ORB', frame)
    if cv2.waitKey(1) == 13: #13 is the Enter Key
        break
# SECOND STEP
# Watch ROI #2 until template 2 matches, then signal 'd' and advance.
while True:
    ret, frame = cap.read()
    # Get height and width of webcam frame
    height, width = frame.shape[:2]
    # Define ROI box dimensions (loop-invariant; could be hoisted)
    top_left_x = int(0.57*(int((width / 7) * 6)))
    top_left_y = int(0.55*(int((height / 3) + (height / 6))))
    bottom_right_x = int(0.68*(int((width / 7) * 6)))
    bottom_right_y = int(0.3*(int((height / 3) - (height / 6))))
    # Draw rectangular window for our region of interest
    cv2.rectangle(frame, (top_left_x,top_left_y), (bottom_right_x,bottom_right_y), 255, 3)
    # Crop window of observation we defined above
    cropped = frame[bottom_right_y:top_left_y , top_left_x:bottom_right_x]
    # Flip frame orientation horizontally
    #frame = cv2.flip(frame,1)
    # Get number of ORB matches against template 2
    matches2 = ORB_detector(cropped, image_template2)
    # Display status string showing the current number of matches
    output_string = "Matches = " + str(matches2)
    cv2.putText(frame, output_string, (50,450), cv2.FONT_HERSHEY_COMPLEX, 1, (250,0,150), 2)
    # Detection threshold — tune for new templates or lighting conditions.
    # NOTE(review): matches2 == 140 falls through both branches.
    if matches2 < 140:
        cv2.putText(frame,'Proceed 2nd step',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2)
        ser1.write('c'.encode())
    # Object detected: highlight the ROI, signal completion, advance.
    if matches2 > 140:
        cv2.rectangle(frame, (top_left_x,top_left_y), (bottom_right_x,bottom_right_y), (0,255,0), 3)
        cv2.putText(frame,'Proceed 3rd step',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2)
        ser1.write('d'.encode())
        time.sleep(2)
        break;
    cv2.imshow('Object Detector using ORB', frame)
    if cv2.waitKey(1) == 13: #13 is the Enter Key
        break
# THIRD Step
# Watch ROI #3 until template 3 matches, then signal 'f' and advance.
while True:
    ret, frame = cap.read()
    # Get height and width of webcam frame
    height, width = frame.shape[:2]
    # Define ROI box dimensions (loop-invariant; could be hoisted)
    top_left_x = int(0.73*(int((width / 7) * 6)))
    top_left_y = int(0.55*(int((height / 3) + (height / 6))))
    bottom_right_x = int(0.93*(int((width / 7) * 6)))
    bottom_right_y = int(0.3*(int((height / 3) - (height / 6))))
    # Draw rectangular window for our region of interest
    cv2.rectangle(frame, (top_left_x,top_left_y), (bottom_right_x,bottom_right_y), 255, 3)
    # Crop window of observation we defined above
    cropped = frame[bottom_right_y:top_left_y , top_left_x:bottom_right_x]
    # Flip frame orientation horizontally
    #frame = cv2.flip(frame,1)
    # Get number of ORB matches against template 3
    matches3 = ORB_detector(cropped, image_template3)
    # Display status string showing the current number of matches
    output_string = "Matches = " + str(matches3)
    cv2.putText(frame, output_string, (50,450), cv2.FONT_HERSHEY_COMPLEX, 1, (250,0,150), 2)
    # Detection threshold — tune for new templates or lighting conditions.
    # NOTE(review): matches3 == 155 falls through both branches.
    if matches3 < 155:
        cv2.putText(frame,'Proceed 3rd step',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2)
        ser1.write('e'.encode())
    # Object detected: highlight the ROI, signal completion, advance.
    if matches3 > 155:
        cv2.rectangle(frame, (top_left_x,top_left_y), (bottom_right_x,bottom_right_y), (0,255,0), 3)
        cv2.putText(frame,'Proceed 4th step',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2)
        ser1.write('f'.encode())
        time.sleep(2)
        break;
    cv2.imshow('Object Detector using ORB', frame)
    if cv2.waitKey(1) == 13: #13 is the Enter Key
        break
# FOURTH STEP
# Watch ROI #4 (lower region of the frame) until template 4 matches,
# then signal 'h' and advance.
while True:
    ret, frame = cap.read()
    # Get height and width of webcam frame
    height, width = frame.shape[:2]
    # Define ROI box dimensions (loop-invariant; could be hoisted)
    top_left_x = int(0.72*(int((width / 7) * 6)))
    top_left_y = int(1.1*(int((height / 3) + (height / 6))))
    bottom_right_x = int(0.85*(int((width / 7) * 6)))
    bottom_right_y = int(1.90*(int((height / 3) - (height / 6))))
    # Draw rectangular window for our region of interest
    cv2.rectangle(frame, (top_left_x,top_left_y), (bottom_right_x,bottom_right_y), 255, 3)
    # Crop window of observation we defined above
    cropped = frame[bottom_right_y:top_left_y , top_left_x:bottom_right_x]
    # Flip frame orientation horizontally
    #frame = cv2.flip(frame,1)
    # Get number of ORB matches against template 4
    matches4 = ORB_detector(cropped, image_template4)
    # Display status string showing the current number of matches
    output_string = "Matches = " + str(matches4)
    cv2.putText(frame, output_string, (50,450), cv2.FONT_HERSHEY_COMPLEX, 1, (250,0,150), 2)
    # Detection threshold — tune for new templates or lighting conditions.
    # NOTE(review): matches4 == 60 falls through both branches.
    if matches4 < 60:
        cv2.putText(frame,'Proceed 4th step',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2)
        ser1.write('g'.encode())
    # Object detected: highlight the ROI, signal completion, advance.
    if matches4 > 60:
        cv2.rectangle(frame, (top_left_x,top_left_y), (bottom_right_x,bottom_right_y), (0,255,0), 3)
        cv2.putText(frame,'Proceed 5th step',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2)
        ser1.write('h'.encode())
        time.sleep(2)
        break;
    cv2.imshow('Object Detector using ORB', frame)
    if cv2.waitKey(1) == 13: #13 is the Enter Key
        break
# FIFTH STEP
# Watch ROI #5 until template 5 matches, then signal 'j' (finished)
# and exit the detection sequence.
while True:
    ret, frame = cap.read()
    # Get height and width of webcam frame
    height, width = frame.shape[:2]
    # Define ROI box dimensions (loop-invariant; could be hoisted)
    top_left_x = int(0.37*(int((width / 7) * 6)))
    top_left_y = int(1.83*(int((height / 3) + (height / 6))))
    bottom_right_x = int(0.85*(int((width / 7) * 6)))
    bottom_right_y = int(1.33*(int((height / 3) + (height / 6))))
    # Draw rectangular window for our region of interest
    cv2.rectangle(frame, (top_left_x,top_left_y), (bottom_right_x,bottom_right_y), 255, 3)
    # Crop window of observation we defined above
    cropped = frame[bottom_right_y:top_left_y , top_left_x:bottom_right_x]
    # Flip frame orientation horizontally
    #frame = cv2.flip(frame,1)
    # Get number of ORB matches against template 5
    matches5 = ORB_detector(cropped, image_template5)
    # Display status string showing the current number of matches
    output_string = "Matches = " + str(matches5)
    cv2.putText(frame, output_string, (50,450), cv2.FONT_HERSHEY_COMPLEX, 1, (250,0,150), 2)
    # Detection threshold — tune for new templates or lighting conditions.
    # NOTE(review): matches5 == 400 falls through both branches.
    if matches5 < 400:
        cv2.putText(frame,'Proceed 5th step',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2)
        ser1.write('i'.encode())
    # Final object detected: signal 'j' and leave the sequence.
    if matches5 > 400:
        cv2.rectangle(frame, (top_left_x,top_left_y), (bottom_right_x,bottom_right_y), (0,255,0), 3)
        cv2.putText(frame,'Finished',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2)
        ser1.write('j'.encode())
        time.sleep(5)
        break
    cv2.imshow('Object Detector using ORB', frame)
    if cv2.waitKey(1) == 13: #13 is the Enter Key
        break
# Release the webcam and close all OpenCV windows before exiting.
cap.release()
cv2.destroyAllWindows()
| 37.498361
| 100
| 0.668095
| 1,776
| 11,437
| 4.169482
| 0.137387
| 0.037812
| 0.021607
| 0.042539
| 0.788926
| 0.788926
| 0.783255
| 0.783255
| 0.783255
| 0.777313
| 0
| 0.067957
| 0.213867
| 11,437
| 305
| 101
| 37.498361
| 0.755645
| 0.332342
| 0
| 0.57047
| 0
| 0
| 0.052032
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006711
| false
| 0
| 0.026846
| 0
| 0.040268
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
292dc1500e97cba6baaf5357c91031774596245f
| 54
|
py
|
Python
|
tommaso_strategy.py
|
ericremoreynolds/tictactoe
|
6a2c2e2fe5094cf5ec9dd61374b9c2521544d697
|
[
"MIT"
] | null | null | null |
tommaso_strategy.py
|
ericremoreynolds/tictactoe
|
6a2c2e2fe5094cf5ec9dd61374b9c2521544d697
|
[
"MIT"
] | null | null | null |
tommaso_strategy.py
|
ericremoreynolds/tictactoe
|
6a2c2e2fe5094cf5ec9dd61374b9c2521544d697
|
[
"MIT"
] | null | null | null |
def tommaso_strategy(state):
    """Tic-tac-toe strategy that always plays cell 5.

    NOTE(review): cell-numbering semantics belong to state.play —
    presumably 5 is the centre square; confirm against the game engine.
    """
    chosen_cell = 5
    return state.play(chosen_cell)
| 18
| 28
| 0.740741
| 8
| 54
| 4.875
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0.148148
| 54
| 2
| 29
| 27
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
295530f931565298c3bad984c34726b733431c7b
| 59
|
py
|
Python
|
example/app.py
|
1010code/cryptoString
|
597ca2adcd31ae4c0909b6472e833b50e49d4ae9
|
[
"MIT"
] | null | null | null |
example/app.py
|
1010code/cryptoString
|
597ca2adcd31ae4c0909b6472e833b50e49d4ae9
|
[
"MIT"
] | null | null | null |
example/app.py
|
1010code/cryptoString
|
597ca2adcd31ae4c0909b6472e833b50e49d4ae9
|
[
"MIT"
] | null | null | null |
# Example usage of the third-party cryptoString package.
import cryptoString as crypto
# RandomChar(30) presumably returns a random 30-character string --
# confirm against the cryptoString package documentation.
print(crypto.RandomChar(30))
| 19.666667
| 29
| 0.830508
| 8
| 59
| 6.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.084746
| 59
| 3
| 30
| 19.666667
| 0.87037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
295ba0e45ca19f718da8c82a6c70eaab09a9b939
| 308
|
py
|
Python
|
morningsite/morningapp/admin.py
|
october-rain/good-morning
|
3374f6ac3042e38c483acfea26656010a0b4a686
|
[
"MIT"
] | null | null | null |
morningsite/morningapp/admin.py
|
october-rain/good-morning
|
3374f6ac3042e38c483acfea26656010a0b4a686
|
[
"MIT"
] | null | null | null |
morningsite/morningapp/admin.py
|
october-rain/good-morning
|
3374f6ac3042e38c483acfea26656010a0b4a686
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from morningapp.models import Article,Profile,Contact,Tag,Tag_Article,Mood
# Register your models here.
# Expose every morningapp model in the Django admin site. Registration
# order matches the original one-call-per-model version.
for _model in (Article, Profile, Contact, Tag, Tag_Article, Mood):
    admin.site.register(_model)
| 30.8
| 74
| 0.831169
| 45
| 308
| 5.644444
| 0.355556
| 0.212598
| 0.401575
| 0.188976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061688
| 308
| 9
| 75
| 34.222222
| 0.878893
| 0.084416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
296e529fbec732d4393ea57a9f0aab646355707b
| 31
|
py
|
Python
|
uu/formlibrary/upgrades/__init__.py
|
mostscript/uu.formlibrary
|
a7f5819abac7c1ddea69ddee8fce465d45f4d1d5
|
[
"BSD-4-Clause-UC"
] | null | null | null |
uu/formlibrary/upgrades/__init__.py
|
mostscript/uu.formlibrary
|
a7f5819abac7c1ddea69ddee8fce465d45f4d1d5
|
[
"BSD-4-Clause-UC"
] | null | null | null |
uu/formlibrary/upgrades/__init__.py
|
mostscript/uu.formlibrary
|
a7f5819abac7c1ddea69ddee8fce465d45f4d1d5
|
[
"BSD-4-Clause-UC"
] | null | null | null |
# package used for migrations
| 15.5
| 30
| 0.774194
| 4
| 31
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193548
| 31
| 1
| 31
| 31
| 0.96
| 0.870968
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
462722bc53e21747f78bd4731cb4a317234b4229
| 34
|
py
|
Python
|
demo/login.py
|
tianhezhizhou/pythonProject
|
c70cd8ed420bb09d25fe04f0a86689142f34b2b3
|
[
"MIT"
] | null | null | null |
demo/login.py
|
tianhezhizhou/pythonProject
|
c70cd8ed420bb09d25fe04f0a86689142f34b2b3
|
[
"MIT"
] | null | null | null |
demo/login.py
|
tianhezhizhou/pythonProject
|
c70cd8ed420bb09d25fe04f0a86689142f34b2b3
|
[
"MIT"
] | null | null | null |
# Numeric constants for the login demo (their meaning is not documented
# upstream -- values preserved exactly).
num, num2, num3, num4 = 300, 14, 100, 111
| 6.8
| 8
| 0.764706
| 8
| 34
| 3.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.466667
| 0.117647
| 34
| 4
| 9
| 8.5
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4628ace6e6bfa53ac2cc46d3957ff5eb00bc14ef
| 73
|
py
|
Python
|
vkbottle/tools/dev_tools/template/__init__.py
|
van-burgerberg/vkbottle
|
134eb76e6289b7674142316ca72646ce999d9388
|
[
"MIT"
] | null | null | null |
vkbottle/tools/dev_tools/template/__init__.py
|
van-burgerberg/vkbottle
|
134eb76e6289b7674142316ca72646ce999d9388
|
[
"MIT"
] | null | null | null |
vkbottle/tools/dev_tools/template/__init__.py
|
van-burgerberg/vkbottle
|
134eb76e6289b7674142316ca72646ce999d9388
|
[
"MIT"
] | null | null | null |
from .generator import template_gen
from .element import TemplateElement
| 24.333333
| 36
| 0.863014
| 9
| 73
| 6.888889
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 73
| 2
| 37
| 36.5
| 0.953846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
465b9a272967d353d04fe627c676bea2cbc118d9
| 53,515
|
py
|
Python
|
beams/test/test_models.py
|
aPeter1/muSR_Visualization
|
eeb01d37cb69a7e82e49331fe5620bb2773909ed
|
[
"MIT"
] | null | null | null |
beams/test/test_models.py
|
aPeter1/muSR_Visualization
|
eeb01d37cb69a7e82e49331fe5620bb2773909ed
|
[
"MIT"
] | null | null | null |
beams/test/test_models.py
|
aPeter1/muSR_Visualization
|
eeb01d37cb69a7e82e49331fe5620bb2773909ed
|
[
"MIT"
] | null | null | null |
import pytest
import pickle
import numpy as np
from app.model import objects
from app.resources import resources
def close_enough(val_one, val_two, tolerance):
    """Return True when the two values differ by no more than *tolerance*."""
    difference = val_one - val_two
    return -tolerance <= difference <= tolerance
class TestHistograms:
    """Unit tests for ``objects.Histogram``.

    Covers construction, ``intersect()``, ``background_radiation()``,
    pickle round-trips, the persistent-data round trip, and ``combine()``.
    All fixtures are built inline via ``pytest.mark.parametrize``.
    """

    @pytest.mark.parametrize("input_array, t0, good_start, good_end, bkgd_start, bkgd_end, run_id, bin_size, title",
                             [([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
                               8, 5, 19, 0, 4, "RANDOM_ID", 0.2, "Front")])
    def test_basic_construction(self, input_array, t0, good_start, good_end, bkgd_start,
                                bkgd_end, run_id, bin_size, title):
        # Construction should not raise; no further assertions are made.
        objects.Histogram(input_array, t0, good_start, good_end, bkgd_start, bkgd_end, title, run_id, bin_size)

    @pytest.mark.parametrize("hist_one, hist_two, start_bin_one, start_bin_two, end_bin_one, end_bin_two, init_dif",
                             # Two histograms with identical meta values.
                             [(objects.Histogram(range(27648),
                                                 980, 1030, 27648, 70, 900, "Front", "RANDOM_ID", 0.2),
                               objects.Histogram(range(27648),
                                                 980, 1030, 27648, 70, 900, "Back", "RANDOM_ID", 0.2),
                               1030, 1030, 27648, 27648, 50),
                              # Histograms with different time zeroes.
                              (objects.Histogram(range(27648),
                                                 980, 1030, 27648, 70, 900, "Front", "RANDOM_ID", 0.2),
                               objects.Histogram(range(27648),
                                                 979, 1030, 27648, 70, 900, "Back", "RANDOM_ID", 0.2),
                               1031, 1030, 27648, 27647, 51),
                              # Histograms with different good bin starts.
                              (objects.Histogram(range(27648),
                                                 980, 2000, 27648, 70, 900, "Front", "RANDOM_ID", 0.2),
                               objects.Histogram(range(27648),
                                                 980, 1030, 27648, 70, 900, "Back", "RANDOM_ID", 0.2),
                               2000, 2000, 27648, 27648, 1020),
                              ])
    def test_intersect(self, hist_one: objects.Histogram, hist_two: objects.Histogram,
                       start_bin_one, start_bin_two, end_bin_one, end_bin_two, init_dif):
        # intersect() appears to return five values (start/end bins of both
        # histograms and an initial difference) -- confirm against objects.py.
        start_bin_one_c, start_bin_two_c, end_bin_one_c, end_bin_two_c, init_dif_c = hist_one.intersect(hist_two)
        assert start_bin_one_c == start_bin_one
        assert start_bin_two_c == start_bin_two
        assert end_bin_one_c == end_bin_one
        assert end_bin_two_c == end_bin_two
        assert init_dif_c == init_dif

    @pytest.mark.parametrize("hist_one, hist_two",
                             # Start bin is greater then the end bin
                             [(objects.Histogram(range(27648),
                                                 980, 1030, 1000, 70, 900, "Front", "RANDOM_ID", 0.2),
                               objects.Histogram(range(27648),
                                                 979, 1030, 27648, 70, 900, "Back", "RANDOM_ID", 0.2)),
                              # Start bin is below 0
                              (objects.Histogram(range(27648),
                                                 980, -5, 27648, 70, 900, "Front", "RANDOM_ID", 0.2),
                               objects.Histogram(range(27648),
                                                 980, 1030, 27648, 70, 900, "Back", "RANDOM_ID", 0.2)),
                              # End bin is above length of histogram
                              (objects.Histogram(range(27648),
                                                 980, 2000, 70000, 70, 900, "Front", "RANDOM_ID", 0.2),
                               objects.Histogram(range(27648),
                                                 980, 1030, 27648, 70, 900, "Back", "RANDOM_ID", 0.2))
                              ])
    def test_intersect_raise_exception(self, hist_one: objects.Histogram, hist_two: objects.Histogram):
        # Invalid bin ranges must raise in both directions of the intersect.
        with pytest.raises(ValueError):
            hist_one.intersect(hist_two)
        with pytest.raises(ValueError):
            hist_two.intersect(hist_one)

    @pytest.mark.parametrize("hist, radiation",
                             [(objects.Histogram(range(27648),
                                                 980, 1030, 27648, 70, 900, "Front", "RANDOM_ID", 0.2),
                               485.0),
                              (objects.Histogram(range(27648),
                                                 980, 1030, 27648, 0, 900, "Front", "RANDOM_ID", 0.2),
                               450.0),
                              (objects.Histogram(range(27648),
                                                 980, 1030, 27648, 700, 700, "Front", "RANDOM_ID", 0.2),
                               700),
                              ])
    def test_background_radiation(self, hist: objects.Histogram, radiation):
        # Expected values are the mean of the background window of
        # range(27648) -- e.g. mean of bins 70..900 is 485.0.
        radiation_c = hist.background_radiation()
        assert radiation_c == radiation

    @pytest.mark.parametrize("hist",
                             # Background start is below 0
                             [(objects.Histogram(range(27648),
                                                 980, 1030, 27648, -5, 900, "Front", "RANDOM_ID", 0.2)),
                              # Background start is above background end
                              (objects.Histogram(range(27648),
                                                 980, 1030, 27648, 1200, 900, "Front", "RANDOM_ID", 0.2)),
                              # Background end is above length of histogram
                              (objects.Histogram(range(27648),
                                                 980, 1030, 27648, 700, 70000, "Front", "RANDOM_ID", 0.2)),
                              ])
    def test_background_radiation_raise_exception(self, hist: objects.Histogram):
        with pytest.raises(ValueError):
            hist.background_radiation()

    @pytest.mark.parametrize("hist",
                             [(objects.Histogram(range(27648), 980, 1030, 27648, 70, 900, "Front", "RANDOM_ID", 0.2))])
    def test_pickling(self, hist):
        # This should fail if you have added an attribute to the histogram class and not added it to the
        # __array_finalize__ method (this will make more sense if you look at the code).
        histogram_unpickled = pickle.loads(pickle.dumps(hist))
        assert hist == histogram_unpickled

    @pytest.mark.parametrize("hist",
                             [(objects.Histogram(range(27648), 980, 1030, 27648, 70, 900, "Front", "RANDOM_ID", 0.2))])
    def test_persistent_object(self, hist):
        # Round-trip through the persistent (minimized) representation.
        histogram_minimized = hist.get_persistent_data()
        histogram_maximized = hist.build_from_persistent_data(histogram_minimized)
        assert hist == histogram_maximized

    @pytest.mark.parametrize("hist",
                             [(objects.Histogram(range(27648), 980, 1030, 27648, 70, 900, "Front", "RANDOM_ID", 0.2))])
    def test_persistent_with_pickling(self, hist):
        # Persistent representation must itself survive pickling.
        histogram_minimized = hist.get_persistent_data()
        histogram_minimized_unpickled = pickle.loads(pickle.dumps(histogram_minimized))
        histogram_maximized = hist.build_from_persistent_data(histogram_minimized_unpickled)
        assert hist == histogram_maximized

    @pytest.mark.parametrize("hists, correct_combined_hist",
                             [((objects.Histogram(range(27648),
                                                  980, 680, 25000, 600, 1000, "Front", "3412", 0.2),
                                objects.Histogram(range(27648),
                                                  980, 1030, 27648, 500, 900, "Front", "3413", 0.2)),
                               (objects.Histogram(range(0, 55296, 2),
                                                  980, 1030, 25000, 600, 900, "Front", "3412, 3413", 0.2))),
                              ((objects.Histogram(range(27648),
                                                  1000, 680, 25000, 600, 1000, "Front", "3412", 0.2),
                                objects.Histogram(range(27648),
                                                  980, 1030, 27648, 500, 900, "Front", "3413", 0.2)),
                               (objects.Histogram(range(20, 55276, 2),
                                                  980, 1030, 24980, 580, 900, "Front", "3412, 3413", 0.2))),
                              ((objects.Histogram(range(30000),
                                                  600, 1000, 23400, 120, 480, "Back", "1612", 0.2),
                                objects.Histogram(range(30000),
                                                  568, 1200, 26000, 122, 464, "Back", "1613", 0.2)),
                               (objects.Histogram(range(32, 59968, 2),
                                                  568, 1200, 23368, 122, 448, "Back", "1612, 1613", 0.2))),
                              ((objects.Histogram(range(21000),
                                                  500, 550, 20000, 65, 420, "Forw", "19232", 0.3),
                                objects.Histogram(range(21000),
                                                  600, 613, 20380, 50, 553, "Forw", "19233", 0.3),
                                objects.Histogram(range(21000),
                                                  300, 334, 19670, 66, 120, "Forw", "19234", 0.3)
                                ),
                               (objects.Histogram(range(500, 62600, 3),
                                                  300, 350, 19670, 66, 120, "Forw", "doesn't matter", 0.3)))
                              ])
    def test_combine(self, hists, correct_combined_hist):
        # combine() is a classmethod-style aggregate over several runs;
        # equality presumably covers both counts and meta -- see objects.py.
        combined = objects.Histogram.combine(hists)
        assert combined == correct_combined_hist

    @pytest.mark.parametrize("hists",
                             [(objects.Histogram(range(27648),
                                                 980, 680, 25000, 600, 1000, "Front", "3412", 0.2),
                               objects.Histogram(range(27648),
                                                 980, 1030, 27648, 500, 900, "Front", "3413", 0.3)),
                              (objects.Histogram(range(27648),
                                                 980, 680, 25000, 600, 1000, "Front", "3412", 0.2),
                               objects.Histogram(range(27648),
                                                 980, 1030, 27648, 500, 900, "Back", "3413", 0.3)),
                              [objects.Histogram(range(27648),
                                                 980, 680, 25000, 600, 1000, "Front", "3412", 0.2)]
                              ])
    def test_combine_exception(self, hists):
        # Mismatched bin sizes / titles or a single histogram must raise.
        with pytest.raises(ValueError):
            objects.Histogram.combine(hists)
class TestAsymmetries:
    """Unit tests for ``objects.Asymmetry``.

    Covers both constructor paths (raw arrays vs. a histogram pair),
    pickle and persistent-data round trips, ``bin()``, ``integrate()``,
    ``correct()``, ``raw()`` and ``cut()``.

    BUG FIX vs. the original: in ``test_bin_values`` the final
    ``np.allclose(...)`` result was discarded (missing ``assert``), so the
    ``calculated`` comparison never actually tested anything.
    """

    @pytest.mark.parametrize("input_array, t0, bin_size, uncertainty, time",
                             [(range(27648), 980, 0.2, range(27648), range(27648))])
    def test_first_constructor_combination(self, input_array, t0, bin_size, uncertainty, time):
        # Test construction without histograms
        objects.Asymmetry(input_array=input_array, time_zero=t0, bin_size=bin_size, uncertainty=uncertainty, time=time)

    @pytest.mark.parametrize("hist_one, hist_two",
                             [(objects.Histogram(range(27648),
                                                 980, 1030, 27648, 70, 900, "Front", "RANDOM_ID", 0.2),
                               objects.Histogram(range(27648),
                                                 980, 1030, 27648, 70, 900, "Back", "RANDOM_ID", 0.2))])
    def test_second_constructor_combination(self, hist_one, hist_two):
        # Construction from a histogram pair must populate the derived fields.
        asymmetry = objects.Asymmetry(histogram_one=hist_one, histogram_two=hist_two)
        assert asymmetry.bin_size == 0.2
        assert asymmetry.time_zero == 50
        assert asymmetry.alpha == 1
        assert asymmetry.uncertainty is not None
        assert asymmetry.time is not None

    @pytest.mark.parametrize("asymmetry",
                             [(objects.Asymmetry(input_array=range(27648), time_zero=980, bin_size=0.2,
                                                 uncertainty=range(27648), time=range(27648))),
                              (objects.Asymmetry(histogram_one=objects.Histogram(range(27648),
                                                                                 980, 1030, 27648, 70, 900, "Front",
                                                                                 "RANDOM_ID", 0.2),
                                                 histogram_two=objects.Histogram(range(27648),
                                                                                 980, 1030, 27648, 70, 900, "Back",
                                                                                 "RANDOM_ID", 0.2)))])
    def test_pickling(self, asymmetry):
        # This should fail if you have added an attribute to the asymmetry class and not added it to the
        # __array_finalize__ method (this will make more sense if you look at the code).
        asymmetry_unpickled = pickle.loads(pickle.dumps(asymmetry))
        assert asymmetry == asymmetry_unpickled

    @pytest.mark.parametrize("asymmetry",
                             [(objects.Asymmetry(input_array=range(27648), time_zero=980, bin_size=0.2,
                                                 uncertainty=range(27648), time=range(27648))),
                              (objects.Asymmetry(histogram_one=objects.Histogram(range(27648),
                                                                                 980, 1030, 27648, 70, 900, "Front",
                                                                                 "RANDOM_ID", 0.2),
                                                 histogram_two=objects.Histogram(range(27648),
                                                                                 980, 1030, 27648, 70, 900, "Back",
                                                                                 "RANDOM_ID", 0.2)))])
    def test_persistent_object(self, asymmetry: objects.Asymmetry):
        # Round-trip through the persistent (minimized) representation.
        asymmetry_minimized = asymmetry.get_persistent_data()
        asymmetry_maximized = asymmetry.build_from_persistent_data(asymmetry_minimized)
        assert asymmetry == asymmetry_maximized

    @pytest.mark.parametrize("asymmetry",
                             [(objects.Asymmetry(input_array=range(27648), time_zero=980, bin_size=0.2,
                                                 uncertainty=range(27648), time=range(27648))),
                              (objects.Asymmetry(histogram_one=objects.Histogram(range(27648),
                                                                                 980, 1030, 27648, 70, 900, "Front",
                                                                                 "RANDOM_ID", 0.2),
                                                 histogram_two=objects.Histogram(range(27648),
                                                                                 980, 1030, 27648, 70, 900, "Back",
                                                                                 "RANDOM_ID", 0.2)))])
    def test_persistent_with_pickling(self, asymmetry: objects.Asymmetry):
        # Persistent representation must itself survive pickling.
        asymmetry_minimized = asymmetry.get_persistent_data()
        asymmetry_minimized_unpickled = pickle.loads(pickle.dumps(asymmetry_minimized))
        asymmetry_maximized = asymmetry.build_from_persistent_data(asymmetry_minimized_unpickled)
        assert asymmetry == asymmetry_maximized

    @pytest.mark.parametrize("asymmetry, expected_binned_asymmetry, bin_size",
                             [(objects.Asymmetry(input_array=range(27648), time_zero=980, bin_size=0.2,
                                                 uncertainty=range(27648), time=range(27648)),
                               objects.Asymmetry(input_array=range(36), time_zero=980, bin_size=0.2,
                                                 uncertainty=range(36), time=range(36)),
                               150),
                              # Bin size is reasonable, even cut of bins
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(10), time_zero=8, bin_size=1,
                                                 uncertainty=range(10), time=range(10)),
                               10),
                              # Bin size is reasonable, throws away leftover bins so we have 2 instead of 3.
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(2), time_zero=8, bin_size=1,
                                                 uncertainty=range(2), time=range(2)),
                               40),
                              # Bin size is equal to current bin size
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               1),
                              # Bin size == 0
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               0),
                              # Bin size that should result in a binned asymmetry of size == 1
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(1), time_zero=8, bin_size=1,
                                                 uncertainty=range(1), time=range(1)),
                               100)
                              ])
    def test_bin_lengths(self, asymmetry, expected_binned_asymmetry, bin_size):
        # Only lengths are checked here; values are covered by test_bin_values.
        given_binned_asymmetry = asymmetry.bin(bin_size)
        assert len(given_binned_asymmetry) == len(expected_binned_asymmetry)
        assert len(given_binned_asymmetry.time) == len(expected_binned_asymmetry.time)
        assert len(given_binned_asymmetry.uncertainty) == len(expected_binned_asymmetry.uncertainty)
        if given_binned_asymmetry.calculated is not None or expected_binned_asymmetry.calculated is not None:
            assert len(given_binned_asymmetry.calculated) == len(expected_binned_asymmetry.calculated)

    @pytest.mark.parametrize("asymmetry, expected_binned_asymmetry, bin_size",
                             [(objects.Asymmetry(input_array=[1 for _ in range(15)], uncertainty=[1 for _ in range(15)],
                                                 time=range(15), time_zero=2, bin_size=1),
                               objects.Asymmetry(input_array=[1, 1, 1], uncertainty=[0.447, 0.447, 0.447],
                                                 time=range(3), time_zero=2, bin_size=1),
                               5),
                              (objects.Asymmetry(input_array=[1 for _ in range(15)], uncertainty=[1 for _ in range(15)],
                                                 time=range(15), time_zero=2, bin_size=1),
                               objects.Asymmetry(input_array=[1, 1], uncertainty=[0.408, 0.408],
                                                 time=range(2), time_zero=2, bin_size=1),
                               6),
                              (objects.Asymmetry(input_array=[3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 8, 7, 9, 8, 0],
                                                 uncertainty=[3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 8, 7, 9, 8, 0],
                                                 time=range(15), time_zero=2, bin_size=1),
                               objects.Asymmetry(input_array=[3.5, 6.5], uncertainty=[1.481, 2.682],
                                                 time=range(2), time_zero=2, bin_size=1),
                               6),
                              ])
    def test_bin_values(self, asymmetry, expected_binned_asymmetry, bin_size):
        given_binned_asymmetry = asymmetry.bin(bin_size)
        assert np.allclose(given_binned_asymmetry, expected_binned_asymmetry, 0.005)
        assert np.allclose(given_binned_asymmetry.uncertainty, expected_binned_asymmetry.uncertainty, 0.005)
        if given_binned_asymmetry.calculated is not None or expected_binned_asymmetry.calculated is not None:
            # BUG FIX: the original dropped the result of this allclose call,
            # so the `calculated` arrays were never actually compared.
            assert np.allclose(given_binned_asymmetry.calculated, expected_binned_asymmetry.calculated, 0.005)

    @pytest.mark.parametrize("asymmetry, bin_size",
                             # Bin size would produce an asymmetry with no elements.
                             [(objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)), 101),
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=.1,
                                                 uncertainty=range(100), time=range(100)), 1001),
                              # Bin size is negative
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)), -1)
                              ])
    def test_bin_raise_exception(self, asymmetry, bin_size):
        with pytest.raises(ValueError):
            asymmetry.bin(bin_size)

    @pytest.mark.parametrize("asymmetry, min_time, max_time, expected_integration, expected_uncertainty",
                             [
                                 (objects.Asymmetry(input_array=np.zeros(10), time_zero=8, bin_size=1,
                                                    uncertainty=np.zeros(10), time=np.array(range(10))),
                                  None, None, 0, 0),
                                 (objects.Asymmetry(input_array=np.ones(10), time_zero=8, bin_size=1,
                                                    uncertainty=np.ones(10), time=np.array(range(10))),
                                  None, None, 9, 0.003),
                                 (objects.Asymmetry(input_array=np.array(range(10)), time_zero=8, bin_size=1,
                                                    uncertainty=np.ones(10), time=np.array(range(10))),
                                  None, None, 40.5, 0.003),
                                 (objects.Asymmetry(input_array=np.array(range(10)), time_zero=8, bin_size=1,
                                                    uncertainty=np.array(range(10)), time=np.array(range(10))),
                                  None, None, 40.5, 0.017),
                                 (objects.Asymmetry(input_array=np.array(range(10)), time_zero=8, bin_size=1,
                                                    uncertainty=np.ones(10), time=np.array(range(10))),
                                  0, 1, 0, 0),
                                 (objects.Asymmetry(input_array=np.array(range(10)), time_zero=8, bin_size=1,
                                                    uncertainty=np.ones(10), time=np.array(range(10))),
                                  7, 9, 7.5, 0.001)
                             ])
    def test_integrate(self, asymmetry, min_time, max_time, expected_integration, expected_uncertainty):
        # integrate() returns (value, uncertainty); None bounds mean the
        # full time range -- confirm against objects.Asymmetry.integrate.
        calculated_integration, calculated_uncertainty = asymmetry.integrate(min_time, max_time)
        assert close_enough(calculated_integration, expected_integration, 0.001)
        assert close_enough(calculated_uncertainty, expected_uncertainty, 0.001)

    @pytest.mark.parametrize("asymmetry, min_time, max_time",
                             [
                                 (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                    uncertainty=range(100), time=range(100)), 9, 4),
                                 (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                    uncertainty=range(100), time=range(100)), '1', None)
                             ])
    def test_integrate_raises_exception(self, asymmetry, min_time, max_time):
        # Inverted ranges and non-numeric bounds must raise.
        with pytest.raises(Exception):
            asymmetry.integrate(min_time, max_time)

    @pytest.mark.parametrize("asymmetry, expected_corrected_asymmetry, alpha",
                             # Correcting to value of 1.0 with a raw asymmetry.
                             [(objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               1),
                              # Correcting to a value of 2.0 when the alpha of an asymmetry is already 2.0.
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100), alpha=2),
                               objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100), alpha=2),
                               2),
                              # Correcting an array of 1's with any alpha should result in array of 1's
                              (objects.Asymmetry(input_array=[1, 1, 1], time_zero=8, bin_size=1,
                                                 uncertainty=[1, 1, 1], time=range(3), alpha=1),
                               objects.Asymmetry(input_array=[1, 1, 1], time_zero=8, bin_size=1,
                                                 uncertainty=[1, 1, 1], time=range(3), alpha=2.458),
                               2.458),
                              # Correcting an array of values with a different alpha
                              (objects.Asymmetry(input_array=[1, 2, 3], time_zero=8, bin_size=1,
                                                 uncertainty=[1, 2, 3], time=range(3), alpha=1),
                               objects.Asymmetry(input_array=[1.000, 1.313, 1.510], time_zero=8, bin_size=1,
                                                 uncertainty=[1, 2, 3], time=range(3), alpha=2.458),
                               2.458)
                              ])
    def test_correct(self, asymmetry, expected_corrected_asymmetry, alpha):
        given_corrected_asymmetry = asymmetry.correct(alpha)
        assert given_corrected_asymmetry.alpha == expected_corrected_asymmetry.alpha
        assert np.allclose(given_corrected_asymmetry, expected_corrected_asymmetry, 0.005)
        assert np.allclose(given_corrected_asymmetry.uncertainty, expected_corrected_asymmetry.uncertainty, 0.005)
        if given_corrected_asymmetry.calculated is not None or expected_corrected_asymmetry.calculated is not None:
            assert np.allclose(given_corrected_asymmetry.calculated, expected_corrected_asymmetry.calculated, 0.005)

    @pytest.mark.parametrize("asymmetry, expected_corrected_asymmetry",
                             # Calling raw on a raw asymmetry
                             [(objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100))),
                              # Calling raw on an array of 1's with a non-one alpha (should be the same)
                              (objects.Asymmetry(input_array=[1, 1, 1], time_zero=8, bin_size=1,
                                                 uncertainty=[1, 1, 1], time=range(3), alpha=2.458),
                               objects.Asymmetry(input_array=[1, 1, 1], time_zero=8, bin_size=1,
                                                 uncertainty=[1, 1, 1], time=range(3))),
                              # Calling raw on a corrected asymmetry. (Inverse is in test_correct).
                              (objects.Asymmetry(input_array=[1.000, 1.313, 1.510], time_zero=8, bin_size=1,
                                                 uncertainty=[1, 2, 3], time=range(3), alpha=2.458),
                               objects.Asymmetry(input_array=[1, 2, 3], time_zero=8, bin_size=1,
                                                 uncertainty=[1, 2, 3], time=range(3)))
                              ])
    def test_raw(self, asymmetry, expected_corrected_asymmetry):
        # raw() must undo any alpha correction, restoring alpha == 1.0.
        given_corrected_asymmetry = asymmetry.raw()
        assert given_corrected_asymmetry.alpha == 1.0
        assert np.allclose(given_corrected_asymmetry, expected_corrected_asymmetry, 0.005)
        assert np.allclose(given_corrected_asymmetry.uncertainty, expected_corrected_asymmetry.uncertainty, 0.005)
        if given_corrected_asymmetry.calculated is not None or expected_corrected_asymmetry.calculated is not None:
            assert np.allclose(given_corrected_asymmetry.calculated, expected_corrected_asymmetry.calculated, 0.005)

    @pytest.mark.parametrize("asymmetry, expected_cut_asymmetry, min_time, max_time",
                             # Using bounds of array as range to cut
                             [(objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               0, 100),
                              # Create a range of 1
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=[0], time_zero=8, bin_size=1,
                                                 uncertainty=[0], time=[0]),
                               0, 1),
                              # Create a range of of more then one to the end
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(50), time_zero=8, bin_size=1,
                                                 uncertainty=range(50), time=range(50)),
                               0, 50),
                              # Use a negative value as minimum time
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(50), time_zero=8, bin_size=1,
                                                 uncertainty=range(50), time=range(50)),
                               -5, 50),
                              # Using a very large value as maximum time
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               0, 500),
                              # Get a middle range of the asymmetry
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(50, 70), time_zero=8, bin_size=1,
                                                 uncertainty=range(50, 70), time=range(50, 70)),
                               50, 70),
                              # Use bounds above end of asymmetry
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=[], time_zero=8, bin_size=1,
                                                 uncertainty=[], time=[]),
                               500, 700),
                              # Use bounds below beginning of asymmetry
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=[], time_zero=8, bin_size=1,
                                                 uncertainty=[], time=[]),
                               -10, -1),
                              # Use None as an upper bound (end is the boundary)
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(5, 100), time_zero=8, bin_size=1,
                                                 uncertainty=range(5, 100), time=range(5, 100)),
                               5, None),
                              # Use None as a lower bound (beginning is boundary)
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=range(70), time_zero=8, bin_size=1,
                                                 uncertainty=range(70), time=range(70)),
                               None, 70),
                              # Use None as upper boundary when lower is above end of asymmetry
                              (objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               objects.Asymmetry(input_array=[], time_zero=8, bin_size=1,
                                                 uncertainty=[], time=[]),
                               500, None)
                              ])
    def test_cut(self, asymmetry, expected_cut_asymmetry, min_time, max_time):
        given_cut_asymmetry = asymmetry.cut(min_time, max_time)
        assert np.allclose(given_cut_asymmetry, expected_cut_asymmetry, 0.005)
        assert np.allclose(given_cut_asymmetry.uncertainty, expected_cut_asymmetry.uncertainty, 0.005)
        assert np.allclose(given_cut_asymmetry.time, expected_cut_asymmetry.time, 0.005)
        if given_cut_asymmetry.calculated is not None or expected_cut_asymmetry.calculated is not None:
            assert np.allclose(given_cut_asymmetry.calculated, expected_cut_asymmetry.calculated, 0.005)

    @pytest.mark.parametrize("asymmetry, min_time, max_time",
                             # Provide a min time and max time which create an invalid range
                             [(objects.Asymmetry(input_array=range(100), time_zero=8, bin_size=1,
                                                 uncertainty=range(100), time=range(100)),
                               55, 50)
                              ])
    def test_cut_raise_exception(self, asymmetry, min_time, max_time):
        with pytest.raises(ValueError):
            asymmetry.cut(min_time, max_time)
class TestUncertainties:
    """Unit tests for ``objects.Uncertainty``.

    Covers both constructor paths, pickle and persistent-data round
    trips, ``bin()``, and the error path when the packing exceeds the
    array length.
    """

    @pytest.mark.parametrize("input_array, bin_size",
                             [([1, 2, 3, 4, 5, 6], 0.2)])
    def test_first_constructor_combination(self, input_array, bin_size):
        # Construction directly from an array and a bin size.
        uncertainty = objects.Uncertainty(input_array, bin_size)
        assert uncertainty.bin_size == 0.2

    @pytest.mark.parametrize("hist_one, hist_two",
                             [(objects.Histogram(range(27648),
                                                 980, 1030, 27648, 70, 900, "Front", "RANDOM_ID", 0.2),
                               objects.Histogram(range(27648),
                                                 980, 1030, 27648, 70, 900, "Back", "RANDOM_ID", 0.2))])
    def test_second_constructor_combination(self, hist_one, hist_two):
        # Construction derived from a pair of histograms.
        uncertainty = objects.Uncertainty(histogram_one=hist_one, histogram_two=hist_two)
        assert uncertainty.bin_size == 0.2

    @pytest.mark.parametrize("uncertainty",
                             [(objects.Uncertainty([1, 2, 3, 4, 5, 6, 7], 0.2))])
    def test_pickling(self, uncertainty):
        uncertainty_unpickled = pickle.loads(pickle.dumps(uncertainty))
        assert uncertainty_unpickled == uncertainty

    @pytest.mark.parametrize("uncertainty",
                             [(objects.Uncertainty([1, 2, 3, 4, 5, 6, 7], 0.2))])
    def test_persistent_object(self, uncertainty: objects.Uncertainty):
        # Round-trip through the persistent (minimized) representation.
        uncertainty_minimized = uncertainty.get_persistent_data()
        uncertainty_maximized = uncertainty.build_from_persistent_data(uncertainty_minimized)
        assert uncertainty_maximized == uncertainty

    @pytest.mark.parametrize("uncertainty",
                             [(objects.Uncertainty([1, 2, 3, 4, 5, 6, 7], 0.2))])
    def test_persistent_with_pickling(self, uncertainty: objects.Uncertainty):
        # Persistent representation must itself survive pickling.
        uncertainty_minimized = uncertainty.get_persistent_data()
        uncertainty_minimized_unpickled = pickle.loads(pickle.dumps(uncertainty_minimized))
        uncertainty_maximized = uncertainty.build_from_persistent_data(uncertainty_minimized_unpickled)
        assert uncertainty_maximized == uncertainty

    @pytest.mark.parametrize("uncertainty, expected_uncertainty, packing",
                             [
                                 (objects.Uncertainty(range(100), 1),
                                  objects.Uncertainty(range(100), 1),
                                  1),
                                 (objects.Uncertainty(range(10), 1),
                                  objects.Uncertainty([0.500, 1.802, 3.201, 4.609, 6.020], 2),
                                  2),
                                 (objects.Uncertainty(range(10), 1),
                                  objects.Uncertainty(range(10), 1),
                                  0.5)
                             ])
    def test_binning(self, uncertainty, expected_uncertainty, packing):
        # Packing below the current bin size apparently leaves the array
        # unchanged (third case) -- confirm against objects.Uncertainty.bin.
        given_uncertainty = uncertainty.bin(packing)
        assert np.allclose(given_uncertainty, expected_uncertainty, 0.005)
        assert given_uncertainty.bin_size == expected_uncertainty.bin_size

    @pytest.mark.parametrize("uncertainty, packing",
                             [
                                 (objects.Uncertainty(range(1), 1), 2)
                             ])
    def test_binning_raise_exception(self, uncertainty, packing):
        # Packing larger than the array would yield an empty result.
        with pytest.raises(ValueError):
            uncertainty.bin(packing)
class TestTimes:
    """Unit tests for ``objects.Time``.

    Covers the three constructor combinations, pickle and persistent-data
    round trips, and ``bin()``.

    BUG FIX vs. the original: ``test_binning`` compared ``given_time``
    against itself (``np.allclose(given_time, given_time, ...)`` and two
    self-equality checks), so it passed vacuously and ``expected_time``
    was never used. The assertions now compare against ``expected_time``,
    matching the pattern used by ``TestUncertainties.test_binning``.
    """

    @pytest.mark.parametrize("input_array, bin_size, time_zero",
                             [([1, 2, 3, 4, 5, 6], 0.2, 1.0)])
    def test_first_constructor_combination(self, input_array, bin_size, time_zero):
        # Construction directly from an array of times.
        time = objects.Time(input_array, bin_size_ns=bin_size, time_zero_bin=time_zero)
        assert time.bin_size == 0.2
        assert time.time_zero == 1.0

    @pytest.mark.parametrize("length, bin_size, time_zero_ns",
                             [(1000, 1, 10)])
    def test_second_constructor_combination(self, length, bin_size, time_zero_ns):
        # Construction from a length with time zero given in nanoseconds.
        time = objects.Time(length=length, bin_size_ns=bin_size, time_zero_ns=time_zero_ns)
        assert len(time) == length
        # Adjacent times differ by the bin size converted ns -> us.
        assert close_enough(time[1] - time[0], bin_size / 1000, 0.001)
        assert time.time_zero == 0

    @pytest.mark.parametrize("length, bin_size, time_zero_bin",
                             [(1000, 1, 10)])
    def test_third_constructor_combination(self, length, bin_size, time_zero_bin):
        # Construction from a length with time zero given as a bin index.
        time = objects.Time(length=length, bin_size_ns=bin_size, time_zero_bin=time_zero_bin)
        assert len(time) == length
        assert close_enough(time[1] - time[0], bin_size / 1000, 0.001)
        assert time.time_zero == time_zero_bin
        assert close_enough(time[0], time_zero_bin * bin_size / 1000, 0.001)

    @pytest.mark.parametrize("time",
                             [(objects.Time([1, 2, 3, 4, 5, 6, 7], 0.2))])
    def test_pickling(self, time):
        time_unpickled = pickle.loads(pickle.dumps(time))
        assert time_unpickled == time

    @pytest.mark.parametrize("time",
                             [(objects.Time([1, 2, 3, 4, 5, 6, 7], 0.2))])
    def test_persistent_object(self, time: objects.Time):
        # Round-trip through the persistent (minimized) representation.
        time_minimized = time.get_persistent_data()
        time_maximized = time.build_from_persistent_data(time_minimized)
        assert time_maximized == time

    @pytest.mark.parametrize("time",
                             [(objects.Time([1, 2, 3, 4, 5, 6, 7], 0.2))])
    def test_persistent_with_pickling(self, time: objects.Time):
        # Persistent representation must itself survive pickling.
        time_minimized = time.get_persistent_data()
        time_minimized_unpickled = pickle.loads(pickle.dumps(time_minimized))
        time_maximized = time.build_from_persistent_data(time_minimized_unpickled)
        assert time_maximized == time

    @pytest.mark.parametrize("time, expected_time, packing",
                             [
                                 (objects.Time(range(100), 1),
                                  objects.Time(range(100), 1),
                                  1),
                                 (objects.Time(range(10), 1),
                                  objects.Time([0.001, 0.003, 0.005, 0.007, 0.009], 2),
                                  2),
                                 (objects.Time(range(10), 1),
                                  objects.Time(range(10), 1),
                                  0.5)
                             ])
    def test_binning(self, time, expected_time, packing):
        given_time = time.bin(packing)
        # BUG FIX: compare against expected_time, not given_time itself.
        assert np.allclose(given_time, expected_time, 0.005)
        assert given_time.bin_size == expected_time.bin_size
        assert given_time.time_zero == expected_time.time_zero
class TestFits:  # Covers only the Fit object itself
    """Round-trip tests for objects.Fit pickling and persistent-data support."""

    @pytest.mark.parametrize(
        "fit",
        [(objects.Fit({}, "x", "a title", "a run id", None, None))])
    def test_pickling(self, fit):
        """A Fit survives a pickle round trip unchanged."""
        restored = pickle.loads(pickle.dumps(fit))
        assert restored == fit

    @pytest.mark.parametrize(
        "fit",
        [(objects.Fit({}, "x", "a title", "a run id", None,
                      objects.Asymmetry(input_array=range(27648),
                                        time_zero=980, bin_size=0.2,
                                        uncertainty=range(27648), time=range(27648))))])
    def test_persistent_object(self, fit: objects.Fit):
        """Minimizing to persistent data and rebuilding preserves the Fit and its asymmetry."""
        persisted = fit.get_persistent_data()
        rebuilt = fit.build_from_persistent_data(persisted)
        assert rebuilt == fit
        assert rebuilt.asymmetry == fit.asymmetry

    @pytest.mark.parametrize(
        "fit",
        [(objects.Fit({}, "x", "a title", "a run id", None,
                      objects.Asymmetry(input_array=range(27648),
                                        time_zero=980, bin_size=0.2,
                                        uncertainty=range(27648), time=range(27648))))])
    def test_persistent_with_pickling(self, fit: objects.Fit):
        """The minimized persistent form is itself picklable."""
        persisted = fit.get_persistent_data()
        persisted_restored = pickle.loads(pickle.dumps(persisted))
        rebuilt = fit.build_from_persistent_data(persisted_restored)
        assert rebuilt == fit
        assert rebuilt.asymmetry == fit.asymmetry
class TestFitDatasets:
    """Round-trip tests for objects.FitDataset pickling and persistence."""

    @pytest.mark.parametrize(
        "dataset",
        [(objects.FitDataset())])
    def test_pickling(self, dataset):
        """An empty FitDataset survives a pickle round trip unchanged."""
        restored = pickle.loads(pickle.dumps(dataset))
        assert restored == dataset

    @pytest.mark.parametrize(
        "dataset",
        [(objects.FitDataset(fits={
            "fit1": objects.Fit({}, "x", "a title", "a run id", None,
                                objects.Asymmetry(input_array=range(27648),
                                                  time_zero=980, bin_size=0.2,
                                                  uncertainty=range(27648), time=range(27648)))
        }))])
    def test_persistent_object(self, dataset: objects.FitDataset):
        """A populated FitDataset can be minimized and rebuilt."""
        persisted = dataset.get_persistent_data()
        rebuilt = dataset.build_from_persistent_data(persisted)
        assert rebuilt == dataset

    @pytest.mark.parametrize(
        "dataset",
        [(objects.FitDataset(fits={
            "fit1": objects.Fit({}, "x", "a title", "a run id", None,
                                objects.Asymmetry(input_array=range(27648),
                                                  time_zero=980, bin_size=0.2,
                                                  uncertainty=range(27648), time=range(27648)))
        }))])
    def test_persistent_with_pickling(self, dataset: objects.FitDataset):
        """The minimized persistent form is itself picklable."""
        persisted = dataset.get_persistent_data()
        persisted_restored = pickle.loads(pickle.dumps(persisted))
        rebuilt = dataset.build_from_persistent_data(persisted_restored)
        assert rebuilt == dataset
class TestRunDatasets:
    """Round-trip tests for objects.RunDataset pickling and persistence.

    The identical 13-line fixture was previously copy-pasted into every
    test; it is now built once in :meth:`_build_dataset` (DRY), with the
    fixture values unchanged.
    """

    @staticmethod
    def _build_dataset():
        """Create the fully populated RunDataset shared by every test below."""
        dataset = objects.RunDataset()
        dataset.histograms = {
            "h1": objects.Histogram(range(27648), 980, 1030, 27648, 70, 900, "Front", "RANDOM_ID", 0.2),
            "h2": objects.Histogram(range(27648), 980, 1030, 27648, 70, 900, "Back", "RANDOM_ID", 0.2)
        }
        dataset.asymmetries = {
            "a1": objects.Asymmetry(input_array=range(27648), time_zero=980, bin_size=0.2,
                                    uncertainty=range(27648), time=range(27648))
        }
        # NOTE(review): the "someting" typo is kept verbatim — it is fixture
        # data, and the tests only compare the dataset against itself.
        dataset.meta = {
            "m1": "someting",
            "m2": 2
        }
        dataset.histograms_used = ["h1", "h2"]
        return dataset

    def test_pickling(self):
        """A populated RunDataset survives a pickle round trip unchanged."""
        dataset = self._build_dataset()
        dataset_unpickled = pickle.loads(pickle.dumps(dataset))
        assert dataset_unpickled.equals(dataset)

    def test_persistent_object(self):
        """A RunDataset can be minimized to persistent data and rebuilt."""
        dataset = self._build_dataset()
        dataset_minimized = dataset.get_persistent_data()
        dataset_maximized = dataset.build_from_persistent_data(dataset_minimized)
        assert dataset_maximized.equals(dataset)

    def test_persistent_with_pickling(self):
        """The minimized persistent form is itself picklable."""
        dataset = self._build_dataset()
        dataset_minimized = dataset.get_persistent_data()
        dataset_minimized_unpickled = pickle.loads(pickle.dumps(dataset_minimized))
        dataset_maximized = dataset.build_from_persistent_data(dataset_minimized_unpickled)
        assert dataset_maximized.equals(dataset)
class TestFileDatasets:
    """Round-trip tests for objects.FileDataset pickling and persistence.

    The identical RunDataset + FileDataset setup was previously copy-pasted
    into every test; it is now built by :meth:`_build_run_dataset` and
    :meth:`_build_file_dataset` (DRY), with the fixture values unchanged.
    """

    @staticmethod
    def _build_run_dataset():
        """Create the fully populated RunDataset used as the file's payload."""
        dataset = objects.RunDataset()
        dataset.histograms = {
            "h1": objects.Histogram(range(27648), 980, 1030, 27648, 70, 900, "Front", "RANDOM_ID", 0.2),
            "h2": objects.Histogram(range(27648), 980, 1030, 27648, 70, 900, "Back", "RANDOM_ID", 0.2)
        }
        dataset.asymmetries = {
            "a1": objects.Asymmetry(input_array=range(27648), time_zero=980, bin_size=0.2,
                                    uncertainty=range(27648), time=range(27648))
        }
        dataset.meta = {
            "m1": "someting",
            "m2": 2
        }
        dataset.histograms_used = ["h1", "h2"]
        return dataset

    @staticmethod
    def _build_file_dataset(dataset):
        """Wrap *dataset* in a FileDataset backed by the example histogram file."""
        # Local import preserved from the original tests (avoids a module-level
        # dependency on app.model.files for the rest of the file).
        from app.model import files
        file_dataset = objects.FileDataset(files.file(resources.resource_path(r"test/examples/histogram_data_1.dat")))
        file_dataset.dataset = dataset
        return file_dataset

    def test_pickling(self):
        """A FileDataset survives a pickle round trip unchanged."""
        dataset = self._build_run_dataset()
        file_dataset = self._build_file_dataset(dataset)
        file_dataset_unpickled = pickle.loads(pickle.dumps(file_dataset))
        assert file_dataset_unpickled.equals(file_dataset)

    def test_persistent_object(self):
        """A FileDataset can be minimized to persistent data and rebuilt."""
        dataset = self._build_run_dataset()
        file_dataset = self._build_file_dataset(dataset)
        file_dataset_minimized = file_dataset.get_persistent_data()
        file_dataset_maximized = file_dataset.build_from_persistent_data(file_dataset_minimized)
        file_dataset_maximized.dataset = dataset  # cheating a little
        assert file_dataset_maximized.equals(file_dataset)

    def test_persistent_with_pickling(self):
        """The minimized persistent form is itself picklable."""
        dataset = self._build_run_dataset()
        file_dataset = self._build_file_dataset(dataset)
        file_dataset_minimized = file_dataset.get_persistent_data()
        file_dataset_minimized_unpickled = pickle.loads(pickle.dumps(file_dataset_minimized))
        file_dataset_maximized = file_dataset.build_from_persistent_data(file_dataset_minimized_unpickled)
        file_dataset_maximized.dataset = dataset  # cheating a little
        assert file_dataset_maximized.equals(file_dataset)
| 61.511494
| 120
| 0.507222
| 5,458
| 53,515
| 4.784353
| 0.061378
| 0.03458
| 0.064336
| 0.079654
| 0.821353
| 0.782101
| 0.735726
| 0.702294
| 0.663271
| 0.602803
| 0
| 0.096774
| 0.39118
| 53,515
| 869
| 121
| 61.582278
| 0.704705
| 0.037765
| 0
| 0.551105
| 0
| 0
| 0.041263
| 0.005773
| 0
| 0
| 0
| 0
| 0.10221
| 1
| 0.070442
| false
| 0
| 0.01105
| 0.001381
| 0.093923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
465c131e463ea3edddcf3f69980a4b0234f2cc43
| 337
|
py
|
Python
|
n_ary_tree/tests/test_postorder_traversal_iterative.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | null | null | null |
n_ary_tree/tests/test_postorder_traversal_iterative.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | null | null | null |
n_ary_tree/tests/test_postorder_traversal_iterative.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | 3
|
2020-10-07T20:24:45.000Z
|
2020-12-16T04:53:19.000Z
|
from test_helpers.test_helpers import get_n_nary_tree
from n_ary_tree.postorder_traversal_iterative import postorder_traversal_iterative
def test_postorder_traversal_iterative():
    """Postorder traversal of the sample n-ary tree, plus the empty-tree case."""
    expected = [7, 5, 6, 2, 8, 3, 9, 10, 12, 11, 4, 1]
    assert postorder_traversal_iterative(get_n_nary_tree()) == expected
    assert postorder_traversal_iterative(None) == []
| 42.125
| 102
| 0.795252
| 51
| 337
| 4.843137
| 0.529412
| 0.364372
| 0.546559
| 0.097166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050336
| 0.115727
| 337
| 7
| 103
| 48.142857
| 0.778523
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
46621042750bde6bf75e7f0594243e32219c7db5
| 43
|
py
|
Python
|
createDb.py
|
VidhiRambhia/synergy
|
aabd3f243e3dc3bbfd2f0c350bc0da066f9fbc30
|
[
"MIT"
] | null | null | null |
createDb.py
|
VidhiRambhia/synergy
|
aabd3f243e3dc3bbfd2f0c350bc0da066f9fbc30
|
[
"MIT"
] | null | null | null |
createDb.py
|
VidhiRambhia/synergy
|
aabd3f243e3dc3bbfd2f0c350bc0da066f9fbc30
|
[
"MIT"
] | null | null | null |
# One-off bootstrap script: import the shared SQLAlchemy-style `db` object
# from the application module and create all of its declared tables.
from synergyMain import db

# NOTE: this runs at import time — importing this module issues the DDL
# immediately (no __main__ guard, presumably intentional for a setup script).
db.create_all()
| 14.333333
| 26
| 0.813953
| 7
| 43
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 43
| 2
| 27
| 21.5
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
466797ff99791d390c96fc60f5cecb0b2c6695c7
| 210
|
py
|
Python
|
descarteslabs/workflows/types/_debugging/_debugging.py
|
carderne/descarteslabs-python
|
757b480efb8d58474a3bf07f1dbd90652b46ed64
|
[
"Apache-2.0"
] | null | null | null |
descarteslabs/workflows/types/_debugging/_debugging.py
|
carderne/descarteslabs-python
|
757b480efb8d58474a3bf07f1dbd90652b46ed64
|
[
"Apache-2.0"
] | null | null | null |
descarteslabs/workflows/types/_debugging/_debugging.py
|
carderne/descarteslabs-python
|
757b480efb8d58474a3bf07f1dbd90652b46ed64
|
[
"Apache-2.0"
] | null | null | null |
from ... import env
from ..core import typecheck_promote
from ..primitives import Int
@typecheck_promote(Int)
def _sleep(secs):
    """Apply the "wf.debugging.sleep" operation to *secs*, returning the same proxy type."""
    proxy_type = type(secs)
    return proxy_type._from_apply("wf.debugging.sleep", secs, token=env._token)
| 23.333333
| 79
| 0.757143
| 30
| 210
| 5.1
| 0.566667
| 0.20915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 210
| 8
| 80
| 26.25
| 0.827027
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
46af72f5e1c2dfb111f4f18c4b2e7aec36b0cfd0
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/installation/base_installer.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/installation/base_installer.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/installation/base_installer.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/a7/6f/f9/8652a6bcee17168c75df0aecdbcdb1f0affc61531dd3c73b7dd167ac96
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 96
| 1
| 96
| 96
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
312f1793e6e512724d37810aa191b3e5fb94af8b
| 52
|
py
|
Python
|
protocols/vitrimers/generate/lib/equilibrate/__init__.py
|
debeshmandal/brownian
|
bc5b2e00a04d11319c85e749f9c056b75b450ff7
|
[
"MIT"
] | 1
|
2021-08-18T05:11:28.000Z
|
2021-08-18T05:11:28.000Z
|
protocols/vitrimers/generate/lib/equilibrate/__init__.py
|
debeshmandal/brownian
|
bc5b2e00a04d11319c85e749f9c056b75b450ff7
|
[
"MIT"
] | null | null | null |
protocols/vitrimers/generate/lib/equilibrate/__init__.py
|
debeshmandal/brownian
|
bc5b2e00a04d11319c85e749f9c056b75b450ff7
|
[
"MIT"
] | null | null | null |
from ._equilibrate import main as run_equilibration
| 26
| 51
| 0.865385
| 7
| 52
| 6.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 52
| 1
| 52
| 52
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
31637ad80541c6cea7bcb56a48315af69ad2dc5e
| 30,446
|
py
|
Python
|
arxiv/canonical/classic/tests/test_backfill.py
|
arXiv/arxiv-canonical
|
a758ed88a568f23a834288aed4dcf7039c1340cf
|
[
"MIT"
] | 5
|
2019-05-26T22:52:54.000Z
|
2021-11-05T12:27:11.000Z
|
arxiv/canonical/classic/tests/test_backfill.py
|
arXiv/arxiv-canonical
|
a758ed88a568f23a834288aed4dcf7039c1340cf
|
[
"MIT"
] | 31
|
2019-06-24T13:51:25.000Z
|
2021-11-12T22:27:10.000Z
|
arxiv/canonical/classic/tests/test_backfill.py
|
arXiv/arxiv-canonical
|
a758ed88a568f23a834288aed4dcf7039c1340cf
|
[
"MIT"
] | 4
|
2019-01-10T22:01:54.000Z
|
2021-11-05T12:26:58.000Z
|
"""Tests for :mod:`arxiv.canonical.classic.backfill`."""
import io
import json
import os
import tempfile
import cProfile as profile
from datetime import date, datetime
from pprint import pprint
from unittest import TestCase, mock
from pytz import timezone
from ...domain import ContentType, CanonicalFile, Category, EventType, \
Identifier, License, URI, VersionedIdentifier
from ...log import Log
from ...register import IRegisterAPI, RegisterAPI
from ...services import InMemoryStorage, CanonicalFilesystem, Filesystem, \
RemoteSource
from .. import backfill, abs, daily
ET = timezone('US/Eastern')
class TestBackfillWithData(TestCase):
    """Integration test: run backfill over a fixed subset of identifiers.

    Requires a real ``daily.log``: set the ``DAILY_PATH`` environment
    variable to its full path, otherwise the test class is skipped
    (``__test__`` evaluates to False).
    """

    # Only collect this class when DAILY_PATH is configured.
    __test__ = os.environ.get('DAILY_PATH') is not None

    def setUp(self):
        self.state_path = tempfile.mkdtemp()
        self.record_path = tempfile.mkdtemp()
        self.cache_path = './.cache'
        print('state_path ::', self.state_path)
        print('record_path ::', self.record_path)
        # Test data lives next to this module.
        self.abs_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'data'
        )
        self.ps_cache_path = os.path.join(self.abs_path, 'cache')
        self.storage = CanonicalFilesystem(self.record_path)
        self.classic = Filesystem(self.abs_path)
        self.remote = RemoteSource('arxiv.org')
        self.api = RegisterAPI(self.storage,
                               [self.storage, self.classic, self.remote])
        self.daily_path = os.environ.get('DAILY_PATH')
        # Fixed subset of arXiv identifiers to limit the backfill run.
        raw_identifiers = (
            'adap-org/9509003', '0704.0001', '0801.1021', '0802.0193',
            '0808.4142', '0905.2326', '0906.2112', '0906.3336',
            '0906.3421', '0906.5132', '0906.5504', '1210.8438',
            '1605.09669', '1607.08199', 'cond-mat/9805021',
            'funct-an/9301001', 'hep-th/9709111', 'hep-th/9901001',
            'math/0202001', 'math-ph/0702031', 'physics/9707012',
        )
        self.identifiers = [Identifier(raw) for raw in raw_identifiers]

    def test_backfill_with_content(self):
        """Backfilling the sample identifiers yields two NEW events in 1997."""
        try:
            for event in backfill.backfill(self.api,
                                           self.daily_path,
                                           self.abs_path,
                                           self.ps_cache_path,
                                           self.state_path,
                                           limit_to=set(self.identifiers),
                                           cache_path=self.cache_path,
                                           until=date(2000, 1, 1)):
                print(event.identifier, event.event_type, event.event_date)
        finally:
            # Always report where the temp state/record ended up for debugging.
            print('state_path ::', self.state_path)
            print('record_path ::', self.record_path)
        events, N = self.api.load_events(1997)
        events = list(events)
        new_events = [e for e in events if e.event_type == EventType.NEW]
        self.assertEqual(
            len(new_events),
            2, 'There are two NEW events in 1997'
        )
# class TestBackfillRecord(TestCase):
# def setUp(self):
# """The classic record has two e-prints."""
# # One of them was first announced prior to the daily record.
# self.ident = Identifier('1902.00123')
# # The other one was announced after the daily record began.
# self.ident2 = Identifier('1902.00125')
# self.state_path = tempfile.mkdtemp()
# self.events = [
# # The first event we have in the daily record for 1902.00123.
# daily.EventData(
# arxiv_id=self.ident,
# event_date=date(2019, 2, 9),
# event_type=EventType.CROSSLIST,
# version=-1, # Who knows what version this is?
# categories=[Category('cs.WT')],
# ),
# # Here is where 1902.00125 is first announced.
# daily.EventData(
# arxiv_id=self.ident2,
# event_date=date(2019, 2, 9),
# event_type=EventType.NEW,
# version=1,
# categories=[
# Category('cs.DL'),
# Category('cs.IR'),
# ]
# ),
# # Here is where the second version of 1902.00123 is announced.
# daily.EventData(
# arxiv_id=self.ident,
# event_date=date(2019, 2, 10),
# event_type=EventType.REPLACED,
# version=-1, # Who knows what version this is?
# categories=[
# Category('cs.DL'),
# Category('cs.IR'),
# Category('cs.WT'),
# Category('cs.FO')
# ]
# )
# ]
# # We have abs records for everything...
# self.abs = [
# # The first version of 1902.00123 (pre-daily).
# abs.AbsData(
# identifier=VersionedIdentifier.from_parts(self.ident, 1),
# submitter=None,
# submitted_date=date(2019, 2, 1),
# announced_month='2019-02',
# updated_date=datetime.now(),
# license=License('http://foo.license'),
# primary_classification=Category('cs.DL'),
# title='foo title before daily.log existed',
# abstract='very abstract',
# authors='Ima N. Author',
# size_kilobytes=42,
# submission_type=EventType.NEW,
# secondary_classification=[
# Category('cs.IR'),
# Category('cs.WT'), # <- This was added by a cross event!
# ],
# ),
# # The second version of 1902.00123, which was noted in daily.log.
# abs.AbsData(
# identifier=VersionedIdentifier.from_parts(self.ident, 2),
# submitter=None,
# submitted_date=date(2019, 2, 9),
# announced_month='2019-02',
# updated_date=datetime.now(),
# license=License('http://foo.license'),
# primary_classification=Category('cs.DL'),
# title='fooooo title after daily.log exists',
# abstract='very abstract',
# authors='Ima N. Author',
# size_kilobytes=42,
# submission_type=EventType.REPLACED,
# secondary_classification=[
# Category('cs.IR'),
# Category('cs.WT'),
# Category('cs.FO')
# ],
# ),
# # The first version of 1902.00125, which was noted in daily.log.
# abs.AbsData(
# identifier=VersionedIdentifier.from_parts(self.ident2, 1),
# submitter=None,
# submitted_date=date(2019, 2, 9),
# announced_month='2019-02',
# updated_date=datetime.now(),
# license=License('http://foo.license'),
# primary_classification=Category('cs.DL'),
# title='another very cool title',
# abstract='very abstract',
# authors='Ima N. Author',
# size_kilobytes=42,
# submission_type=EventType.REPLACED,
# secondary_classification=[
# Category('cs.IR'),
# ],
# )
# ]
# def _get_abs(path, identifier, *args, **kwargs):
# for a in self.abs:
# if a.identifier == identifier:
# return a
# raise RuntimeError(f'No such abs: {identifier}')
# self._get_abs = _get_abs
# @mock.patch(f'{backfill.__name__}.content', mock.MagicMock())
# @mock.patch(f'{backfill.__name__}.daily')
# @mock.patch(f'{backfill.__name__}.abs')
# def test_backfill(self, mock_abs, mock_daily):
# register = mock.MagicMock(spec=IRegisterAPI)
# added_events = []
# register.add_events.side_effect = added_events.append
# mock_daily.parse.side_effect = [
# self.events,
# self.events[0:2],
# self.events,
# ]
# # This call will get events for a particular identifier during
# # parsing of pre-daily announcements. So we just return the events
# # for 1902.00123.
# mock_daily.scan.return_value = [self.events[0], self.events[2]]
# # Handle a call to list all of the identifiers prior to the first one
# # in daily.log.
# mock_abs.list_all.return_value = \
# list(set([a.identifier.arxiv_id for a in self.abs[:2]]))
# mock_abs.iter_all.return_value = \
# list(set([a.identifier.arxiv_id for a in self.abs[:2]]))
# # Return an AbsData based on the requested identifier.
# mock_abs.get_path.side_effect = lambda b, i: i # Pass ID through.
# mock_abs.parse.side_effect = self._get_abs # Get AbsData by ID.
# # This is called when parsing the pre-daily records, and gets all of
# # the AbsData of the e-print that was first announced prior to daily.
# mock_abs.parse_versions.return_value = self.abs[0:2]
# list(backfill.backfill(register, '/daily', '/abs', '/cache',
# self.state_path))
# # We expect an ordered series of events that represents both what is
# # directly known from daily.log and what is inferred from the presence
# # of abs files and replacement events in daily.log.
# expected = [
# (EventType.NEW, VersionedIdentifier('1902.00123v1')),
# (EventType.CROSSLIST, VersionedIdentifier('1902.00123v1')),
# (EventType.NEW, VersionedIdentifier('1902.00125v1')),
# (EventType.REPLACED, VersionedIdentifier('1902.00123v2')),
# ]
# for (expected_type, expected_id), event in zip(expected, added_events):
# self.assertEqual(expected_type, event.event_type)
# self.assertEqual(expected_id, event.identifier)
# with open(os.path.join(self.state_path, 'first.json')) as f:
# first = json.load(f)
# self.assertEqual(len(first), 2, 'Two entries in first announced index')
# self.assertIn(self.ident, first)
# self.assertIn(self.ident2, first)
# with open(os.path.join(self.state_path, 'current.json')) as f:
# current = json.load(f)
# self.assertEqual(len(current), 2,
# 'Two entries in current version index')
# self.assertIn(self.ident, current)
# self.assertEqual(current[self.ident], 2)
# self.assertIn(self.ident2, current)
# self.assertEqual(current[self.ident2], 1)
# log = Log(self.state_path)
# log_entries = list(log.read_all())
# self.assertEqual(len(log_entries), len(added_events),
# 'There is a log entry for each event')
# for entry in log_entries:
# self.assertEqual(entry.state, 'SUCCESS', 'All entries are SUCCESS')
# for event, entry in zip(added_events, log_entries):
# self.assertEqual(event.event_id, entry.event_id,
# 'Log entries are in the same order as events')
# @mock.patch(f'{backfill.__name__}.content', mock.MagicMock())
# @mock.patch(f'{backfill.__name__}.daily')
# @mock.patch(f'{backfill.__name__}.abs')
# def test_backfill_with_errors(self, mock_abs, mock_daily):
# register = mock.MagicMock(spec=IRegisterAPI)
# added_events = []
# register.add_events.side_effect = added_events.append
# def _parse(path, for_date=None, **kwargs):
# if for_date is not None:
# return self.events[0:2]
# return self.events
# mock_daily.parse.side_effect = _parse
# # This call will get events for a particular identifier during
# # parsing of pre-daily announcements. So we just return the events
# # for 1902.00123.
# mock_daily.scan.return_value = [self.events[0], self.events[2]]
# # Handle a call to list all of the identifiers prior to the first one
# # in daily.log.
# mock_abs.list_all.return_value = \
# list(set([a.identifier.arxiv_id for a in self.abs[:2]]))
# mock_abs.iter_all.return_value = \
# list(set([a.identifier.arxiv_id for a in self.abs[:2]]))
# # Return an AbsData based on the requested identifier. But raise a
# # RuntimeError when handling one of the records!
# raise_an_error = [True]
# def _get_abs(dpath, identifier, *args, **kwargs):
# if identifier == '1902.00125v1' and raise_an_error:
# raise_an_error.pop()
# raise RuntimeError('')
# for a in self.abs:
# if a.identifier == identifier:
# return a
# raise RuntimeError(f'No such abs: {identifier}')
# mock_abs.parse.side_effect = _get_abs # Get AbsData by ID.
# # This is called when parsing the pre-daily records, and gets all of
# # the AbsData of the e-print that was first announced prior to daily.
# mock_abs.parse_versions.return_value = self.abs[0:2]
# # We gave generated a RuntimeError intentionally...
# with self.assertRaises(RuntimeError):
# list(backfill.backfill(register, '/fo', '/ba', '/bz',
# self.state_path))
# # ...and call backfill again to resume.
# list(backfill.backfill(register, '/fo', '/ba', '/bz', self.state_path))
# # We expect an ordered series of events that represents both what is
# # directly known from daily.log and what is inferred from the presence
# # of abs files and replacement events in daily.log.
# expected = [
# (EventType.NEW, VersionedIdentifier('1902.00123v1')),
# (EventType.CROSSLIST, VersionedIdentifier('1902.00123v1')),
# (EventType.NEW, VersionedIdentifier('1902.00125v1')),
# (EventType.REPLACED, VersionedIdentifier('1902.00123v2')),
# ]
# for (expected_type, expected_id), event in zip(expected, added_events):
# self.assertEqual(expected_type, event.event_type)
# self.assertEqual(expected_id, event.identifier)
# with open(os.path.join(self.state_path, 'first.json')) as f:
# first = json.load(f)
# self.assertEqual(len(first), 2, 'Two entries in first announced index')
# self.assertIn(self.ident, first)
# self.assertIn(self.ident2, first)
# with open(os.path.join(self.state_path, 'current.json')) as f:
# current = json.load(f)
# self.assertEqual(len(current), 2,
# 'Two entries in current version index')
# self.assertIn(self.ident, current)
# self.assertEqual(current[self.ident], 2)
# self.assertIn(self.ident2, current)
# self.assertEqual(current[self.ident2], 1)
# log = Log(self.state_path)
# log_entries = list(log.read_all())
# self.assertEqual(len(log_entries) - 1, len(added_events),
# 'There is a log entry for each event, plus a'
# 'FAILED entry')
# success_entries = [e for e in log_entries if e.state == 'SUCCESS']
# self.assertEqual(len(success_entries), len(added_events),
# 'There is one SUCCESS entry per event')
# failed_entries = [e for e in log_entries if e.state == 'FAILED']
# self.assertEqual(len(failed_entries), 1, 'There is one FAILED entry')
# for event, entry in zip(added_events, success_entries):
# self.assertEqual(event.event_id, entry.event_id,
# 'Log entries are in the same order as events')
# class TestLoadPredailyEvents(TestCase):
# """Load events from before there were events!"""
# @mock.patch(f'{backfill.__name__}.content', mock.MagicMock())
# @mock.patch(f'{backfill.__name__}.daily')
# @mock.patch(f'{backfill.__name__}.abs')
# def test_load_new_before_daily(self, mock_abs, mock_daily):
# """The first version of an e-print was announced prior to daily.log."""
# ident = Identifier('1902.00123')
# mock_abs.parse_versions.return_value = [
# abs.AbsData(
# identifier=VersionedIdentifier('1902.00123v1'),
# submitter=None,
# submitted_date=date(2019, 2, 1),
# announced_month='2019-02',
# updated_date=datetime.now(),
# license=License('http://foo.license'),
# primary_classification=Category('cs.DL'),
# title='foo title before daily.log existed',
# abstract='very abstract',
# authors='Ima N. Author',
# size_kilobytes=42,
# submission_type=EventType.NEW,
# secondary_classification=[
# Category('cs.IR'),
# Category('cs.WT'),
# ],
# ),
# abs.AbsData(
# identifier=VersionedIdentifier('1902.00123v2'),
# submitter=None,
# submitted_date=date(2019, 2, 9),
# announced_month='2019-02',
# updated_date=datetime.now(),
# license=License('http://foo.license'),
# primary_classification=Category('cs.DL'),
# title='fooooo title after daily.log exists',
# abstract='very abstract',
# authors='Ima N. Author',
# size_kilobytes=42,
# submission_type=EventType.REPLACED,
# secondary_classification=[
# Category('cs.IR'),
# Category('cs.WT'),
# Category('cs.FO')
# ],
# )
# ]
# mock_daily.scan.return_value = [
# daily.EventData(
# arxiv_id=ident,
# event_date=date(2019, 2, 10),
# event_type=EventType.REPLACED,
# version=-1, # Who knows what version this is?
# categories=[
# Category('cs.DL'),
# Category('cs.IR'),
# Category('cs.WT'),
# Category('cs.FO')
# ]
# )
# ]
# events = backfill._load_predaily('/foo', '/path', '/ba', ident, {}, {})
# self.assertEqual(len(events), 1, 'Generates one event')
# self.assertEqual(events[0].event_type, EventType.NEW,
# 'Generates a NEW event')
# self.assertEqual(events[0].version.identifier,
# VersionedIdentifier('1902.00123v1'),
# 'With the first version')
# self.assertEqual(events[0].version.metadata.title,
# 'foo title before daily.log existed',
# 'And the correct title')
# self.assertEqual(events[0].version.metadata.secondary_classification,
# [Category('cs.IR'), Category('cs.WT')],
# 'And the correct cross-list categories')
# @mock.patch(f'{backfill.__name__}.content', mock.MagicMock())
# @mock.patch(f'{backfill.__name__}.daily')
# @mock.patch(f'{backfill.__name__}.abs')
# def test_load_new_before_daily_with_cross(self, mock_abs, mock_daily):
# """First version of an e-print in pre-history, with a cross event."""
# ident = Identifier('1902.00123')
# mock_abs.parse_versions.return_value = [
# abs.AbsData(
# identifier=VersionedIdentifier('1902.00123v1'),
# submitter=None,
# submitted_date=date(2019, 2, 1),
# announced_month='2019-02',
# updated_date=datetime.now(),
# license=License('http://foo.license'),
# primary_classification=Category('cs.DL'),
# title='foo title before daily.log existed',
# abstract='very abstract',
# authors='Ima N. Author',
# size_kilobytes=42,
# submission_type=EventType.NEW,
# secondary_classification=[
# Category('cs.IR'),
# Category('cs.WT'), # <- This was added by a cross event!
# ],
# ),
# abs.AbsData(
# identifier=VersionedIdentifier('1902.00123v2'),
# submitter=None,
# submitted_date=date(2019, 2, 9),
# announced_month='2019-02',
# updated_date=datetime.now(),
# license=License('http://foo.license'),
# primary_classification=Category('cs.DL'),
# title='fooooo title after daily.log exists',
# abstract='very abstract',
# authors='Ima N. Author',
# size_kilobytes=42,
# submission_type=EventType.REPLACED,
# secondary_classification=[
# Category('cs.IR'),
# Category('cs.WT'),
# Category('cs.FO')
# ],
# )
# ]
# mock_daily.scan.return_value = [
# daily.EventData(
# arxiv_id=ident,
# event_date=date(2019, 2, 9),
# event_type=EventType.CROSSLIST,
# version=-1, # Who knows what version this is?
# categories=[Category('cs.WT')],
# ),
# daily.EventData(
# arxiv_id=ident,
# event_date=date(2019, 2, 10),
# event_type=EventType.REPLACED,
# version=-1, # Who knows what version this is?
# categories=[
# Category('cs.DL'),
# Category('cs.IR'),
# Category('cs.WT'),
# Category('cs.FO')
# ]
# )
# ]
# events = backfill._load_predaily('/foo', '/bar', '/baz', ident, {}, {})
# self.assertEqual(len(events), 1, 'Still generates one event')
# self.assertEqual(events[0].version.metadata.secondary_classification,
# [Category('cs.IR')],
# 'But the cross-list category is not included in the'
# ' NEW event for the first version!')
# class TestDailyEvents(TestCase):
# """Load daily events!"""
# @mock.patch(f'{backfill.__name__}.content', mock.MagicMock())
# @mock.patch(f'{backfill.__name__}.abs')
# def test_load_new(self, mock_abs):
# """Load a NEW event."""
# ident = Identifier('2302.00123')
# event_datum = daily.EventData(
# arxiv_id=ident,
# event_date=date(2019, 2, 10),
# event_type=EventType.NEW,
# version=1,
# categories=[
# Category('cs.DL'),
# Category('cs.IR'),
# ]
# )
# mock_abs.parse.return_value = abs.AbsData(
# identifier=VersionedIdentifier.from_parts(ident, 1),
# submitter=None,
# submitted_date=datetime(2023, 2, 1, 2, 42, 1),
# announced_month='2023-02',
# updated_date=datetime.now(),
# license=License('http://foo.license'),
# primary_classification=Category('cs.DL'),
# title='foo title',
# abstract='very abstract',
# authors='Ima N. Author',
# size_kilobytes=42,
# submission_type=EventType.NEW,
# secondary_classification=[
# Category('cs.IR'),
# ],
# )
# event = backfill._load_daily_event('', '', event_datum, {}, {})
# self.assertEqual(event.event_type, EventType.NEW, 'Creates NEW event')
# self.assertEqual(event.version.identifier,
# VersionedIdentifier.from_parts(ident, 1),
# 'With the correct identifier')
# self.assertEqual(event.version.metadata.abstract, 'very abstract',
# 'And the correct abstract')
# self.assertEqual(
# event.event_date,
# datetime(2019, 2, 10, 20, 0, 0, 123, tzinfo=backfill.ET),
# 'Event timestamp reflects the announcement day, with microsecond'
# ' based on the incremental part of the identifier to preserve'
# ' order.'
# )
# @mock.patch(f'{backfill.__name__}.content', mock.MagicMock())
# @mock.patch(f'{backfill.__name__}.abs')
# def test_load_cross(self, mock_abs):
# """Load a CROSSLIST event."""
# ident = Identifier('2302.00123')
# event_datum = daily.EventData(
# arxiv_id=ident,
# event_date=date(2019, 2, 12),
# event_type=EventType.CROSSLIST,
# version=-1,
# categories=[Category('cs.WT')]
# )
# mock_abs.parse.return_value = abs.AbsData(
# identifier=VersionedIdentifier.from_parts(ident, 1),
# submitter=None,
# submitted_date=datetime(2023, 2, 1, 2, 42, 1),
# announced_month='2023-02',
# updated_date=datetime.now(),
# license=License('http://foo.license'),
# primary_classification=Category('cs.DL'),
# title='foo title',
# abstract='very abstract',
# authors='Ima N. Author',
# size_kilobytes=42,
# submission_type=EventType.NEW,
# secondary_classification=[
# Category('cs.IR'), Category('cs.WT')
# ],
# )
# current = {ident: 1} # The current version number.
# event = backfill._load_daily_event('', '', event_datum, current, {})
# self.assertEqual(event.event_type, EventType.CROSSLIST,
# 'Creates CROSSLIST event')
# self.assertEqual(event.version.identifier,
# VersionedIdentifier.from_parts(ident, 1),
# 'With the correct identifier')
# self.assertEqual(event.version.metadata.secondary_classification,
# [Category('cs.IR'), Category('cs.WT')],
# 'And the correct cross-list classification')
# self.assertEqual(
# event.event_date,
# datetime(2019, 2, 12, 20, 0, 0, 123, tzinfo=backfill.ET),
# 'Event timestamp reflects the announcement day, with microsecond'
# ' based on the incremental part of the identifier to preserve'
# ' order.'
# )
# @mock.patch(f'{backfill.__name__}.content', mock.MagicMock())
# @mock.patch(f'{backfill.__name__}.abs')
# def test_load_replacement(self, mock_abs):
# """Load a REPLACED event."""
# ident = Identifier('2302.00123')
# event_datum = daily.EventData(
# arxiv_id=ident,
# event_date=date(2019, 2, 12),
# event_type=EventType.REPLACED,
# version=-1,
# categories=[
# Category('cs.DL'),
# Category('cs.IR'),
# Category('cs.WT')
# ]
# )
# mock_abs.parse.return_value = abs.AbsData(
# identifier=VersionedIdentifier.from_parts(ident, 2),
# submitter=None,
# submitted_date=datetime(2023, 2, 1, 2, 42, 1),
# announced_month='2023-02',
# updated_date=datetime.now(),
# license=License('http://foo.license'),
# primary_classification=Category('cs.DL'),
# title='foo title',
# abstract='very abstract',
# authors='Ima N. Author',
# size_kilobytes=42,
# submission_type=EventType.NEW,
# secondary_classification=[
# Category('cs.IR'), Category('cs.WT')
# ],
# )
# current = {ident: 1} # The current version number.
# first = {ident: date(2019, 2, 11)} # First announcement date.
# event = backfill._load_daily_event('', '', event_datum, current, first)
# self.assertEqual(event.event_type, EventType.REPLACED,
# 'Creates REPLACED event')
# self.assertEqual(event.version.identifier,
# VersionedIdentifier.from_parts(ident, 2),
# 'With the correct identifier')
# self.assertEqual(event.version.metadata.secondary_classification,
# [Category('cs.IR'), Category('cs.WT')],
# 'And the correct cross-list classification')
# self.assertEqual(
# event.event_date,
# datetime(2019, 2, 12, 20, 0, 0, 123, tzinfo=backfill.ET),
# 'Event timestamp reflects the announcement day, with microsecond'
# ' based on the incremental part of the identifier to preserve'
# ' order.'
# )
| 43.308677
| 91
| 0.5359
| 3,134
| 30,446
| 5.072112
| 0.115826
| 0.037745
| 0.036236
| 0.020382
| 0.77271
| 0.753963
| 0.716218
| 0.70458
| 0.692375
| 0.690677
| 0
| 0.040281
| 0.341161
| 30,446
| 702
| 92
| 43.37037
| 0.752181
| 0.851672
| 0
| 0.050633
| 0
| 0
| 0.097429
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 1
| 0.025316
| false
| 0
| 0.177215
| 0
| 0.227848
| 0.075949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
317b90276baf21cb39625099fa202915eb2ac5f6
| 162
|
py
|
Python
|
src/etl/__init__.py
|
shy166/hinreddit
|
e19abfa584b8b0cf801dd6968ac7b42d4b68ee96
|
[
"Apache-2.0"
] | null | null | null |
src/etl/__init__.py
|
shy166/hinreddit
|
e19abfa584b8b0cf801dd6968ac7b42d4b68ee96
|
[
"Apache-2.0"
] | 3
|
2020-05-16T04:29:28.000Z
|
2020-05-16T08:05:16.000Z
|
src/etl/__init__.py
|
syeehyn/hinreddit
|
e19abfa584b8b0cf801dd6968ac7b42d4b68ee96
|
[
"Apache-2.0"
] | null | null | null |
from .etl import fetch_submissions, submissions_detail, comments_detail
from .sentimental import label_posts, label_comments, load_nlp
from .label import labeling
| 54
| 71
| 0.864198
| 22
| 162
| 6.090909
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 162
| 3
| 72
| 54
| 0.911565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
31b50b4c2c0672fc5365dd022d0879ab080f897a
| 44
|
py
|
Python
|
libs/gym_pong-master/gym_pong/envs/__init__.py
|
Adaptive-RL/AdaRL-code
|
493b1ee5a0f98a220c5a1e5ce2e2ce6572d02e9f
|
[
"MIT"
] | 8
|
2022-03-15T04:24:03.000Z
|
2022-03-18T08:33:22.000Z
|
libs/gym_pong-master/gym_pong/envs/__init__.py
|
Adaptive-RL/AdaRL-code
|
493b1ee5a0f98a220c5a1e5ce2e2ce6572d02e9f
|
[
"MIT"
] | 1
|
2022-03-17T07:17:03.000Z
|
2022-03-20T06:23:12.000Z
|
libs/gym_pong-master/gym_pong/envs/__init__.py
|
Adaptive-RL/AdaRL-code
|
493b1ee5a0f98a220c5a1e5ce2e2ce6572d02e9f
|
[
"MIT"
] | 1
|
2022-03-18T06:08:37.000Z
|
2022-03-18T06:08:37.000Z
|
from gym_pong.envs.pong_env import AtariEnv
| 22
| 43
| 0.863636
| 8
| 44
| 4.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
31b7b67029d294c2fed9fbfe9ce3786bc8565d79
| 32,425
|
py
|
Python
|
funding/tests/test_views.py
|
mmesiti/cogs3
|
c48cd48629570f418b93aec73de49bc2fb59edc2
|
[
"MIT"
] | null | null | null |
funding/tests/test_views.py
|
mmesiti/cogs3
|
c48cd48629570f418b93aec73de49bc2fb59edc2
|
[
"MIT"
] | null | null | null |
funding/tests/test_views.py
|
mmesiti/cogs3
|
c48cd48629570f418b93aec73de49bc2fb59edc2
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse
from funding.forms import FundingSourceForm
from funding.forms import PublicationForm
from funding.forms import AddFundingSourceForm
from funding.views import FundingSourceAddView
from funding.views import FundingSourceCreateView
from funding.views import PublicationCreateView
from funding.views import AttributionListView
from funding.views import FundingSourceListView
from funding.views import PublicationListView
from funding.views import AttributionUpdateView
from funding.views import AttributionDeleteView
from users.models import CustomUser
from funding.models import FundingSource
from funding.models import FundingSourceMembership
from funding.models import Publication
from institution.models import Institution
class FundingViewTests(TestCase):
    """Base class providing shared fixtures and an authorisation helper
    for the funding view tests."""

    fixtures = [
        'institution/fixtures/tests/institutions.json',
        'users/fixtures/tests/users.json',
        'funding/fixtures/tests/funding_bodies.json',
        'funding/fixtures/tests/attributions.json',
        'funding/fixtures/tests/funding_source_memberships.json',
        'project/fixtures/tests/categories.json',
        'project/fixtures/tests/projects.json',
        'project/fixtures/tests/memberships.json',
    ]

    def access_view_as_unauthorised_user(self, path):
        """
        Assert that an unrecognised remote user is redirected to the
        registration page when requesting ``path``.

        Args:
            path (str): Path to view.
        """
        identity_provider = Institution.objects.get(
            name="Example University"
        ).identity_provider
        response = self.client.get(
            path,
            **{
                'Shib-Identity-Provider': identity_provider,
                'REMOTE_USER': 'invalid-remote-user',
            }
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, reverse('register'))
class FundingSourceCreateViewTests(FundingViewTests, TestCase):
    """Tests for the funding source and publication create views."""

    def test_fundingsource_view_as_an_authorised_user(self):
        """
        Ensure the correct account types can access the funding source create view.
        """
        user = CustomUser.objects.get(email="shibboleth.user@example.ac.uk")
        user2 = CustomUser.objects.get(email="guest.user@external.ac.uk")
        user3 = CustomUser.objects.get(email="test.user@example2.ac.uk")
        institution = Institution.objects.get(name="Example University")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 200,
            },
            {
                'email': user2.email,
                'expected_status_code': 200,
            },
            {
                'email': user3.email,
                'expected_status_code': 200,
            },
        ]
        for account in accounts:
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('email'),
            }
            # Funding source endpoint with no identifier.
            response = self.client.get(
                reverse('create-funding-source'),
                **headers
            )
            self.assertEqual(response.status_code, account.get('expected_status_code'))
            self.assertTrue(isinstance(response.context_data.get('form'), FundingSourceForm))
            self.assertTrue(isinstance(response.context_data.get('view'), FundingSourceCreateView))
            # Check that initial value for identifier form field has not been set.
            self.assertEqual(response.context_data.get('form').initial, {})
            # Funding source endpoint with an identifier in the URL.
            test_identifier = 'my-identifier-for-testing-123$'
            response = self.client.get(
                reverse('create-funding-source-with-identifier', args=[test_identifier]),
                **headers
            )
            self.assertEqual(response.status_code, account.get('expected_status_code'))
            self.assertTrue(isinstance(response.context_data.get('form'), FundingSourceForm))
            self.assertTrue(isinstance(response.context_data.get('view'), FundingSourceCreateView))
            # The URL identifier should pre-populate the form field.
            self.assertEqual(response.context_data.get('form').initial['identifier'], test_identifier)

    def test_publication_create_view_without_popup(self):
        """
        Creating a publication stores it with the correct data and either
        returns 200 (popup variant) or redirects to the attribution list.
        """
        user = CustomUser.objects.get(email="shibboleth.user@example.ac.uk")
        institution = Institution.objects.get(name="Example University")
        headers = {
            'Shib-Identity-Provider': institution.identity_provider,
            'REMOTE_USER': user.email
        }
        post_data = {
            'title' : 'My publication title',
            'url' : f'https://{institution.local_repository_domain}'
        }
        # Map URL suffix to expected status: popup responses render in
        # place (200); the plain endpoint redirects (302).
        url_appends = { '?_popup=1' : 200,
                        '' : 302}
        for url_append, status_code in url_appends.items():
            # Grab publication count before the request.
            pub_count = Publication.objects.count()
            # Fire post request.
            response = self.client.post(reverse('create-publication')+url_append, data=post_data, **headers)
            # Check that the publication count is incremented with correct data.
            # (Previously `pub_count` was captured but never asserted.)
            self.assertEqual(Publication.objects.count(), pub_count + 1)
            pub = Publication.objects.last()
            self.assertEqual(pub.title, post_data['title'])
            self.assertEqual(pub.url, post_data['url'])
            self.assertEqual(pub.owner, user)
            # Check that the expected status code is returned.
            expected_redirect_url = reverse('list-attributions')
            self.assertEqual(response.status_code, status_code)
            # Check redirect URL only for 302.
            if status_code == 302:
                self.assertEqual(response.url, expected_redirect_url)

    def test_view_as_an_unauthorised_user(self):
        """
        Ensure unauthorised users can not access the funding source create view.
        """
        # Test endpoint with no identifier.
        self.access_view_as_unauthorised_user(reverse('create-funding-source'))
        # Test endpoint with identifier.
        endpoint = reverse('create-funding-source-with-identifier', args=['some-identifier'])
        self.access_view_as_unauthorised_user(endpoint)
class FundingSourceAddViewTests(FundingViewTests, TestCase):
    """Tests for the view that adds (looks up or creates) a funding
    source by identifier.

    Subclasses override ``url_append_str`` to exercise the popup
    variant of the endpoint.

    NOTE(review): the default ``url_append_str`` below equals the value
    set by FundingSourceAddViewWithPopupTests, so the base class and
    that subclass currently issue identical requests — presumably the
    default here was meant to be ''; confirm.
    """
    fixtures = [
        'institution/fixtures/tests/institutions.json',
        'users/fixtures/tests/users.json',
        'funding/fixtures/tests/funding_bodies.json',
        'funding/fixtures/tests/attributions.json',
    ]
    url_append_str = '?_popup=1'

    def test_add_fundingsource_view_as_an_authorised_user(self):
        """
        Ensure the correct account types can access the funding source create view.
        """
        user = CustomUser.objects.get(email="shibboleth.user@example.ac.uk")
        user2 = CustomUser.objects.get(email="guest.user@external.ac.uk")
        institution = Institution.objects.get(name="Example University")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 200,
            },
            {
                'email': user2.email,
                'expected_status_code': 200,
            },
        ]
        for account in accounts:
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('email'),
            }
            # Test get. Response is a form
            response = self.client.get(
                reverse('add-funding-source'),
                **headers
            )
            self.assertEqual(response.status_code, account.get('expected_status_code'))
            self.assertTrue(isinstance(response.context_data.get('form'), AddFundingSourceForm))
            self.assertTrue(isinstance(response.context_data.get('view'), FundingSourceAddView))
            # Test post with new id. Redirects to create form
            new_identifier = 'n53c7'
            response = self.client.post(
                reverse('add-funding-source') + self.url_append_str,
                data={
                    'identifier': new_identifier,
                },
                **headers
            )
            # The redirect target carries both the new identifier and the
            # url suffix through to the create view.
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response.url, "/en-gb/funding/create-funding-source/" + new_identifier + self.url_append_str)

    def test_add_fundingsource_view_as_authorised_with_approval_required(self):
        """
        Ensure the correct account types can access the funding source create view.
        """
        user = CustomUser.objects.get(email="test.user@example2.ac.uk")
        institution = Institution.objects.get(base_domain="example2.ac.uk")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 200,
            },
        ]
        for account in accounts:
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('email'),
            }
            # Test get. Response is a form
            response = self.client.get(
                reverse('add-funding-source') + self.url_append_str,
                **headers
            )
            self.assertEqual(response.status_code, account.get('expected_status_code'))
            self.assertTrue(isinstance(response.context_data.get('form'), AddFundingSourceForm))
            self.assertTrue(isinstance(response.context_data.get('view'), FundingSourceAddView))
            # Test post with new id. Redirects to create form
            # (unlike the test above, this POST omits the url suffix).
            new_identifier = 'n53c7'
            response = self.client.post(
                reverse('add-funding-source'),
                data={
                    'identifier': new_identifier
                },
                **headers
            )
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response.url, "/en-gb/funding/create-funding-source/" + new_identifier)
            # Test post with existing id: redirects to the list view
            # instead of the create form.
            existing_identifier = 'scw0001'
            response = self.client.post(
                reverse('add-funding-source'),
                data={
                    'identifier': existing_identifier,
                },
                **headers
            )
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response.url, "/en-gb/funding/list/")

    def test_view_as_an_unauthorised_user(self):
        """
        Ensure unauthorised users can not access the project create view.
        """
        self.access_view_as_unauthorised_user(reverse('add-funding-source') + self.url_append_str)
class FundingSourceAddViewWithPopupTests(FundingSourceAddViewTests, TestCase):
    """Repeat the add-view tests with the popup query string appended.

    NOTE(review): the parent class already defaults ``url_append_str``
    to this same value, so this subclass currently re-runs identical
    requests — confirm whether the parent default should be ''.
    """
    url_append_str = '?_popup=1'
class FundingSourceAddViewWithFundingApprovalTests(FundingSourceAddViewTests, TestCase):
    """Re-run the inherited add-view tests with institutional funding
    approval enabled."""

    def setUp(self):
        # Set funding approval to true
        institution = Institution.objects.get(name="Example University")
        institution.needs_funding_approval = True
        institution.save()
class FundingSourceAddViewWithUserAsMember(FundingSourceAddViewTests, TestCase):
    """Re-run the inherited add-view tests with the test user already an
    approved member of the existing funding source."""

    def setUp(self):
        # Enable institutional funding approval so membership matters.
        institution = Institution.objects.get(name="Example University")
        institution.needs_funding_approval = True
        institution.save()
        # Fetch the test user.
        user = CustomUser.objects.get(email="test.user@example2.ac.uk")
        # Add the user to the existing funding source; the created
        # membership row is only needed as a database side effect, so the
        # previously unused local binding has been dropped.
        existing_identifier = 'scw0001'
        fundingsource = FundingSource.objects.get(identifier=existing_identifier)
        FundingSourceMembership.objects.create(
            fundingsource=fundingsource,
            user=user,
            approved=True)
class AttributionListViewTests(FundingViewTests, TestCase):
    """Tests for the attribution list view; subclasses swap in other
    list views by overriding ``view`` and ``view_name``."""

    view = AttributionListView
    view_name = 'list-attributions'

    def test_view_as_an_authorised_user(self):
        """
        Ensure the correct account types can access the funding source list view.
        """
        institution = Institution.objects.get(name="Example University")
        accounts = [
            {
                'email': CustomUser.objects.get(
                    email="shibboleth.user@example.ac.uk").email,
                'expected_status_code': 200,
            },
            {
                'email': CustomUser.objects.get(
                    email="guest.user@external.ac.uk").email,
                'expected_status_code': 200,
            },
        ]
        for account in accounts:
            response = self.client.get(
                reverse(self.view_name),
                **{
                    'Shib-Identity-Provider': institution.identity_provider,
                    'REMOTE_USER': account.get('email'),
                }
            )
            self.assertEqual(response.status_code,
                             account.get('expected_status_code'))
            self.assertTrue(isinstance(response.context_data.get('view'),
                                       self.view))

    def test_view_as_an_unauthorised_user(self):
        """
        Ensure unauthorised users can not access the attribution list view.
        """
        self.access_view_as_unauthorised_user(reverse(self.view_name))
class FundingSourceListViewTests(AttributionListViewTests):
    """Run the inherited list-view tests against the funding source list."""
    view = FundingSourceListView
    view_name = 'list-funding_sources'
class PublicationListViewTests(AttributionListViewTests):
    """Run the inherited list-view tests against the publication list."""
    view = PublicationListView
    view_name = 'list-publications'
class FundingSourceUpdateViewTests(FundingViewTests, TestCase):
    """Tests for updating funding source and publication attributions.

    FIX: this class previously defined two methods with the identical
    name ``test_fundingource_view_as_an_authorised_user`` — the second
    definition shadowed the first, so the first never ran. Both are now
    named distinctly (and the 'fundingource' typo is corrected).
    """

    def test_fundingsource_view_as_an_authorised_user(self):
        """
        Ensure the correct account types can access the funding source
        update view: the PI from the host institution gets the form; the
        second account is redirected (302).
        """
        user = CustomUser.objects.get(email="shibboleth.user@example.ac.uk")
        user2 = CustomUser.objects.get(email="test.user@example2.ac.uk")
        institution = Institution.objects.get(name="Example University")
        funding_source = FundingSource.objects.get(identifier="scw0001")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 200,
            },
            {
                'email': user2.email,
                'expected_status_code': 302,
            },
        ]
        for account in accounts:
            # Make the account under test the PI of the funding source.
            funding_source.pi_email = account.get('email')
            funding_source.save()
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('email'),
            }
            response = self.client.get(
                reverse(
                    'update-attribution',
                    args=[funding_source.id]
                ),
                **headers
            )
            self.assertEqual(response.status_code,
                             account.get('expected_status_code'))
            if response.status_code == 200:
                # Allowed to update: the form and view are in context.
                self.assertTrue(isinstance(response.context_data.get('form'),
                                           FundingSourceForm))
                self.assertTrue(isinstance(response.context_data.get('view'),
                                           AttributionUpdateView))

    def test_fundingsource_view_without_update_rights(self):
        """
        A user without update rights on the funding source is redirected
        away from the update view (302).
        """
        user = CustomUser.objects.get(email="test.user@example2.ac.uk")
        institution = Institution.objects.get(base_domain="example2.ac.uk")
        funding_source = FundingSource.objects.get(title="Test funding source 2")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 302,
            },
        ]
        for account in accounts:
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('email'),
            }
            response = self.client.get(
                reverse(
                    'update-attribution',
                    args=[funding_source.id]
                ),
                **headers
            )
            self.assertEqual(response.status_code,
                             account.get('expected_status_code'))

    def test_publication_view_as_an_authorised_user(self):
        """
        Ensure the correct account types can access the publication
        update view.
        """
        user = CustomUser.objects.get(email="shibboleth.user@example.ac.uk")
        user2 = CustomUser.objects.get(email="test.user@example2.ac.uk")
        attr_user = CustomUser.objects.get(email="attr.user@example.ac.uk")
        institution = Institution.objects.get(name="Example University")
        publication = Publication.objects.get(title="Test publication")
        accounts = [
            {
                'user': user,
                'expected_status_code': 200,
            },
            {
                'user': user2,
                'expected_status_code': 200,
            },
            {
                'user': attr_user,
                'expected_status_code': 200,
            },
        ]
        for account in accounts:
            # Attr users should always be able to see details; for the
            # others, make them the creator of the publication first.
            if account['user'] != attr_user:
                publication.created_by = account.get('user')
                publication.save()
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('user').email,
            }
            response = self.client.get(
                reverse(
                    'update-attribution',
                    args=[publication.id]
                ),
                **headers
            )
            self.assertEqual(response.status_code,
                             account.get('expected_status_code'))
            self.assertTrue(isinstance(response.context_data.get('form'),
                                       PublicationForm))
            self.assertTrue(isinstance(response.context_data.get('view'),
                                       AttributionUpdateView))

    def test_view_as_an_unauthorised_user(self):
        """
        Ensure unauthorised users can not access the update view.
        """
        user = CustomUser.objects.get(email="guest.user@external.ac.uk")
        institution = Institution.objects.get(name="Example University")
        funding_source = FundingSource.objects.get(title="Test funding source")
        accounts = [
            {
                'user': user,
                'expected_status_code': 302,
            },
        ]
        for account in accounts:
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('user').email,
            }
            response = self.client.get(
                reverse(
                    'update-attribution',
                    args=[funding_source.id]
                ),
                **headers
            )
            self.assertEqual(response.status_code, account.get('expected_status_code'))
        # A completely unknown remote user is bounced to registration.
        self.access_view_as_unauthorised_user(
            reverse(
                'update-attribution',
                args=[funding_source.id]
            )
        )
class FundingSourceDeleteViewTests(FundingViewTests, TestCase):
    """Tests for deleting funding source attributions."""

    def test_view_as_an_authorised_user(self):
        """
        Ensure the correct account types can access the delete view.
        """
        user = CustomUser.objects.get(email="shibboleth.user@example.ac.uk")
        institution = Institution.objects.get(name="Example University")
        funding_source = FundingSource.objects.get(title="Test funding source")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 200,
            },
        ]
        for account in accounts:
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('email'),
            }
            response = self.client.get(
                reverse(
                    'delete-attribution',
                    args=[funding_source.id]
                ),
                **headers
            )
            self.assertEqual(response.status_code, account.get('expected_status_code'))
            # The delete view is served by AttributionDeleteView.
            self.assertTrue(
                isinstance(
                    response.context_data.get('view'),
                    AttributionDeleteView
                )
            )

    def test_view_as_an_unauthorised_user(self):
        """
        Ensure unauthorised users can not access the delete view.
        """
        user = CustomUser.objects.get(email="guest.user@external.ac.uk")
        user2 = CustomUser.objects.get(email="test.user@example2.ac.uk")
        institution = Institution.objects.get(name="Example University")
        funding_source = FundingSource.objects.get(title="Test funding source")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 302,
            },
            {
                'email': user2.email,
                'expected_status_code': 302,
            },
        ]
        for account in accounts:
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('email'),
            }
            response = self.client.get(
                reverse(
                    'delete-attribution',
                    args=[funding_source.id]
                ),
                **headers
            )
            self.assertEqual(response.status_code, account.get('expected_status_code'))
        # An unknown remote user is redirected to registration.
        self.access_view_as_unauthorised_user(
            reverse(
                'delete-attribution',
                args=[funding_source.id]
            )
        )

    def test_view_without_user_approval(self):
        """
        Ensure unauthorised users can not access the delete view.
        """
        # NOTE(review): expects 302 even though the user is made PI,
        # presumably because the user's account is not approved — confirm.
        user = CustomUser.objects.get(email="test.user@example2.ac.uk")
        institution = Institution.objects.get(name="Example University")
        funding_source = FundingSource.objects.get(title="Test funding source")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 302,
            },
        ]
        for account in accounts:
            # Make the account the PI of the funding source.
            funding_source.pi_email = account.get('email')
            funding_source.save()
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('email'),
            }
            response = self.client.get(
                reverse(
                    'delete-attribution',
                    args=[funding_source.id]
                ),
                **headers
            )
            self.assertEqual(response.status_code, account.get('expected_status_code'))
        self.access_view_as_unauthorised_user(
            reverse(
                'delete-attribution',
                args=[funding_source.id]
            )
        )
class FundingsourceDetailViewTest(FundingViewTests, TestCase):
    """Tests for the funding source detail view."""

    def test_view_as_pending_user(self):
        """
        Ensure an unapproved user can not view detail on a funding source.
        """
        fundingsource = FundingSource.objects.get(
            title='Test funding source'
        )
        user = CustomUser.objects.get(email="norman.gordon@example.ac.uk")
        institution = user.profile.institution
        path = reverse('funding_source-detail-view', args=[fundingsource.id])
        headers = {
            'Shib-Identity-Provider': institution.identity_provider,
            'REMOTE_USER': user.email,
        }
        response = self.client.get(path, **headers)
        # Redirected back to the attribution list.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, reverse('list-attributions'))

    def test_view_as_owner(self):
        """
        Ensure the owner of a funding source can view its detail page.
        """
        fundingsource = FundingSource.objects.get(
            title='Test funding source'
        )
        user = fundingsource.owner
        institution = user.profile.institution
        path = reverse('funding_source-detail-view', args=[fundingsource.id])
        headers = {
            'Shib-Identity-Provider': institution.identity_provider,
            'REMOTE_USER': user.email,
        }
        response = self.client.get(path, **headers)
        self.assertEqual(response.status_code, 200)

    def test_view_as_other_institution_user(self):
        """
        Ensure a user from another institution can view detail on a
        funding source.
        """
        fundingsource = FundingSource.objects.get(
            title='Test funding source'
        )
        user = CustomUser.objects.get(email='test.user@example2.ac.uk')
        institution = user.profile.institution
        path = reverse('funding_source-detail-view', args=[fundingsource.id])
        headers = {
            'Shib-Identity-Provider': institution.identity_provider,
            'REMOTE_USER': user.email,
        }
        response = self.client.get(path, **headers)
        self.assertEqual(response.status_code, 200)

    def test_view_as_unrelated_user(self):
        """
        Ensure an unrelated user can not view detail on a funding source
        and is redirected to the attribution list.
        """
        fundingsource = FundingSource.objects.get(
            title='Test funding source'
        )
        user = CustomUser.objects.get(email="test.user@example3.ac.uk")
        institution = user.profile.institution
        path = reverse('funding_source-detail-view', args=[fundingsource.id])
        headers = {
            'Shib-Identity-Provider': institution.identity_provider,
            'REMOTE_USER': user.email,
        }
        response = self.client.get(path, **headers)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, reverse('list-attributions'))
class ListUnapprovedFundingSourcesTest(FundingViewTests, TestCase):
    """Tests for the unapproved funding sources listing."""

    def test_view_as_different_users(self):
        """
        A regular user is forbidden (403); an admin user gets the list
        (200) containing every funding source title.
        """
        url = reverse('list-unapproved-funding_sources')
        user = CustomUser.objects.get(email="shibboleth.user@example.ac.uk")
        admin_user = CustomUser.objects.get(email="admin.user@example.ac.uk")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 403,
            }, {
                'email': admin_user.email,
                'expected_status_code': 200,
            }
        ]
        # The institution does not change between accounts, so look it
        # up once (previously re-fetched on every iteration).
        institution = Institution.objects.get(name="Example University")
        for account in accounts:
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account['email'],
            }
            response = self.client.get(url, **headers)
            self.assertEqual(response.status_code, account['expected_status_code'])
            # Check that the page at least contains titles for all funding
            # source objects. (A plain loop replaces the previous list
            # comprehension that was used only for its side effects.)
            if response.status_code == 200:
                content = str(response.content)
                for funding_source in FundingSource.objects.all():
                    self.assertIn(funding_source.title, content)
class PublicationDeleteViewTests(FundingViewTests, TestCase):
    """Tests for deleting publication attributions."""

    def test_view_as_an_authorised_user(self):
        """
        Ensure the correct account types can access the delete view.
        """
        user = CustomUser.objects.get(email="shibboleth.user@example.ac.uk")
        institution = Institution.objects.get(name="Example University")
        publication = Publication.objects.get(title="Test publication")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 200,
            },
        ]
        for account in accounts:
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('email'),
            }
            response = self.client.get(
                reverse(
                    'delete-attribution',
                    args=[publication.id]
                ),
                **headers
            )
            self.assertEqual(response.status_code, account.get('expected_status_code'))
            # The delete view is served by AttributionDeleteView.
            self.assertTrue(
                isinstance(
                    response.context_data.get('view'),
                    AttributionDeleteView
                )
            )

    def test_view_as_an_unauthorised_user(self):
        """
        Ensure unauthorised users can not access the delete view.
        """
        user = CustomUser.objects.get(email="guest.user@external.ac.uk")
        user2 = CustomUser.objects.get(email="test.user@example2.ac.uk")
        institution = Institution.objects.get(name="Example University")
        publication = Publication.objects.get(title="Test publication")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 302,
            },
            {
                'email': user2.email,
                'expected_status_code': 302,
            },
        ]
        for account in accounts:
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('email'),
            }
            response = self.client.get(
                reverse(
                    'delete-attribution',
                    args=[publication.id]
                ),
                **headers
            )
            self.assertEqual(response.status_code, account.get('expected_status_code'))
        # An unknown remote user is redirected to registration.
        self.access_view_as_unauthorised_user(
            reverse(
                'delete-attribution',
                args=[publication.id]
            )
        )

    def test_view_without_user_approval(self):
        """
        Ensure unauthorised users can not access the delete view.
        """
        # NOTE(review): setting `pi_email` on a Publication looks
        # copy-pasted from the funding source test; confirm Publication
        # actually defines that field.
        user = CustomUser.objects.get(email="test.user@example2.ac.uk")
        institution = Institution.objects.get(name="Example University")
        publication = Publication.objects.get(title="Test publication")
        accounts = [
            {
                'email': user.email,
                'expected_status_code': 302,
            },
        ]
        for account in accounts:
            publication.pi_email = account.get('email')
            publication.save()
            headers = {
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': account.get('email'),
            }
            response = self.client.get(
                reverse(
                    'delete-attribution',
                    args=[publication.id]
                ),
                **headers
            )
            self.assertEqual(response.status_code, account.get('expected_status_code'))
        self.access_view_as_unauthorised_user(
            reverse(
                'delete-attribution',
                args=[publication.id]
            )
        )
class ListFundingSourceMembershipTests(FundingViewTests, TestCase):
    """Tests for the funding source membership list view."""

    def test_access_as_unauthorised_user(self):
        """
        Ensure that users not logged in get booted out of this page
        """
        path = reverse('list-funding_source_memberships')
        self.access_view_as_unauthorised_user(path)

    def test_access_as_authorised_user(self):
        """
        Check that logged in users can see this page.
        """
        user = CustomUser.objects.get(email="shibboleth.user@example.ac.uk")
        institution = Institution.objects.get(name="Example University")
        response = self.client.get(
            reverse('list-funding_source_memberships'),
            **{
                'Shib-Identity-Provider': institution.identity_provider,
                'REMOTE_USER': user.email,
            }
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Test funding source")
| 36.972634
| 122
| 0.579861
| 3,024
| 32,425
| 6.07209
| 0.073413
| 0.039756
| 0.041172
| 0.042207
| 0.798334
| 0.775569
| 0.765167
| 0.744799
| 0.724975
| 0.719421
| 0
| 0.007969
| 0.322745
| 32,425
| 876
| 123
| 37.01484
| 0.828188
| 0.076392
| 0
| 0.619403
| 0
| 0
| 0.166303
| 0.074412
| 0
| 0
| 0
| 0
| 0.08209
| 1
| 0.041791
| false
| 0
| 0.026866
| 0
| 0.10597
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
31b9b785d99fd979c6bedb97c4cf6467ccb03c49
| 14,829
|
py
|
Python
|
TextMagic/__init__.py
|
textmagic/textmagic-rest-python-v2
|
49055e214a6cf0f7545b85aa03e49e6d92bcef13
|
[
"MIT"
] | 2
|
2020-10-21T09:44:33.000Z
|
2021-06-29T20:58:57.000Z
|
TextMagic/__init__.py
|
textmagic/textmagic-rest-python-v2
|
49055e214a6cf0f7545b85aa03e49e6d92bcef13
|
[
"MIT"
] | null | null | null |
TextMagic/__init__.py
|
textmagic/textmagic-rest-python-v2
|
49055e214a6cf0f7545b85aa03e49e6d92bcef13
|
[
"MIT"
] | 1
|
2021-12-02T12:15:56.000Z
|
2021-12-02T12:15:56.000Z
|
# coding: utf-8
# flake8: noqa
"""
TextMagic API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from TextMagic.api.text_magic_api import TextMagicApi
# import ApiClient
from TextMagic.api_client import ApiClient
from TextMagic.configuration import Configuration
# import models into sdk package
from TextMagic.models.assign_contacts_to_list_input_object import AssignContactsToListInputObject
from TextMagic.models.bad_request_response import BadRequestResponse
from TextMagic.models.bad_request_response_errors import BadRequestResponseErrors
from TextMagic.models.block_contact_input_object import BlockContactInputObject
from TextMagic.models.bulk_session import BulkSession
from TextMagic.models.buy_dedicated_number_input_object import BuyDedicatedNumberInputObject
from TextMagic.models.call_price_response import CallPriceResponse
from TextMagic.models.chat import Chat
from TextMagic.models.check_phone_verification_code_tfa_input_object import CheckPhoneVerificationCodeTFAInputObject
from TextMagic.models.clear_and_assign_contacts_to_list_input_object import ClearAndAssignContactsToListInputObject
from TextMagic.models.close_chats_bulk_input_object import CloseChatsBulkInputObject
from TextMagic.models.contact import Contact
from TextMagic.models.contact_custom_field import ContactCustomField
from TextMagic.models.contact_image import ContactImage
from TextMagic.models.contact_note import ContactNote
from TextMagic.models.conversation import Conversation
from TextMagic.models.country import Country
from TextMagic.models.create_contact_input_object import CreateContactInputObject
from TextMagic.models.create_contact_note_input_object import CreateContactNoteInputObject
from TextMagic.models.create_custom_field_input_object import CreateCustomFieldInputObject
from TextMagic.models.create_list_input_object import CreateListInputObject
from TextMagic.models.create_template_input_object import CreateTemplateInputObject
from TextMagic.models.currency import Currency
from TextMagic.models.custom_field_list_item import CustomFieldListItem
from TextMagic.models.delete_chat_messages_bulk_input_object import DeleteChatMessagesBulkInputObject
from TextMagic.models.delete_chats_bulk_input_object import DeleteChatsBulkInputObject
from TextMagic.models.delete_contacs_from_list_object import DeleteContacsFromListObject
from TextMagic.models.delete_contact_notes_bulk_input_object import DeleteContactNotesBulkInputObject
from TextMagic.models.delete_contacts_by_ids_input_object import DeleteContactsByIdsInputObject
from TextMagic.models.delete_inbound_messages_bulk_input_object import DeleteInboundMessagesBulkInputObject
from TextMagic.models.delete_list_contacts_bulk_input_object import DeleteListContactsBulkInputObject
from TextMagic.models.delete_lists_bulk_input_object import DeleteListsBulkInputObject
from TextMagic.models.delete_message_sessions_bulk_input_object import DeleteMessageSessionsBulkInputObject
from TextMagic.models.delete_outbound_messages_bulk_input_object import DeleteOutboundMessagesBulkInputObject
from TextMagic.models.delete_scheduled_messages_bulk_input_object import DeleteScheduledMessagesBulkInputObject
from TextMagic.models.delete_templates_bulk_input_object import DeleteTemplatesBulkInputObject
from TextMagic.models.do_auth_response import DoAuthResponse
from TextMagic.models.do_auth_response_min_versions import DoAuthResponseMinVersions
from TextMagic.models.do_carrier_lookup_response import DoCarrierLookupResponse
from TextMagic.models.do_email_lookup_response import DoEmailLookupResponse
from TextMagic.models.favorite_contact import FavoriteContact
from TextMagic.models.get_all_bulk_sessions_paginated_response import GetAllBulkSessionsPaginatedResponse
from TextMagic.models.get_all_chats_paginated_response import GetAllChatsPaginatedResponse
from TextMagic.models.get_all_inbound_messages_paginated_response import GetAllInboundMessagesPaginatedResponse
from TextMagic.models.get_all_message_sessions_paginated_response import GetAllMessageSessionsPaginatedResponse
from TextMagic.models.get_all_outbound_messages_paginated_response import GetAllOutboundMessagesPaginatedResponse
from TextMagic.models.get_all_scheduled_messages_paginated_response import GetAllScheduledMessagesPaginatedResponse
from TextMagic.models.get_all_templates_paginated_response import GetAllTemplatesPaginatedResponse
from TextMagic.models.get_available_dedicated_numbers_response import GetAvailableDedicatedNumbersResponse
from TextMagic.models.get_available_sender_setting_options_response import GetAvailableSenderSettingOptionsResponse
from TextMagic.models.get_balance_notification_options_response import GetBalanceNotificationOptionsResponse
from TextMagic.models.get_balance_notification_settings_response import GetBalanceNotificationSettingsResponse
from TextMagic.models.get_blocked_contacts_paginated_response import GetBlockedContactsPaginatedResponse
from TextMagic.models.get_callback_settings_response import GetCallbackSettingsResponse
from TextMagic.models.get_calls_prices_response import GetCallsPricesResponse
from TextMagic.models.get_chat_messages_paginated_response import GetChatMessagesPaginatedResponse
from TextMagic.models.get_contact_import_session_progress_response import GetContactImportSessionProgressResponse
from TextMagic.models.get_contact_notes_paginated_response import GetContactNotesPaginatedResponse
from TextMagic.models.get_contacts_autocomplete_response import GetContactsAutocompleteResponse
from TextMagic.models.get_contacts_autocomplete_response_item import GetContactsAutocompleteResponseItem
from TextMagic.models.get_contacts_by_list_id_paginated_response import GetContactsByListIdPaginatedResponse
from TextMagic.models.get_contacts_paginated_response import GetContactsPaginatedResponse
from TextMagic.models.get_countries_response import GetCountriesResponse
from TextMagic.models.get_custom_fields_paginated_response import GetCustomFieldsPaginatedResponse
from TextMagic.models.get_disallowed_rules_response import GetDisallowedRulesResponse
from TextMagic.models.get_favorites_paginated_response import GetFavoritesPaginatedResponse
from TextMagic.models.get_inbound_messages_notification_settings_response import GetInboundMessagesNotificationSettingsResponse
from TextMagic.models.get_invoices_paginated_response import GetInvoicesPaginatedResponse
from TextMagic.models.get_list_contacts_ids_response import GetListContactsIdsResponse
from TextMagic.models.get_lists_of_contact_paginated_response import GetListsOfContactPaginatedResponse
from TextMagic.models.get_lists_paginated_response import GetListsPaginatedResponse
from TextMagic.models.get_message_preview_response import GetMessagePreviewResponse
from TextMagic.models.get_message_price_response import GetMessagePriceResponse
from TextMagic.models.get_message_price_response_countries_item import GetMessagePriceResponseCountriesItem
from TextMagic.models.get_message_prices_response import GetMessagePricesResponse
from TextMagic.models.get_message_session_stat_response import GetMessageSessionStatResponse
from TextMagic.models.get_messages_by_session_id_paginated_response import GetMessagesBySessionIdPaginatedResponse
from TextMagic.models.get_messaging_counters_response import GetMessagingCountersResponse
from TextMagic.models.get_messaging_stat_response import GetMessagingStatResponse
from TextMagic.models.get_outbound_messages_history_paginated_response import GetOutboundMessagesHistoryPaginatedResponse
from TextMagic.models.get_push_tokens_response import GetPushTokensResponse
from TextMagic.models.get_sender_ids_paginated_response import GetSenderIdsPaginatedResponse
from TextMagic.models.get_sender_settings_response import GetSenderSettingsResponse
from TextMagic.models.get_spending_stat_paginated_response import GetSpendingStatPaginatedResponse
from TextMagic.models.get_state_response import GetStateResponse
from TextMagic.models.get_subaccounts_with_tokens_input_object import GetSubaccountsWithTokensInputObject
from TextMagic.models.get_subaccounts_with_tokens_response import GetSubaccountsWithTokensResponse
from TextMagic.models.get_survey_nodes_response import GetSurveyNodesResponse
from TextMagic.models.get_surveys_paginated_response import GetSurveysPaginatedResponse
from TextMagic.models.get_timezones_response import GetTimezonesResponse
from TextMagic.models.get_unread_messages_total_response import GetUnreadMessagesTotalResponse
from TextMagic.models.get_unsubscribers_paginated_response import GetUnsubscribersPaginatedResponse
from TextMagic.models.get_user_dedicated_numbers_paginated_response import GetUserDedicatedNumbersPaginatedResponse
from TextMagic.models.get_versions_response import GetVersionsResponse
from TextMagic.models.invite_subaccount_input_object import InviteSubaccountInputObject
from TextMagic.models.invoice import Invoice
from TextMagic.models.list import List
from TextMagic.models.list_image import ListImage
from TextMagic.models.mark_chats_read_bulk_input_object import MarkChatsReadBulkInputObject
from TextMagic.models.mark_chats_unread_bulk_input_object import MarkChatsUnreadBulkInputObject
from TextMagic.models.message_in import MessageIn
from TextMagic.models.message_out import MessageOut
from TextMagic.models.message_price_item import MessagePriceItem
from TextMagic.models.message_session import MessageSession
from TextMagic.models.message_template import MessageTemplate
from TextMagic.models.messages_ics import MessagesIcs
from TextMagic.models.messages_ics_parameters import MessagesIcsParameters
from TextMagic.models.messages_ics_parameters_recipients import MessagesIcsParametersRecipients
from TextMagic.models.messages_ics_text_parameters import MessagesIcsTextParameters
from TextMagic.models.messaging_stat_item import MessagingStatItem
from TextMagic.models.mute_chat_input_object import MuteChatInputObject
from TextMagic.models.mute_chats_bulk_input_object import MuteChatsBulkInputObject
from TextMagic.models.not_found_response import NotFoundResponse
from TextMagic.models.ping_response import PingResponse
from TextMagic.models.push_token import PushToken
from TextMagic.models.reopen_chats_bulk_input_object import ReopenChatsBulkInputObject
from TextMagic.models.request_new_subaccount_token_input_object import RequestNewSubaccountTokenInputObject
from TextMagic.models.request_sender_id_input_object import RequestSenderIdInputObject
from TextMagic.models.resource_link_response import ResourceLinkResponse
from TextMagic.models.search_chats_by_ids_paginated_response import SearchChatsByIdsPaginatedResponse
from TextMagic.models.search_chats_by_receipent_paginated_response import SearchChatsByReceipentPaginatedResponse
from TextMagic.models.search_chats_paginated_response import SearchChatsPaginatedResponse
from TextMagic.models.search_contacts_paginated_response import SearchContactsPaginatedResponse
from TextMagic.models.search_inbound_messages_paginated_response import SearchInboundMessagesPaginatedResponse
from TextMagic.models.search_lists_paginated_response import SearchListsPaginatedResponse
from TextMagic.models.search_outbound_messages_paginated_response import SearchOutboundMessagesPaginatedResponse
from TextMagic.models.search_scheduled_messages_paginated_response import SearchScheduledMessagesPaginatedResponse
from TextMagic.models.search_templates_paginated_response import SearchTemplatesPaginatedResponse
from TextMagic.models.send_message_input_object import SendMessageInputObject
from TextMagic.models.send_message_response import SendMessageResponse
from TextMagic.models.send_phone_verification_code_response import SendPhoneVerificationCodeResponse
from TextMagic.models.send_phone_verification_code_tfa_input_object import SendPhoneVerificationCodeTFAInputObject
from TextMagic.models.sender_id import SenderId
from TextMagic.models.sender_settings_item import SenderSettingsItem
from TextMagic.models.set_chat_status_input_object import SetChatStatusInputObject
from TextMagic.models.subaccount_with_token import SubaccountWithToken
from TextMagic.models.survey import Survey
from TextMagic.models.survey_node import SurveyNode
from TextMagic.models.survey_recipient import SurveyRecipient
from TextMagic.models.survey_sender_countries import SurveySenderCountries
from TextMagic.models.timezone import Timezone
from TextMagic.models.unauthorized_response import UnauthorizedResponse
from TextMagic.models.unblock_contact_input_object import UnblockContactInputObject
from TextMagic.models.unblock_contacts_bulk_input_object import UnblockContactsBulkInputObject
from TextMagic.models.unmute_chats_bulk_input_object import UnmuteChatsBulkInputObject
from TextMagic.models.unsubscribe_contact_input_object import UnsubscribeContactInputObject
from TextMagic.models.unsubscribed_contact import UnsubscribedContact
from TextMagic.models.update_balance_notification_settings_input_object import UpdateBalanceNotificationSettingsInputObject
from TextMagic.models.update_callback_settings_input_object import UpdateCallbackSettingsInputObject
from TextMagic.models.update_chat_desktop_notification_settings_input_object import UpdateChatDesktopNotificationSettingsInputObject
from TextMagic.models.update_contact_input_object import UpdateContactInputObject
from TextMagic.models.update_contact_note_input_object import UpdateContactNoteInputObject
from TextMagic.models.update_current_user_input_object import UpdateCurrentUserInputObject
from TextMagic.models.update_current_user_response import UpdateCurrentUserResponse
from TextMagic.models.update_custom_field_input_object import UpdateCustomFieldInputObject
from TextMagic.models.update_custom_field_value_input_object import UpdateCustomFieldValueInputObject
from TextMagic.models.update_inbound_messages_notification_settings_input_object import UpdateInboundMessagesNotificationSettingsInputObject
from TextMagic.models.update_list_object import UpdateListObject
from TextMagic.models.update_sender_setting_input_object import UpdateSenderSettingInputObject
from TextMagic.models.update_survey_country_item import UpdateSurveyCountryItem
from TextMagic.models.update_template_input_object import UpdateTemplateInputObject
from TextMagic.models.upload_message_attachment_response import UploadMessageAttachmentResponse
from TextMagic.models.user import User
from TextMagic.models.user_custom_field import UserCustomField
from TextMagic.models.user_image import UserImage
from TextMagic.models.user_statement import UserStatement
from TextMagic.models.users_inbound import UsersInbound
| 77.234375
| 140
| 0.923663
| 1,607
| 14,829
| 8.187306
| 0.21033
| 0.167971
| 0.241164
| 0.088622
| 0.214107
| 0.08201
| 0.043247
| 0
| 0
| 0
| 0
| 0.000426
| 0.051049
| 14,829
| 191
| 141
| 77.638743
| 0.934551
| 0.021984
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
31bee91a0e4e2375d2a15fc99bde32953b8bdd3e
| 254
|
py
|
Python
|
chapter 3/try_it_yourself.py
|
LC231/csws-week1
|
583d837063cf5b39e0798b4f628005a9c4bbef98
|
[
"Apache-2.0"
] | null | null | null |
chapter 3/try_it_yourself.py
|
LC231/csws-week1
|
583d837063cf5b39e0798b4f628005a9c4bbef98
|
[
"Apache-2.0"
] | null | null | null |
chapter 3/try_it_yourself.py
|
LC231/csws-week1
|
583d837063cf5b39e0798b4f628005a9c4bbef98
|
[
"Apache-2.0"
] | null | null | null |
# Greet every letter stored in `names`, one line per entry.
# The original repeated the same two statements once per index; a loop
# removes the duplication and works for a list of any length.
names = ['H', 'E', 'L', 'L', 'O']
for name in names:
    message = f"hello {name}"
    print(message)
| 23.090909
| 29
| 0.649606
| 41
| 254
| 4.02439
| 0.341463
| 0.242424
| 0.393939
| 0.545455
| 0.727273
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0.022026
| 0.106299
| 254
| 11
| 30
| 23.090909
| 0.704846
| 0
| 0
| 0.454545
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.454545
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
9ed2b01adec136d9b5bc3c998660bb1951e05b9e
| 167
|
py
|
Python
|
tiktok_dl/__init__.py
|
skyme5/tiktok-dl
|
39eafedf935216b3c51b2907e74056891803c107
|
[
"MIT"
] | null | null | null |
tiktok_dl/__init__.py
|
skyme5/tiktok-dl
|
39eafedf935216b3c51b2907e74056891803c107
|
[
"MIT"
] | 1
|
2020-06-21T13:42:02.000Z
|
2020-06-21T17:34:35.000Z
|
tiktok_dl/__init__.py
|
skyme5/tiktok-dl
|
39eafedf935216b3c51b2907e74056891803c107
|
[
"MIT"
] | null | null | null |
"""Top-level package for tiktok-dl."""
# For relative imports to work in Python 3.6
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
| 23.857143
| 60
| 0.748503
| 29
| 167
| 4.172414
| 0.758621
| 0.099174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013514
| 0.113772
| 167
| 6
| 61
| 27.833333
| 0.804054
| 0.45509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9ee079966f3a6db00d226ad66b11d305e5fad856
| 2,467
|
py
|
Python
|
ulfs/initializers.py
|
asappresearch/texrel
|
dff447a99d56f2f92284df866fa01e7762dc6eac
|
[
"MIT"
] | 2
|
2021-07-09T16:32:00.000Z
|
2022-03-21T17:32:39.000Z
|
ulfs/initializers.py
|
asappresearch/texrel
|
dff447a99d56f2f92284df866fa01e7762dc6eac
|
[
"MIT"
] | null | null | null |
ulfs/initializers.py
|
asappresearch/texrel
|
dff447a99d56f2f92284df866fa01e7762dc6eac
|
[
"MIT"
] | 1
|
2021-07-09T16:32:02.000Z
|
2021-07-09T16:32:02.000Z
|
import math
from ulfs import gru_utils
FACTOR_RELU = 1.43
def susillo_init_linear(linear, factor=1):
    """Sussillo-style init for a Linear layer: zero bias and weights drawn
    from U(-r, r) with r = sqrt(3) / sqrt(fan_in) * factor."""
    fan_in = linear.weight.size(1)
    radius = math.sqrt(3) / math.sqrt(fan_in) * factor
    linear.bias.data.zero_()
    linear.weight.data.uniform_(-radius, radius)
def susillo_init_embedding(embedding, factor=1):
    """Sussillo-style init for an Embedding: weights drawn from U(-r, r)
    with r = sqrt(3) / sqrt(num_embeddings) * factor."""
    num_rows = embedding.weight.size(0)
    radius = math.sqrt(3) / math.sqrt(num_rows) * factor
    embedding.weight.data.uniform_(-radius, radius)
def susillo_initialize_gru_reset_weight(gru_cell, factor, num_inputs):
    """Uniform Sussillo-style init of both halves (input and hidden) of the
    GRU reset-gate weights, radius sqrt(3) / sqrt(num_inputs) * factor."""
    radius = math.sqrt(3) / math.sqrt(num_inputs) * factor
    for weight in (gru_utils.get_gru_weight_ir(gru_cell),
                   gru_utils.get_gru_weight_hr(gru_cell)):
        weight.data.uniform_(-radius, radius)
def susillo_initialize_gru_update_weight(gru_cell, factor, num_inputs):
    """Uniform Sussillo-style init of both halves (input and hidden) of the
    GRU update-gate weights, radius sqrt(3) / sqrt(num_inputs) * factor."""
    radius = math.sqrt(3) / math.sqrt(num_inputs) * factor
    for weight in (gru_utils.get_gru_weight_iz(gru_cell),
                   gru_utils.get_gru_weight_hz(gru_cell)):
        weight.data.uniform_(-radius, radius)
def susillo_initialize_gru_candidate_weight(gru_cell, factor, num_inputs):
    """Uniform Sussillo-style init of both halves (input and hidden) of the
    GRU candidate weights, radius sqrt(3) / sqrt(num_inputs) * factor."""
    radius = math.sqrt(3) / math.sqrt(num_inputs) * factor
    for weight in (gru_utils.get_gru_weight_in(gru_cell),
                   gru_utils.get_gru_weight_hn(gru_cell)):
        weight.data.uniform_(-radius, radius)
def constant_initialize_gru_reset_bias(gru_cell, value):
    """Fill both reset-gate bias vectors (input and hidden) with `value`."""
    for bias in (gru_utils.get_gru_bias_ir(gru_cell),
                 gru_utils.get_gru_bias_hr(gru_cell)):
        bias.data.fill_(value)
def constant_initialize_gru_update_bias(gru_cell, value):
    """Fill both update-gate bias vectors (input and hidden) with `value`."""
    for bias in (gru_utils.get_gru_bias_iz(gru_cell),
                 gru_utils.get_gru_bias_hz(gru_cell)):
        bias.data.fill_(value)
def constant_initialize_gru_candidate_bias(gru_cell, value):
    """Fill both candidate bias vectors (input and hidden) with `value`."""
    for bias in (gru_utils.get_gru_bias_in(gru_cell),
                 gru_utils.get_gru_bias_hn(gru_cell)):
        bias.data.fill_(value)
def init_gru_cell(gru_cell):
    """Initialize a GRU cell: Sussillo-style uniform weights per gate,
    reset/update biases filled with 1 and candidate bias with 0."""
    hidden_size = gru_cell.bias_hh.data.size(0) // 3
    input_size = gru_cell.weight_ih.data.size(1)
    print('gru input_size', input_size, 'hidden_size', hidden_size)
    fan_in = input_size + hidden_size
    susillo_initialize_gru_reset_weight(gru_cell, factor=1, num_inputs=fan_in)
    susillo_initialize_gru_update_weight(gru_cell, factor=1, num_inputs=fan_in)
    # The candidate gate uses a different effective fan-in: (3 * hidden) // 2.
    susillo_initialize_gru_candidate_weight(gru_cell, factor=1, num_inputs=(hidden_size * 3) // 2)
    constant_initialize_gru_reset_bias(gru_cell, value=1)
    constant_initialize_gru_update_bias(gru_cell, value=1)
    constant_initialize_gru_candidate_bias(gru_cell, value=0)
| 36.820896
| 98
| 0.773004
| 403
| 2,467
| 4.307692
| 0.116625
| 0.112903
| 0.076037
| 0.096774
| 0.802419
| 0.802419
| 0.781106
| 0.737327
| 0.568548
| 0.459101
| 0
| 0.011009
| 0.116336
| 2,467
| 66
| 99
| 37.378788
| 0.785321
| 0
| 0
| 0.116279
| 0
| 0
| 0.010134
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.209302
| false
| 0
| 0.046512
| 0
| 0.255814
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9eee83890bde733f35527857260dd3b3cacfee45
| 50
|
py
|
Python
|
dvtag/__init__.py
|
NobeKanai/dvtag
|
ed7b9e53de5b5d0635370723c83f70e1dc51e19a
|
[
"MIT"
] | 6
|
2022-01-08T06:37:39.000Z
|
2022-02-14T02:03:30.000Z
|
dvtag/__init__.py
|
NobeKanai/dvtag
|
ed7b9e53de5b5d0635370723c83f70e1dc51e19a
|
[
"MIT"
] | 1
|
2022-01-04T14:59:48.000Z
|
2022-01-23T10:57:18.000Z
|
dvtag/__init__.py
|
NobeKanai/dvtag
|
ed7b9e53de5b5d0635370723c83f70e1dc51e19a
|
[
"MIT"
] | null | null | null |
from .dvtag import tag
from .utils import get_rjid
| 25
| 27
| 0.82
| 9
| 50
| 4.444444
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14
| 50
| 2
| 27
| 25
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
730ae1c8d43ae68412dd2143706c52088120e824
| 104
|
py
|
Python
|
omnilearn/models/unsup/__init__.py
|
fleeb24/foundation
|
18c4179cfe2988267827e532f8d8cd0726ef8709
|
[
"MIT"
] | 1
|
2020-10-08T21:33:58.000Z
|
2020-10-08T21:33:58.000Z
|
omnilearn/models/unsup/__init__.py
|
felixludos/foundation
|
62ac096e6c53e12f2e29480506687c652c399d50
|
[
"MIT"
] | null | null | null |
omnilearn/models/unsup/__init__.py
|
felixludos/foundation
|
62ac096e6c53e12f2e29480506687c652c399d50
|
[
"MIT"
] | null | null | null |
from .autoencoders import Autoencoder, Variational_Autoencoder, Wasserstein_Autoencoder, Generative_AE
| 34.666667
| 102
| 0.884615
| 10
| 104
| 8.9
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 104
| 2
| 103
| 52
| 0.927083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
731e65a82ad4e52976360d0b2c8e4949cdf5a11b
| 6,675
|
py
|
Python
|
cogs/moderation.py
|
Toricane/Encourage-Bot
|
65be63ee405b0e228738617c3ae12c395c3c6a08
|
[
"MIT"
] | 4
|
2021-03-26T04:20:05.000Z
|
2022-03-30T16:42:29.000Z
|
cogs/moderation.py
|
Toricane/Perseverance-Bot
|
65be63ee405b0e228738617c3ae12c395c3c6a08
|
[
"MIT"
] | null | null | null |
cogs/moderation.py
|
Toricane/Perseverance-Bot
|
65be63ee405b0e228738617c3ae12c395c3c6a08
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from discord_slash import cog_ext, SlashContext
from discord_slash.utils.manage_commands import create_option
from log import log
l = log()
class Moderation(commands.Cog, description="Moderation tools for your server!"):
    """Moderation cog: role add/remove, kick, ban/unban and nickname
    changes, each exposed as both a prefix command and a slash command.

    Bug fixed: ``nick`` and ``_nick`` were defined without ``self`` —
    discord.py passes the cog instance as the first argument of a Cog
    command, so ``ctx`` would have been bound to the cog instance and
    every invocation would have misfired.
    """

    def __init__(self, bot):
        self.bot = bot

    # addrole:
    @commands.command(aliases=["ar"], help="Add a role to a member.\nRequires Manage Roles permission.")
    @commands.has_permissions(manage_roles=True)
    async def addrole(self, ctx, member: discord.Member, role: discord.Role):
        l.used(ctx)
        await member.add_roles(role)
        await ctx.send(f"{member.mention} got the {role} role.")

    @cog_ext.cog_slash(name="addrole", description="Adds a role")
    @commands.has_permissions(manage_roles=True)
    async def _addrole(self, ctx: SlashContext, member: discord.Member, role: discord.Role):
        l.used(ctx)
        await member.add_roles(role)
        await ctx.send(f"{member.mention} got the {role} role.")

    # removerole:
    @commands.command(aliases=["rr"], help="Remove a role from someone.\nRequires Manage Roles permission.")
    @commands.has_permissions(manage_roles=True)
    async def removerole(self, ctx, member: discord.Member, role: discord.Role):
        l.used(ctx)
        await member.remove_roles(role)
        await ctx.send(f"{member.mention} lost the {role} role.")

    @cog_ext.cog_slash(name="removerole", description="Removes a role")
    @commands.has_permissions(manage_roles=True)
    async def _removerole(self, ctx: SlashContext, member: discord.Member, role: discord.Role):
        l.used(ctx)
        await member.remove_roles(role)
        await ctx.send(f"{member.mention} lost the {role} role.")

    # kick:
    @commands.command(help="Kick a member.\nRequires Kick Members permission.")
    @commands.has_permissions(kick_members=True)
    async def kick(self, ctx, member: discord.Member, *, reason=None):
        l.used(ctx)
        await member.kick(reason=f"{ctx.author.name}#{ctx.author.discriminator}: {reason}")
        await ctx.send(f"{ctx.author.name}#{ctx.author.discriminator} kicked {member.mention} because {reason}.")

    @cog_ext.cog_slash(name="kick", description="Kicks a member")
    @commands.has_permissions(kick_members=True)
    async def _kick(self, ctx: SlashContext, member: discord.Member, *, reason=None):
        l.used(ctx)
        await member.kick(reason=f"{ctx.author.name}#{ctx.author.discriminator}: {reason}")
        await ctx.send(f"{ctx.author.name}#{ctx.author.discriminator} kicked {member.mention} because {reason}.")

    # ban:
    @commands.command(help="Ban someone.\nRequires Ban Users permission.")
    @commands.has_permissions(ban_members=True)
    async def ban(self, ctx, member: discord.Member, *, reason=None):
        l.used(ctx)
        await member.ban(reason=f"{ctx.author.name}#{ctx.author.discriminator}: {reason}")
        await ctx.send(f"{ctx.author.name}#{ctx.author.discriminator} banned {member.mention} because {reason}.")

    @cog_ext.cog_slash(name="ban", description="Bans a member")
    @commands.has_permissions(ban_members=True)
    async def _ban(self, ctx: SlashContext, member: discord.Member, reason=None):
        l.used(ctx)
        await member.ban(reason=f"{ctx.author.name}#{ctx.author.discriminator}: {reason}")
        await ctx.send(f"{ctx.author.name}#{ctx.author.discriminator} banned {member.mention} because {reason}.")

    # unban:
    @commands.command(help="Unban someone.\nRequires Ban Members permission.")
    @commands.has_permissions(ban_members=True)
    async def unban(self, ctx, member):
        l.used(ctx)
        banned_users = await ctx.guild.bans()
        # The argument is expected as "name#discriminator"; scan the ban
        # list for a matching entry and lift the first match found.
        member_name, member_discriminator = member.split('#')
        for ban_entry in banned_users:
            user = ban_entry.user
            if (user.name, user.discriminator) == (member_name,
                                                   member_discriminator):
                await ctx.guild.unban(user)
                person = f"{user.name}#{user.discriminator}"
                await ctx.send(f"Unbanned {person}.")
                return

    @cog_ext.cog_slash(
        name="unban",
        description="Unbans a member",
        options=[
            create_option(name="member",
                          description="Add the member name here",
                          option_type=3,
                          required=True)
        ],
    )
    @commands.has_permissions(ban_members=True)
    async def _unban(self, ctx, member):
        l.used(ctx)
        # Slash responses must arrive quickly; defer while we scan bans.
        await ctx.defer()
        banned_users = await ctx.guild.bans()
        member_name, member_discriminator = member.split('#')
        for ban_entry in banned_users:
            user = ban_entry.user
            if (user.name, user.discriminator) == (member_name,
                                                   member_discriminator):
                await ctx.guild.unban(user)
                person = f"{user.name}#{user.discriminator}"
                await ctx.send(f"Unbanned {person}.")
                return

    # nick:
    @commands.command(aliases=["nickname"], help="Change someone's nickname.\nRequires Manage Nicknames permission.")
    @commands.has_permissions(manage_nicknames=True)
    async def nick(self, ctx, member: discord.Member, *, nick):
        # BUG FIX: `self` added — the original omitted it, so `ctx` was
        # bound to the cog instance when discord.py invoked the command.
        l.used(ctx)
        try:
            await member.edit(nick=nick)
            await ctx.send(f'Nickname was changed for {member.mention}.')
        except Exception:
            await ctx.send("I am missing `Manage Nicknames` permission.")
            l.error(ctx, "I am missing `Manage Nicknames` permission.")

    @cog_ext.cog_slash(
        name="nick",
        # BUG FIX: description was "Sends a reciprocal of a fraction",
        # copy-pasted from an unrelated command.
        description="Changes someone's nickname",
        options=[
            create_option(name="member",
                          description="Type member here",
                          option_type=6,
                          required=True),
            create_option(name="nick",
                          description="Type new nick here",
                          option_type=3,
                          required=True)
        ],
    )
    @commands.has_permissions(manage_nicknames=True)
    async def _nick(self, ctx, member: discord.Member, nick):
        # BUG FIX: `self` added (same defect as `nick` above).
        l.used(ctx)
        try:
            await member.edit(nick=nick)
            await ctx.send(f'Nickname was changed for {member.mention}.')
        except Exception:
            await ctx.send("I am missing `Manage Nicknames` permission.")
            l.error(ctx, "I am missing `Manage Nicknames` permission.")
def setup(bot):
    """Entry point used by discord.py to load this extension."""
    cog = Moderation(bot)
    bot.add_cog(cog)
    l.log("Loaded moderation.py")
| 43.627451
| 117
| 0.628165
| 807
| 6,675
| 5.099133
| 0.138786
| 0.036938
| 0.040826
| 0.03791
| 0.780073
| 0.75966
| 0.740219
| 0.740219
| 0.73147
| 0.704739
| 0
| 0.000598
| 0.24839
| 6,675
| 153
| 118
| 43.627451
| 0.819613
| 0.006442
| 0
| 0.641221
| 0
| 0.030534
| 0.251283
| 0.063387
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015267
| false
| 0
| 0.038168
| 0
| 0.076336
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
732e20e69ed827322213de124a1f43421b60e7d2
| 188
|
py
|
Python
|
PeripheryFunctions/BF_iszscored.py
|
ClarkLabUVA/hctsa-py
|
4382a7e852d21cdfefdac1a4a09ea6e11abd9be1
|
[
"MIT"
] | 6
|
2020-08-14T00:16:19.000Z
|
2022-01-20T05:49:12.000Z
|
PeripheryFunctions/BF_iszscored.py
|
fairscape/hctsa-py
|
4382a7e852d21cdfefdac1a4a09ea6e11abd9be1
|
[
"MIT"
] | null | null | null |
PeripheryFunctions/BF_iszscored.py
|
fairscape/hctsa-py
|
4382a7e852d21cdfefdac1a4a09ea6e11abd9be1
|
[
"MIT"
] | 4
|
2020-08-14T00:22:45.000Z
|
2021-02-18T05:31:14.000Z
|
def BF_iszscored(x):
    """Return True when the series *x* is z-scored.

    A series counts as z-scored when its mean is numerically zero and its
    (population) standard deviation is numerically one, both to within
    double-precision machine epsilon.

    :param x: array-like numeric series (anything ``np.mean``/``np.std`` accept)
    :return: bool — True iff mean ≈ 0 and std ≈ 1
    """
    # 2.2204e-16 is double-precision machine epsilon (np.finfo(float).eps).
    numericThreshold = 2.2204E-16
    mean_is_zero = np.absolute(np.mean(x)) < numericThreshold
    std_is_one = np.absolute(np.std(x) - 1) < numericThreshold
    # Logical `and` on scalar booleans (the original used bitwise `&` and a
    # parenthesised `return(...)` call-style); bool() yields a plain Python bool.
    return bool(mean_is_zero and std_is_one)
| 37.6
| 110
| 0.691489
| 24
| 188
| 5.375
| 0.583333
| 0.263566
| 0.186047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049689
| 0.143617
| 188
| 4
| 111
| 47
| 0.751553
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b405d50e9523f05191a573068290a1dd685bfcd5
| 9,448
|
py
|
Python
|
hangman.py
|
carlosqzh/Hangman
|
a63f4fe1726702367f32e05955b15f7b690cee7b
|
[
"MIT"
] | 1
|
2021-11-08T02:41:20.000Z
|
2021-11-08T02:41:20.000Z
|
hangman.py
|
carlosqzh/Hangman
|
a63f4fe1726702367f32e05955b15f7b690cee7b
|
[
"MIT"
] | null | null | null |
hangman.py
|
carlosqzh/Hangman
|
a63f4fe1726702367f32e05955b15f7b690cee7b
|
[
"MIT"
] | null | null | null |
import random
import os
from typing import Type
menu = """
===================================================================================================================================
===================================================================================================================================
(/) (/)
(/) (/)
(/) (/)
(/) 7MMF' `7MMF' (/)
(/) MM MM (/)
(/) MM MM ,6"Yb. `7MMpMMMb. .P"Ybmmm `7MMpMMMb.pMMMb. ,6"Yb. `7MMpMMMb. (/)
(/)) MMmmmmmmMM 8) MM MM MM :MI I8 MM MM MM 8) MM MM MM (/)
(/)(/) MM MM ,pm9MM MM MM WmmmP" MM MM MM ,pm9MM MM MM (/)(/)
(/)'`(/) MM MM 8M MM MM MM 8M MM MM MM 8M MM MM MM (/)'`(/)
(/) (/) JMML. .JMML.`Moo9^Yo..JMML JMML. YMMMMMb .JMML JMML JMML.`Moo9^Yo..JMML JMML. (/) (/)
(/) (/) 6' dP (/) (/)
(/) (/) Ybmmmd' (/) (/)
(/) (/) (/) (/)
(/) (/) ╔═════════════════════════════════════╗ (/) (/)
(/)(/) Desarrollado por Carlos Valencia 🦊 (/)(/)
`""` ╚═════════════════════════════════════╝ `""`
===================================================================================================================================
===================================================================================================================================
Bienvenido al juego del ahorcado!!!
En este juego tendrás que adivinar una palabra que la computadora elegió al azar.
Al iniciar el juego contars con 7 vidas ❤️❤️❤️❤️❤️❤️❤️
Por cada intento fallido perderás una vida, elige con cuidado 👀
Buena suerte 🍀
"""
IMAGES = [ """
____
|/ |
| (_)
| /|\\
| |
| | |
|
|_____
""",
"""
____
|/ |
| (_)
| \|/
| |
| / \
|
|_____
""",
"""
____
|/ |
| (_)
| \|/
| |
| /
|
|_____
""",
"""
____
|/ |
| (_)
| \|/
| |
|
|
|_____
""",
"""
____
|/ |
| (_)
| \|
| |
|
|
|_____
""",
"""
____
|/ |
| (_)
| |
| |
|
|
|_____
""",
"""
____
|/ |
| (_)
|
|
|
|
|_____
""",
"""
____
|/ |
|
|
|
|
|
|_____
""",
]
win = """
.--------.
.: : : :___`. sSSSSs .S_SSSs .S_sSSs .S_SSSs sSSs sdSS_SSSSSSbs sSSs
.'!!::::: \\_\ `. d%%%%SP .SS~SSSSS .SS~YS%%b .SS~SSSSS d%%SP YSSS~S%SSSSSP d%%SP
/%O!!::::::::\\_\. \ d%S' S%S SSSS S%S `S%b S%S SSSS d%S' S%S d%S'
/%%O!!::::::::: : . \ S%S S%S S%S S%S S%S S%S S%S S%| S%S S%S
|%%OO!!::::::::::: : . | S&S S%S SSSS%S S%S S&S S%S SSSS%S S&S S&S S&S
|%%OO!!::::::::::::: :| S&S S&S SSS%S S&S S&S S&S SSS%S Y&Ss S&S S&S_Ss
|%%OO!!!::::::::::::: :| S&S S&S S&S S&S S&S S&S S&S `S&&S S&S S&S~SP
\%%OO!!!:::::::::::: :| S&S sSSs S&S S&S S&S S&S S&S S&S `S*S S&S S&S
\%%OO!!!::::::::::::/ S*b `S%% S*S S&S S*S S*S S*S S&S l*S S*S S*b
\%OO!!!!::::::::::/ S*S S% S*S S*S S*S S*S S*S S*S .S*P S*S S*S.
;%%OO!!!!!!:::::' SS_sSSS S*S S*S S*S S*S S*S S*S sSS*S S*S SSSbs
`%%%OO!!!!!!:' Y~YSSY SSS S*S S*S SSS SSS S*S YSS' S*S YSSP
`%%%OO!%%' SP SP SP SP
`%%%%' Y Y Y Y
/__\`-. =================================================================================
/ =================================================================================
(
\
"""
lose = """
...
;::::;
;::::; :;
;:::::' :; .S_sSSs sSSs .S_sSSs .S_sSSs .S sSSs sdSS_SSSSSSbs sSSs
;:::::; ;. .SS~YS%%b d%%SP .SS~YS%%b .SS~YS%%b .SS d%%SP YSSS~S%SSSSSP d%%SP
,:::::' ; OOO S%S `S%b d%S' S%S `S%b S%S `S%b S%S d%S' S%S d%S'
::::::; ; OOOOO S%S S%S S%S S%S S%S S%S S%S S%S S%| S%S S%S
;:::::; ; OOOOOOOO S%S d*S S&S S%S d*S S%S S&S S&S S&S S&S S&S
,;::::::; ;' / OOOOOOO S&S .S*S S&S_Ss S&S .S*S S&S S&S S&S Y&Ss S&S S&S_Ss
;:::::::::`. ,,,;. / / DOOOOOO S&S_sdSSS S&S~SP S&S_sdSSS S&S S&S S&S `S&&S S&S S&S~SP
.';:::::::::::::::::;, / / DOOOO S&S~YSSY S&S S&S~YSY%b S&S S&S S&S `S*S S&S S&S
,::::::;::::::;;;;::::;, / / DOOO S*S S*b S*S `S%b S*S d*S S*S l*S S*S S*b
;`::::::`'::::::;;;::::: ,#/ / DOOO S*S S*S. S*S S%S S*S .S*S S*S .S*P S*S S*S.
:`:::::::`;::::::;;::: ;::# / DOOO S*S SSSbs S*S S&S S*S_sdSSS S*S sSS*S S*S SSSbs
::`:::::::`;:::::::: ;::::# / DOO S*S YSSP S*S SSS SSS~YSSY S*S YSS' S*S YSSP
`:`:::::::`;:::::: ;::::::#/ DOO SP SP SP SP
:::`:::::::`;; ;:::::::::## OO Y Y Y Y
::::`:::::::`;::::::::;:::# OO =================================================================================
`:::::`::::::::::::;'`:;::# O =================================================================================
`:::::`::::::::;' / / `:#
::::::`:::::;' / / `#
"""
def word_transformation():
    """Pick a random word from the data file, with accents stripped.

    Reads ``./archivos/data.txt`` (one word per line, UTF-8), chooses one
    word at random and replaces the accented vowels á/é/í/ó/ú with their
    plain counterparts, so the player's unaccented input can be compared
    letter by letter.

    :return: the chosen word as a plain (accent-free) string
    """
    # Single-pass translation table instead of a loop of chained
    # str.replace calls over a tuple of pairs.
    accent_table = str.maketrans("áéíóú", "aeiou")
    with open("./archivos/data.txt", "r", encoding="utf-8") as f:
        words = f.read().splitlines()
    return random.choice(words).translate(accent_table)
def run():
    """Main game loop: show the board, read guesses, track lives until win/lose."""
    # Player starts with 7 lives; IMAGES[attemps] draws the matching gallows.
    attemps = 7
    word_selected = word_transformation()
    # One "_" placeholder per letter of the secret word.
    spaces = ["_"] * len(word_selected)
    while True:
        # Redraw the whole screen each turn.
        # NOTE(review): "clear" is POSIX-only; on Windows this is "cls" — confirm target OS.
        os.system("clear")
        print(menu)
        for character in spaces:
            print(character, end=" ")
        print(IMAGES[attemps])
        print("Te quedan", attemps, "vidas ❤️")
        try:
            letter = input("Ingresa una letra y presiona Enter: ").lower()
            # The assert messages are themselves input() calls, so a failed
            # validation pauses until the player presses Enter.
            # NOTE(review): assert is stripped under `python -O`; validation
            # would silently vanish there.
            assert letter.isalpha(), input("¡Solo se puede ingresar letras! 👀, Presiona la tecla Enter para volver a ingresar un valor.")
            assert len(letter) == 1, input("¡Solo se puede ingresar una letra a la vez! 👀, Presiona la tecla Enter para volver a ingresar un valor.")
        except AssertionError as ae:
            print(ae)
            continue
        # Reveal every occurrence of the guessed letter.
        found = False
        for idx, character in enumerate(word_selected):
            if character == letter:
                spaces[idx] = letter
                found = True
        # A miss costs one life.
        if not found:
            attemps -= 1
        # Win: no placeholders left.
        if "_" not in spaces:
            os.system("clear")
            print(win)
            print("Felicidades!!! encontraste la palabra 🦊", word_selected, "🦊")
            break
            input()  # NOTE(review): unreachable — follows break; likely dead code.
        # Lose: out of lives.
        if attemps == 0:
            os.system("clear")
            print(lose)
            print("Oh oh!!! la palabra que debías adivinar era 😲", word_selected, "😲")
            break
            input()  # NOTE(review): unreachable — follows break; likely dead code.
# Start the game only when executed as a script, not when imported.
if __name__ == "__main__":
    run()
| 41.991111
| 149
| 0.246401
| 853
| 9,448
| 2.709261
| 0.225088
| 0.236261
| 0.280398
| 0.30463
| 0.332756
| 0.315015
| 0.229771
| 0.198615
| 0.184769
| 0.173085
| 0
| 0.00503
| 0.495025
| 9,448
| 225
| 150
| 41.991111
| 0.457346
| 0
| 0
| 0.117241
| 0
| 0.172414
| 0.818634
| 0.123121
| 0
| 0
| 0
| 0
| 0.02069
| 1
| 0.013793
| false
| 0
| 0.02069
| 0
| 0.041379
| 0.062069
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b4101f7ba5a6182334ca1fc306abe72584c0b05b
| 172
|
py
|
Python
|
data/__init__.py
|
howiemac/evoke4
|
5d7af36c9fb23d94766d54c9c63436343959d3a8
|
[
"BSD-3-Clause"
] | null | null | null |
data/__init__.py
|
howiemac/evoke4
|
5d7af36c9fb23d94766d54c9c63436343959d3a8
|
[
"BSD-3-Clause"
] | null | null | null |
data/__init__.py
|
howiemac/evoke4
|
5d7af36c9fb23d94766d54c9c63436343959d3a8
|
[
"BSD-3-Clause"
] | null | null | null |
"""
evoke base database interface
"""
from data import makeDataClass, RecordNotFoundError
from DB import execute, init_db
from schema import *
from patch import pre_schema
| 21.5
| 51
| 0.80814
| 23
| 172
| 5.956522
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 172
| 8
| 52
| 21.5
| 0.925676
| 0.168605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b434221422fb56d463557dd07bc803dfa6707e23
| 7,304
|
py
|
Python
|
sdk/tests/utilities/test_token_refresh.py
|
inwaves/lusid-sdk-python
|
9ad2cfc71c998928bf23f54ce0821a8ea2db556b
|
[
"MIT"
] | null | null | null |
sdk/tests/utilities/test_token_refresh.py
|
inwaves/lusid-sdk-python
|
9ad2cfc71c998928bf23f54ce0821a8ea2db556b
|
[
"MIT"
] | null | null | null |
sdk/tests/utilities/test_token_refresh.py
|
inwaves/lusid-sdk-python
|
9ad2cfc71c998928bf23f54ce0821a8ea2db556b
|
[
"MIT"
] | 1
|
2020-10-29T08:35:32.000Z
|
2020-10-29T08:35:32.000Z
|
import os
import unittest
from time import sleep
from lusid.utilities import ApiConfigurationLoader
from lusid.utilities.proxy_config import ProxyConfig
from lusid.utilities import RefreshingToken
from utilities import CredentialsSource
from unittest.mock import patch
from utilities import TokenUtilities as tu
from utilities.temp_file_manager import TempFileManager
source_config_details, config_keys = CredentialsSource.fetch_credentials(), CredentialsSource.fetch_config_keys()
class TokenRefresh(unittest.TestCase):
    """Tests for RefreshingToken: initial issue, proxy configuration, expiry refresh."""

    @classmethod
    def setUpClass(cls):
        # Shared API configuration, loaded once for every test.
        cls.config = ApiConfigurationLoader.load(CredentialsSource.secrets_path())

    # ------------------------------------------------------------------ helpers

    @staticmethod
    def _build_secrets():
        """Split the source configuration into "api" and "proxy" sections.

        Keys whose name contains "proxy" go into the proxy section; every
        other non-None key goes into the api section. The client certificate
        is dropped because it is not needed for token-refresh tests.
        (Previously this dict was built verbatim in two different tests.)
        """
        secrets = {
            "api": {
                config_keys[key]["config"]: value
                for key, value in source_config_details.items()
                if value is not None and "proxy" not in key
            },
            "proxy": {
                config_keys[key]["config"]: value
                for key, value in source_config_details.items()
                if value is not None and "proxy" in key
            },
        }
        secrets["api"].pop("clientCertificate", None)
        return secrets

    def _proxy_fixture(self):
        """Common setup for the proxy tests.

        Skips the test when no proxy address is configured; otherwise writes
        the secrets to a temp file, fetches Okta tokens from it and returns
        ``(original_token, refresh_token, proxies)``.

        NOTE(review): the temp file created here is never removed — TODO
        confirm whether TempFileManager cleans up after itself.
        """
        secrets = self._build_secrets()
        if secrets["proxy"].get("address", None) is None:
            # was: f"missing proxy configuration" — f-string with no placeholders
            self.skipTest("missing proxy configuration")
        secrets_file = TempFileManager.create_temp_file(secrets)
        original_token, refresh_token = tu.get_okta_tokens(secrets_file.name)
        proxy_config = ProxyConfig(
            address=secrets["proxy"]["address"],
            username=secrets["proxy"]["username"],
            password=secrets["proxy"]["password"],
        )
        return original_token, refresh_token, proxy_config.format_proxy_schema()

    # -------------------------------------------------------------------- tests

    def test_get_token(self):
        """A fresh, unexpired token is returned unchanged."""
        original_token, refresh_token = tu.get_okta_tokens(CredentialsSource.secrets_path())
        refreshed_token = RefreshingToken(token_url=self.config.token_url,
                                          client_id=self.config.client_id,
                                          client_secret=self.config.client_secret,
                                          initial_access_token=original_token,
                                          initial_token_expiry=3600,
                                          refresh_token=refresh_token)
        self.assertIsNotNone(refreshed_token)
        self.assertEqual(original_token, refreshed_token)

    def test_get_token_with_proxy(self):
        """Token refresh works with a proxy taken from the HTTPS_PROXY env var."""
        original_token, refresh_token, proxies = self._proxy_fixture()
        with patch.dict('os.environ', {"HTTPS_PROXY": proxies["https"]}, clear=True):
            proxy_url = os.getenv("HTTPS_PROXY", None)
            if proxy_url is not None:
                refreshed_token = RefreshingToken(token_url=self.config.token_url,
                                                  client_id=self.config.client_id,
                                                  client_secret=self.config.client_secret,
                                                  initial_access_token=original_token,
                                                  initial_token_expiry=1,  # 1s expiry
                                                  refresh_token=refresh_token,
                                                  expiry_offset=3599,  # set to 1s expiry
                                                  proxies={})
                self.assertIsNotNone(refreshed_token)

    def test_get_token_with_proxy_from_config(self):
        """Token refresh works with proxies passed explicitly in configuration."""
        original_token, refresh_token, proxies = self._proxy_fixture()
        refreshed_token = RefreshingToken(token_url=self.config.token_url,
                                          client_id=self.config.client_id,
                                          client_secret=self.config.client_secret,
                                          initial_access_token=original_token,
                                          initial_token_expiry=1,  # 1s expiry
                                          refresh_token=refresh_token,
                                          expiry_offset=3599,  # set to 1s expiry
                                          proxies=proxies)
        self.assertIsNotNone(refreshed_token)

    def test_refreshed_token_when_expired(self):
        """An expired token is replaced by a new value on next access."""
        original_token, refresh_token = tu.get_okta_tokens(CredentialsSource.secrets_path())
        refreshed_token = RefreshingToken(token_url=self.config.token_url,
                                          client_id=self.config.client_id,
                                          client_secret=self.config.client_secret,
                                          initial_access_token=original_token,
                                          initial_token_expiry=1,  # 1s expiry
                                          refresh_token=refresh_token,
                                          expiry_offset=3599)  # set to 1s expiry
        self.assertIsNotNone(refreshed_token)
        # force de-referencing the token value
        first_value = f"{refreshed_token}"
        sleep(1)
        self.assertNotEqual(first_value, refreshed_token)

    def test_token_when_not_expired_does_not_refresh(self):
        """A still-valid token keeps its value on subsequent access."""
        original_token, refresh_token = tu.get_okta_tokens(CredentialsSource.secrets_path())
        refreshed_token = RefreshingToken(token_url=self.config.token_url,
                                          client_id=self.config.client_id,
                                          client_secret=self.config.client_secret,
                                          initial_access_token=original_token,
                                          initial_token_expiry=3600,
                                          refresh_token=refresh_token)
        self.assertIsNotNone(refreshed_token)
        # force de-referencing the token value
        first_value = f"{refreshed_token}"
        sleep(1)
        self.assertEqual(first_value, refreshed_token)

    def test_can_make_header(self):
        """The token can be concatenated into a Bearer authorization header."""
        original_token, refresh_token = tu.get_okta_tokens(CredentialsSource.secrets_path())
        refreshed_token = RefreshingToken(token_url=self.config.token_url,
                                          client_id=self.config.client_id,
                                          client_secret=self.config.client_secret,
                                          initial_access_token=original_token,
                                          initial_token_expiry=3600,
                                          refresh_token=refresh_token)
        header = "Bearer " + refreshed_token
        self.assertIsNotNone(header)
| 41.5
| 113
| 0.56517
| 703
| 7,304
| 5.588905
| 0.153627
| 0.054976
| 0.051922
| 0.038178
| 0.786205
| 0.786205
| 0.770425
| 0.753627
| 0.734284
| 0.734284
| 0
| 0.007527
| 0.363363
| 7,304
| 175
| 114
| 41.737143
| 0.837419
| 0.021084
| 0
| 0.68
| 0
| 0
| 0.046492
| 0
| 0
| 0
| 0
| 0
| 0.072
| 1
| 0.056
| false
| 0.016
| 0.08
| 0
| 0.144
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b46cda0673bc078c4e86669cdc59f7d04d720ca0
| 100
|
py
|
Python
|
project_lexicon/annotation/admin.py
|
unfoldingWord-box3/lexicon-poc
|
916273c4e6f9873cb8f32a1b7314713deccf5ee5
|
[
"MIT"
] | null | null | null |
project_lexicon/annotation/admin.py
|
unfoldingWord-box3/lexicon-poc
|
916273c4e6f9873cb8f32a1b7314713deccf5ee5
|
[
"MIT"
] | null | null | null |
project_lexicon/annotation/admin.py
|
unfoldingWord-box3/lexicon-poc
|
916273c4e6f9873cb8f32a1b7314713deccf5ee5
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Annotation, AnnotationLabel, AnnotationScheme
| 25
| 65
| 0.85
| 11
| 100
| 7.727273
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11
| 100
| 3
| 66
| 33.333333
| 0.955056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c343b5fbf9f539f83ba4bed573b71d7834043f62
| 13,972
|
py
|
Python
|
src/axislogo.py
|
asimba/axis
|
e7c5b7ee38b711c626fc43a1930277ffdf85a3f6
|
[
"BSD-2-Clause"
] | null | null | null |
src/axislogo.py
|
asimba/axis
|
e7c5b7ee38b711c626fc43a1930277ffdf85a3f6
|
[
"BSD-2-Clause"
] | null | null | null |
src/axislogo.py
|
asimba/axis
|
e7c5b7ee38b711c626fc43a1930277ffdf85a3f6
|
[
"BSD-2-Clause"
] | null | null | null |
#----------------------------------------------------------------------
# This file was generated by /usr/share/pyshared/wx-2.8-gtk2-unicode/wx/tools/img2py.py
#
from wx.lib.embeddedimage import PyEmbeddedImage
axislogo = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAABHNCSVQICAgIfAhkiAAAAAlw"
"SFlzAAAB/gAAAf4B6HfDfQAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA"
"ACAASURBVHic7X13eBzV1f57p27vqy5Lsoqb5N4AGxeKC8bBBhwIJUAIhl/oJDiQEMJHMBhC"
"CJDQiwMBE4qNjSvNvWLcZFsukiWrS7baasv0+f6Y3ZWwsVWwsfL7eJ9nn1mt5tw597y3zNx7"
"zhmi6zp+wrkDda4V+L+Onwg4x/iJgHMM5lwr0FUQQqiC82eMyupdMNjq9AwCgFBL4+7SI4W7"
"Cjct2qrrunaudewKyH/TJHzJ1ffNzMgeNs+T0CuHoilQhAAEgA6oqoqGuvLioyXfzvnio+cW"
"nmtdO4v/CgLy8yd7+p438emMvGE3MSxPMzQFiiKgKBKzPzRNh6bpEISIevTQt/MPbP76wb17"
"Vzaea907Qo8nIP38880Tz7t1XVJ63nAAYBkKDEOBpggIRUBAoEOHrulQNR2KokFWNNRWHNr+"
"9eY3LqzYtClyrutwOvT4SXh0/own2hufZWmwDA2WpcExNFiWih7bfmcZCknpecNH58944lzr"
"3xF6NAGXXHXvpPScwb8BAJomBgEMBZalTiAhZnwqfg5NE6TnDP7NJVfdO+lc1+N06LEEeLKz"
"nVl9Rz/HcmaOEIBlaDAMHTUwDY6loKuCrKmCzLEUuBgpDBU9jwbHm7msvqOf82RnO891fU6F"
"HnsbOmTwtOluX1o/AGBoY9yPGZ+mCfbtXPtaWdGWFwCgd7/RdxcMn3AbIboxKeuAputQVQpu"
"X1q/IYOnTQfw7rmsz6nQYwlITMmbAACEAAxDxUlgGAqHi7Z++OrT99yuR+8gCCG33/fn11x9"
"Cs6bBUSNr+lQGQqKqsXK6pEE9MghiBBCWx3uiQBA04bxY2N7oLmuat3i92fr7W7fdF3XV695"
"f3ZrY11V+3mAoSnQNAWrwz2REEKfuxqdGj2SgHFTb53o8aVnAABDkzgJDEOhoa58yc6dq5tP"
"lNm5enXz8fryJcZc0WZ8hibw+NIzxk29deKPX5OO0SMJSM0eOh6EgCIENGUYkWYoEGj6/sIt"
"H55Kbl/hpg+JrukMTYFmonIUBYqijDJ7IHokAQzL+QGAirb+WEuuqyouWvHR62tOJffZgtfX"
"1FYWFzEx40dlKZrEy+xp6JEE6NB5AKApArqdIRVFKu5IVlXE4tjwY3wIaIrEy+xp6JEEUIRi"
"AYCiDOPRFAFDU5CE0JGOZCUhfISJzhkxWYoi8TJ7GnooATRLCOLGi7XiYEtTWUeywUBDGR0z"
"Pk3iJFI0/RMBnQYBF1vtpCgKNGUMJ/V1lWUdidbVVJXFh63oBGz0APITAZ0FIYSlondBbUQQ"
"SJFAuCPZiNAaptstV0eND5qmuB9D966iRxKgqkorIcZyM0UQN6SmK3JHsrqsyPR3jA8QikBT"
"5dYfQ/euokcSEIkEiggBCCHRD0ARAkmG1JGsDEWKtfr2ZQiR1qIfQ/euokcS0Hysuqi94Y2h"
"CFDFlg57gCqG5dj5MVlCgOb6mp8I6CyKCzfvVGRBJzD2fGOtOCKoHRIgCILcvuWDAIok6vuL"
"Nuz8MXTvKnokASVF6w81N9SUG4Zs+93rSTB1JOvxpJhIOyFCgKbj1eUlu9YfOivK/kD0SAIA"
"QBIjB+J/RInIyu2b2JFc75w+iUCUuCgPohA+cBqRc4oeS0A4Emgzmm5ssni83g4JcCf4EwHj"
"fOjfU1YPQ48loLWxoUiPGj4G3mrrkACbxZqoo01I14Fg47EeOQEDPZiAytI9n4Vbm0IAoOs6"
"dF2HibN0SADPWxJ1rU0mFGwOVZbv+uzsa9w99FgCdmxYXH28oXa5YUhA0wGaYzskgGLZRE3X"
"oUV7T0Nd1fINny+u/jF07g56LAEAUFW2f6Gm6dB0w/GKApPckQxFM8maZpyv6TrKjuzv0W6K"
"PZqARfPnfnysrrJMi3q9Mbypz+n2dgkhNMua+6jR8+uqy8v+8/rjH/+YOncVPZoAXdeVhvrq"
"RTG/T58/2fezK28efqrzr7zm5uE+f7JPUzVomo6G+upFuq4rP6bOXUWPJgAADuzZ8IEgCpqq"
"aVB1gn6DRgw61bn9h54/SNUBxXDS1fbu3PjBj6lrd9DjCfjqs39tq6ko2ayqOlRVg8ObUHCq"
"c10uX4GialBVDVVHSzavXPTGth9T1+7grDtmuVP6Z3AmSx+Od9gYlndEIk0Hag9v2dKVMupr"
"yhf2zh1wgaJq4Hhr/qnO4zhzvqJoUFQNNVXlXZ58E3NGnm+xePMUWQxIYiAoCeGDTdX7j3a1"
"nK7grBGQmjW8r9nT6/60vDHXms02G83QoAiBqqrIH3vjbjESXCE0lb5VUbLzcEdlFe5a/X5e"
"wahHE/1+B8ebCgghzIljOyGEefn9dQWKqqG54Xhgx/Yv3gfu7VDP9OwhuSZ31i0Mb52Sln3e"
"II7lonsPOoRwMJgzbOaCSGP536pKt5+Vp+mzQkD24OmPeTOG/9bucFjMZhN4ngUTJUDTNHg8"
"zkGCIA1qdiVem9JnzA3VBzesP11521Yvq73vsfkLvB7vbKc7yTttxi0jAGxuf860q28Z4fIm"
"eWVFQ0VF6YJtq5fVdqRnSp8xYz0pQ9+1Od0ZFrMJJhMHlqFBURQ0XYeiOGyiz//r1ta067IH"
"T/9rya4lj/5A05yEM05Ar/xL7/el9H3E5bQTm80Cq8UEE29UjFAEmqZDkhQIggizmc8w8fzi"
"jAGXzj667/OPTlfuli2rnkrJyJnl9Xjd2f2HDcEJBOT2HTZEUXU0NzU2fbNu2VN46PrT6pkx"
"4NKrfckDXnW5PW673QKbxQSTiQfHMaAoAl3TISsqBFGCxcJbLGbrI5kDL20p2/P53364ldpw"
"Rifh1H7jr3Un5Mx1uxzE5bLD63bA53HC73PB73cjwe82vvuc8Hqd8Ljt8Cf43amZfd9M7FWQ"
"fbqyN69YUHb08L5XFEWDw+0de+L/Hd6EsbKiovjQ/ldWr1hQdrqyEnsVZNu9GW+6PR6322WD"
"x22H1+uE32fomuB3wx/V1edxwut2wO1xksycwXN75Y+/9odZ6bs4oz3A7kyZ43Q6eYfdCpfD"
"BpfTBofdAovFGIYoioKqqhAECaGQAIahQUCgapo9kNLnLnQwaH+zddWzKek5N/AW1zhCCKvr"
"ugwYm/hPvfb5uPq6msotG5c9C/zqtHra/Dl3OZwuu91ugdNhg8tlg9NuhdVqDEM0TUPTNIii"
"jHBYAMsyIIRA13Ten5A9B8CCM2WzM9YDEjOHnG91+gdZLSZYrWY47BY4nVa43Xb4vE4k+FxI"
"8Lng97ng9TjhdtngdFhht5ths5rhSeh1gz9z+GmXGrZ+ubDh8IGdL7i8ycnTr7/30tjvM6+/"
"91KnJzm5+ODuF7Z+ubDhdGX4M4cnm22+GywWE2xWM+x2M5wOK9wuG7zR3hrT1ed1wu22w+m0"
"wmG3wGo1w5+YOigxZ+T5Z8puZ4wAhy/rcrPJaEEWMw+LxQS7zWhhHrcDfp8LiX43/F4XPG47"
"HA4rbFYzrBYTzCYeDpfbY3N6xnd0nTWLl71YU3m0KD1zwITYb6m9B0yorigr+vLjJS92JG+2"
"OcdbrQ6PiedgNvGwRolwOKzGkOiN6ulzweN2wOmwwW4zerHFzMNsMcOfkHP5DzRXHGeMAIY1"
"57MsA5ZlwHEMYhW0WU1wOazweZxI8LvhcTvgsFthtZhhNvHgOQ4cZ8gxrMnd0XVKS1cLh/bt"
"fJY32+MEmEyOCYf37Xi2tHS10JE8RXNupp2ePBcjwgyH3QqP24EEvxs+jxMuhxU2q9FATHyb"
"nhbrqZ9FuoozRoCmq0HDjbDNk41haHAsA97EwW63wO93wWY15gOWNWK+2p/PMWynYrk+euvR"
"t8KtrdJ542b2O2/czH7BYKv0wRuPvtUZWQLG+X16siwNnmdhs5rg97tgt1vAmzhwLHOSntC1"
"4A+zVhvO2CSsq0rA2MEy9qNiGyKapkNTNciygnBYgKKo0DRjsSx2Tux8TevY6wHGufrPrp3z"
"ZEafYVNAAVXlh57UOxnwrGuqhFPpqWlQFBXhsABZVhBb1DtRT1WRA9231HdxxghQRKlOUVUo"
"igpZViDLCkRJRkSQEAxFQCgCNhiGKMoIhQQIogRRkuPnCkJYbq4oXt7Z6y1eMG/JyPGzHgMF"
"bPv6wyWdlQs2FC+X0vPmKorKtddTEKN3ZjSNiCBCVlQEQxFEhO/qqSgqIlKorntWOhlnjICW"
"htLX3Ympd4oOq1sUJYQjIkKhCDiWASGAKMqgaQqyoiAUEtAaDCMcFowKijIaj9dtranZu7+z"
"1/Nke5y2RE8g9r2xpLGlM3LHqw8VDTjvuo2i2zVBFI0GEg4LaOVZ0JShH8swUFUN4YiAQGsI"
"oVAE4YgIUZQQaGlqaqk/9Hp37XQizhgBDVWFFbnDZs53e3z3hcICeJ6L3z8rioogH4k/B0QE"
"Ca3BMAKtYQRDEYQiEbQ01nW4eOYe4nalDkn9E+fgx+X+vG8/s8tspgiB0JL0+LB7RxSJgcja"
"qp1V/9O0s+mkGLL2CLU2fBKJZEwIRQSYQlE9KQJVMYze/jkgGIq06RkWcKyuYn5VcWHFmbLb"
"GX0Qa6ytfM7m9E7nODaboWkQAIqqIiKI4DkWhBCo7R5wAq1hBFpDKDtU+M7+rZ8+d7qyMydn"
"XpY1PvsZZ4qrn5kzg2c4MHT0AcmumxVVGSoq0lCr1z45c3Lm78pWli07VVlle1f+M3fYz4by"
"/OBbWIYGia5RiaIMPtoTdF2HKMkIR0QEW8MIBEKor60qqa88fFo9u4oznqzDnz04LzV9yIKU"
"9KyhDrslfp8f6w2apkFqNzccPVK06psv50/Xdf2Ujrc5M3Ju8g1KfNXpdHI2kxUW3gITy4Oh"
"WVCEgqZrUFQZgiwiLIbR3NwiHd12ZHbxouL5p6w4IVz/0dcsTsvKm+ywG88kZhMHjjOe2HVd"
"hywriAgiQmEB1VVHvy09vOHaiv0dr952BWclW0pSUk6Cp/fQt5JSek+y2+0MH1uMI8Yyrywr"
"aGo8XldffXT+0fJ9T7dU7D1lWpnk87IyUy/0b/Ql+1OcFgfsJjusJivMnAkcw8UJkBQJEUlA"
"SAihVWhFbXV99aFVRy6o2Vxadqqynen5noTEnAf9Sb1u8noTEk0mYziiCDEIUFQEAwGlrq58"
"ZfX+wltqanYcO9O2OqvparzJAwZ403KvsDvcl7AsZwchzbqmtYRaW3Y2Hd31clXVweMdlZF/"
"26BPk/ok/cxtdcFldcFpccButsPCW8AzvLF0rGkQFaP1t0Za0RIOoDnUjNLCssXb/7n1io6u"
"4Ujt40tJz7/DanUNYRnaSSjKpSpyazjY+OXx0gOLa2uLCs+MRU5Gj84XlDQwqSDrqpw9fo8P"
"HpsHHps7SoITdpMNJs4EmqKhaioESUCrEERLuAXNoWY0BptwrPEYvp3/zcDaPbVnzYA/FD02"
"VwQA2LJtw8xmM0ysCWbODAtvgc1kg8PsgNPigNVkBUszkFUFISEEQiiomhofjqxWKxzZ7mEA"
"fiKgO+Ac5v4sw4ClWXAMB57hYeZMsJoscFoccFgc4FkeoiyCoWgomgJBjoBneHAMB5ZmYU+w"
"9T/X9TgdejQBtIlOpggNmjI+DM2AiZHBmuAw2+EwOxCIBCDKEjiGA0OzYGgmLsNyHXvTnUv0"
"aLcUKSwXw8gIB8BYj0FsXUbXoGoaJFWGqmnQde07/4/JieGOo+vPJXp0DxDqIvtVTYWqqVBU"
"BYqqQFJkiLKIsBQBE25BRIpAUiSEpQhEWYSkyPFzVU1FsDrU6eWNc4EeTUC4vHVjqDkkOCwO"
"kyhLEGQBYSkMXuBAURRkRQZDM1BUBWEpjKAQRFgKQ5AFiLKEQGNAaC5t2Hiu63E6/Oi3oRab"
"0+FP6uW0WJ02ljPZeJOJ5002juPNHMuZGZrhOEIzLM3wXPXRA6ZG0/6x/abl3uC1eeC2ueCy"
"uOCwOGAzWWFiTfHnAEEWEBRCCIQDaA43oynYjG8+3v2uuTVrfUp6H0HTZElTZVlVJEmRBUUS"
"I5IYCUqSGBFlSQyGQ4Hg8dqjLaFgyxlbau4MzhoBvbILEvoOumBYUmpWnsPpzTGZLbkcx+fa"
"HM5ePGdhGI4DzTAADB8cTdPReKzmeGtryzfB1uatlaUHt9ZWHs5paT22xXsx+Ud6btool9UJ"
"h9kJm9kKC2cBx3KgCQVV1yDJktELIiEEIi04eqh865HFLXc67Amjk9Kyi1Mz+o6yO5yjrHbX"
"CJ8/2deW+FWHqshQFRmSGFGCrc3lsiQdFiLhw4GWxuLaqiOHinZt+PZoSWH92bDTGSEgM2+4"
"c+gFl8/wJqQOtlgteTzH57k8vgyH080YCTOotqj1dhGM9TXltaFgYFugpWlb6aHCrZ/95x9r"
"Y54OP7vh979QFLXPsgXPPNrr0l7D04ZlLkjulZhjLEVYYGJNYBnOcPbSdciKMUSFhDBqK2qK"
"D28tvbZk2YHtk3/+wGMMwxxc+t689wHDg+KyWb8Zl5VbMMrhdI+02p0jE5PTk9rrFQsKUaOb"
"NIGWZqW56fhRURAPRSLhQ8frqnZtX794UenB7Z1aAj8duk2Ax5/Gj5hwzYy0zH7TvP7kKR5f"
"gqctbSTVljasXdoAQgiO1VVWtTQeX15yqHD5Oy8/vvT73MevvOmhmf6krDnLPnxxfMWRwggA"
"JA1KSkgelTYvdUDqjTazleJZHixtrLDqug5ZlRGJRLSK/ZXvlK4vnlO7u7YeANKy8s2Trrpr"
"TUPtkXmfvjvvpCVvQghzw+1/nJbdp2Cqy+Of6k9MSwXQbpcsmpFX1aAoWjwzb2NDXePxupoV"
"FWVFS7d8+f6ixmOVYnfs2CUCCCHUuMvvmJqamT/d7U2c6k9KS+VYI4dnPHlqO+Mz0YRJx+ur"
"qgPNDSuPHNy77LXn/7TkdD77s255aEr/IeP/vWPLF79c8t5fl574/8zLc2Z4UjzjrU7TAN5q"
"zqcIgRiS9kaCkX0tVY1rDn56cNGJMpddc/+0waMu+deB3Wuv/2T+UytOUz/m1nseuzwnL3+q"
"0+Of4k9MTdU0HaoaJUBtI0BWVMiyCknWUF9bWdVwvHZ5RXHhkjWfvbS8KxncO0UAIYRMvmbO"
"bamZBQ/4Envl8jwDjmXAsTQ4jgL3ncy1BgliJBiprS5bWFa8/9PPPnp7ydHSQx3mefjFr/8w"
"YfDoSR+VlRQtf2nu7Bs7oRcFAJ2p8G1zXn4no3ffqbu3fn71h289uboTZbP/77d/ubx3Xv4V"
"KWlZM01mu1VR2xvfOEqyCknSokcZ1ZWlhytLC59d+cG81zqzT90hAWMm3zIyq+/IuSkZAy5i"
"GBocR4PnaHBs25FlY0cKrc3H6+qqS9/7dsuaVz7+9yudXjv/xa8fOn/EmKkLI4KoL/3wlVGb"
"vvq4vLOyncHoCVf2mnLl7K0mE0d2bF4x8z9vztvUWdkrfn5L9nkXTpqdnN77BofLn6QoRsuX"
"5FgvUCFK7Y6igvIjhV+VHtj28IaVb502RuGUBOTmj3EVjLrs0fTsQbeZLA4Lx1JRPxoaPEvH"
"v3NR4x+rPVpUU1E8f+0Xi99Y//WyLqWN/+Udfzhv+AWTPzLbvKlrP//4vndf+tPfuyLfWVxz"
"25/vvfCSK58LtzZU7dzy+dXvvTZ3c8dSbRg1ZqJ78uXX3pqakXNzQkpWPylq9LjhJQViu++B"
"luZwRcnu1wq3Lnvs8N4N37tN+r0EXDTj7sm9+41+0ZPQKweAYXSOBh8zeowIjkbT8eqSg4Vb"
"573w1Jy3uxOPdcdv504fOuritziLw3tw7851T/7+FxPO1lswCCHUA4//e3WfAYMvFCOBht3b"
"vrrl9b//sdMeFe3KYe77wzM39x80eo7bl5IdIyBOgqRCjB9VNNaXFx8p2nLXV4teWHlSWScS"
"MG7a7An5I6d+ZHN4vYBhfBPPfIcAnmOgyuFw6aHdL6/9YtETa79Y3NQdg9z/yHO/Hjr6oucp"
"xmxuamoSln/y9sXLPnr5rD65Tp45+4JJV9z0pcvtMmmKENm97et7XnjygW55OUyYdIV74uQr"
"/9g7b+DtDGe1fNfwxlEQjWMw0NCwd9vyq9cuffU78893CBgz+ZbRA0dPW2h3JSQD32N8ngHP"
"ElSVHVh1oHDrQ/965alup4B5+ImXHh06+uJHNJ2iRUnF5vWrXnzpybvv7m55XcFtv/37CyPG"
"XHKXiWNAEVXdte3rx5959M7Hulver+58eEj/gaOfTMvqN0mSNQiSClFUTiKhtbm+Zs+WpTM3"
"rHwrHqIVJ2D0RdcNGnz+FZ86vcmZAOJGN/FtLV9Xxcjeb9c88sKT9z/bXWUJIcxfnnvnHwOH"
"j50tyRoEUUFp8cGNC954auq+XRt/lGWAvoPOd8y66cHlmdl9LogNpXt3rH/1sd/dfOcPCWu9"
"/5G/PzBoxITHKcZkjhldlBQIYluPaGmoKdu16dMrtnz13m4gSgAhhL3pt29/m5CaUwAgfofD"
"8wxMUSIioYaqwu1rfv3G83865X10Rzhv7EXua2+++42cfkNniqICQVJQW1NdumLh/Es+X/x2"
"SWfK8GcPzrNYE8dzrL2AZtgCAFAVuVCSWwvDobo1x0p2dSov0MRpv8yeNP3GLxJTUrN4joaJ"
"Y1BycNfCD9958dZtG7/u1pAKALc/8MSUwSMmvG62e1LFqOHbeoQxYddXFRfO/+vNw3Rdl4mu"
"65hy7e9nDxw17RXAyNXP84ZCMRKaj1Vs/3bTyhs+euf5bgeq3Xb3QxeOHnvpi8npOQMN46to"
"bm5uXbPyk+kfvPnkmo7kCSFUr/6XznH6M+aYrQ4nz0XjzqJhT4qiIhQMtByvLp5Xvv/zeZ2Z"
"yK+6ac74sRfPXOJyO+08ZzS22qqSPds2fHHXWy/NW9fdul5z8319R46d8q7H32u4IClx4xvf"
"VSiqhj1bl96+YsFTrxIA5JcPvLk9MS1vKCGIjvmG8U0cg1CgvmTNivcvWvbJm90K1ySEkEfm"
"/vOh4edPeJjlbNZYVwyFBW3dl4tvn//iHzqcAFN6D82wurPecidkTLTZLLCYeZhMXJQAKu5U"
"KwiGS2R1+aGvA8eKb6k+sqNDna+/4/FfXzBx+isWC0/FGp0shkI7t62e++Qjd3fa6fdETJ91"
"a8ZFl13/lc2VkC2KUePHJmdRRU3FwR3/evZXw8nEK+6+bviFV/0bhLQNPRwDnqehSMGW7euX"
"Xfb+63O7dWfy8xtn97rwoste7jNg2FTjFq1tLNy28ctn//nknb/tTDnZw2Ys8SdlX+6MhjxZ"
"LSaYzXzchyfmaxSJGE5UgdYwyov3fla07ePpnSn/tt89/9fh5138QPvbbY6jUVy0Y/mGr5fd"
"8dF7r3frofDGO/5wwagLpy9jeKtTlFSI7eYCSVKwfd3H11OJaXnTQdqlCG6X8Hr/zg1Pd9f4"
"v3vk6VlXXjt7Q07foVMjgoKIoCAiGp/9hd8ufumpu37fmXLS+064x+3PvNzpsMLltMHtssPr"
"cbaFPfnd8XAiI/TJDpfThvTe/S/P6H/xPZ25xut/vff3RXt3LI7pFxENfXv3GTL1imtu23D/"
"w/NmdccG77z8xMbCHeuepinqhETkBBRNITEtbzrFMKwbiGYqp9pWL4OBhuZt65e/2dWL/uo3"
"c0b8Y/7SzyZMmfWB1elLDwsywoKMSPRztLRk5+eL37m1M3cbHo/Haff1+qPdZoERUGeJxnLZ"
"4fc6keCPEuB3we91wuuxR2PPLHA4rEjKyPujx+PpMOhD13Xl62Xv3lpRdmSn0VgMXcOCDIvD"
"lz5u8tUfPP/m4s9uvuPBEV21x5a1y94MNB9vjq0It7czw7BuilC0gyCWnRbRbLUEgeaGjXu2"
"r+20H/wVP78545mX//PqZVfetC4zd+A0SdZJOCIjHJERiRgtqrq6umzzmqU37tyyqkOPOADg"
"7LljrXavzxyNObNZLXDYrXA57fGAukR/W+Cfy2mH4edpxHR5fUk+izv3pJDW78PurZ8f37pu"
"6Y21NdVl4ViPjSgIR2RIsk4ycgZOmzLjl+ue+ud/Xv3ZrJszOmuXXdvW1AWaj28kiOYwjdrZ"
"sDftYCiKdhIqlqGWxBINghCqU+4cV113e/7QUeOuv/L6O3/ldHl9iqojHJGhqMaKoaRokGUV"
"lUdLvt285tPrVi+df7Czypud3v4cx4LnWCPmzMzDajXBYbfA5bLBYbeCYxlIsoIAF4Ku65Bk"
"w6PZxHPgORY2T1J/ACcta38fvlz82t4Lp/xy8qhx099L7ZU9TFE1sCoNRdHAshQY2mTKGzDy"
"tuS03jP/8vz7b+7ctu7fn7z3yt6OyiWESo7N5ARtOU1phnYyiiJWEqBvbEMoFobjS0wvuGPO"
"8w++PO+ep08scMCgUa4pM274eUp61lUzb7hrPM+bGVXTEYrIxtq5okFWNSiyCknRUHJg14p1"
"qxbctGvLyi5t67GcuR9DR98fxhpL4DzHwmTiYbea4XHZYbGYEA4L0DUNkYgEnmPBRYPwGIaG"
"yWzr15Vrrlvxr4MDR06aesEl18zP7jNoCqtoUBgKjEKDpTUwDAXO7PLl5Y+ak5U38IG/vbFs"
"TXVl6ccrFr37n327t5604Hbnwy882G/gBQXtw5wMUgBFliqZlsbaVelZ+RfH47uiqb4YlmcH"
"Dp8w7/F/LL1G0+RiTZZreZMpjef53Lsefj7b6fKaY2SFI7Lx2ihVi29eyIoRF7Zn+9q3l33w"
"3B0N9RVd3jHSdC3YPgtuPJ09TUAzNDiOhdNhhaKooKPvF4ttf8ZkNHQ9oG7PtlX1noT0GVNm"
"3fPygCHjblY4GoyitXszUyzIj2US03IvTkrPuzi33/Dnnn3z8xJZFA4LolDJMFwSxbA5g0ZM"
"HKKqgKxq8dRrsS3PlsaaVUzJvk2fZvcb9RfG4eRj22+UqoNAAxgKvsSMIRRNDaGptmTYABAR"
"FWPbTgc0zTC8qulQo+liwuGIsnvbF08tfPuxP3X3XloSQvtiwXPGNbTo0KZGVO5H2gAAA8pJ"
"REFUY80i8ch7UZQhyyoUVYWqanEZIRjY151rN9ZXiISQX02/8ZGqgSMv/r3FbGHiBDDtXxBB"
"gSIAa3aa3WZnPoD89nvKiqJBidolZiNN0xEJt4qH9276lNm3fVXxNb95YaGl3/BrKU0HUTUQ"
"En1FrK5DowkoVYt6EJB4Ntp4NkNNj29eK9GkSs1Nx5v2bv96zvIPnnkdb/25O/UHAISaK/bI"
"cgFkWYEkyUYwXTSmK8Ax0HUdLENDVtR4zJkQDaqTJBmSKKGlsXxPd68fbTiPTLr6gfIBwybO"
"c7p9buM9lcZLgqh2d43tGyd0wy8vlnAw3jijyaRUTUfV0QML921fVcwAwLav37vdZLampmX2"
"u9C4cPT9vBSBqraboE80fvuNa1VDKBQUKkv3v3dwz/q536z5sMP3vXSExuoDW5vSBn3jcNhH"
"RAQJsdgzhqaNfD6CBJqmoaoqwhERra1htAaNWK6IIKGupvyb+qN7t/5QPVZ99OzrI8bP+ipn"
"wJiHU7P6XWe12k2xtzS1f0lEexLiQ7retrFvEKGh4sj+dZu/+PftwF1tq6FDx8xIHTpm5srE"
"1Oz82LtX2hccb/rQ44XHCpYlSassK/qs7ND2J9Yvf+ObH1rh9kjKHT06K3fUF4lJiTa3y7jP"
"d9itsFrNMPEsKJqCpmoQokNSoDWEpuYg6urqg0U7V19Sun9Dl7JzdYSxU28dkZE7/A/Jvfpc"
"zvM8FX9HDdW+kX7XVu0bam1lyd7t6z6ZvGPDoirghP2AgpFTkvMGjrszMS33JpcnKYXEuhZO"
"9pnRdB3BQJPU0li7pap0z9NfLXrhlEFxPxS9B059JLPP0P9xOY0EHzabGRYzH43niuUgigbU"
"BSNoCQRxaP/2P+3d9MnjZ0uni2bcfVlq1sAHnZ6k0Rabi/u+XgBEXYR1oKWhtrqm8tD8Q7vX"
"/qNw24qa2P+/d0syu/9o9+Dzr7jd6vCOoxnWQ9Osh2E4L8UwtBBu3RkJNX/bUHd0R+mBrasP"
"F26oOluVjCtJCJWRP+m+1Kx+D3u9Pk9b4F/bYpwsG9GYjY0NjeUl++Ye+GbJc2dra7M9cgvG"
"pGb1HTXBm5gx1Gx1DTNZ7EM0RVEVRWpQVblRVeTGUKBh7a5Nn75Ssn/LScvcnfYLirqAMKeL"
"ZjzbSE8fXuDJ7Pu00+0b63T7rO2Xo1sDTaFA07H1NaUHH6wo2XTOImIIIRwApbPk9+gYsVOB"
"eDzOdG/uaKsnfQBNAYHj1fsqGg5s0Rs7Fy3fk/BfScD/T+jRETL/F/ATAecYPxFwjvG/jqgk"
"EZc0MPcAAAAASUVORK5CYII=")
getaxislogoData = axislogo.GetData
getaxislogoImage = axislogo.GetImage
getaxislogoBitmap = axislogo.GetBitmap
| 75.934783
| 87
| 0.873676
| 602
| 13,972
| 20.277409
| 0.963455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155887
| 0.063842
| 13,972
| 183
| 88
| 76.349727
| 0.77737
| 0.011165
| 0
| 0
| 1
| 0
| 0.898349
| 0.898349
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005618
| 0
| 0.005618
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c37185c482041d2d3fda034d2244ea6e1b1d0761
| 43
|
py
|
Python
|
python/src/api/database/database.py
|
ComicShrimp/Banco-24hrs
|
bc580ffb7941109a2566ff07e02be1723fb9b8e7
|
[
"MIT"
] | null | null | null |
python/src/api/database/database.py
|
ComicShrimp/Banco-24hrs
|
bc580ffb7941109a2566ff07e02be1723fb9b8e7
|
[
"MIT"
] | null | null | null |
python/src/api/database/database.py
|
ComicShrimp/Banco-24hrs
|
bc580ffb7941109a2566ff07e02be1723fb9b8e7
|
[
"MIT"
] | null | null | null |
# Shared SQLAlchemy handle configured on the application.
from src.api.app import db
# Import-time side effect: create every table known to the models.
# NOTE(review): this runs the moment the module is imported — confirm the
# app/database context is fully configured before anything imports this file.
db.create_all()
| 14.333333
| 26
| 0.767442
| 9
| 43
| 3.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 43
| 2
| 27
| 21.5
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c37b71e14737774029e74cb52b31614fb4b1efde
| 149
|
py
|
Python
|
surfpy/basedata.py
|
mjmayank1/surfpy
|
969b1a626db7606a42fab0eae445fcb351d6cbcd
|
[
"MIT"
] | 46
|
2018-04-08T15:56:32.000Z
|
2022-01-05T17:36:55.000Z
|
surfpy/basedata.py
|
mjmayank1/surfpy
|
969b1a626db7606a42fab0eae445fcb351d6cbcd
|
[
"MIT"
] | 13
|
2017-08-15T13:12:10.000Z
|
2021-03-23T09:09:04.000Z
|
surfpy/basedata.py
|
mjmayank1/surfpy
|
969b1a626db7606a42fab0eae445fcb351d6cbcd
|
[
"MIT"
] | 15
|
2018-03-08T16:52:19.000Z
|
2021-12-27T21:17:37.000Z
|
class BaseData(object):
    """Minimal base record that carries the unit its values are expressed in."""

    def __init__(self, unit):
        """Remember the unit for this data point."""
        self.unit = unit

    def change_units(self, new_units):
        """Relabel this record with *new_units* (no value conversion is done)."""
        self.unit = new_units
| 21.285714
| 38
| 0.637584
| 20
| 149
| 4.4
| 0.5
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261745
| 149
| 7
| 39
| 21.285714
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
c38115e14902cffcc71d21454d3ad35fb7aa0859
| 27
|
py
|
Python
|
research/audioset/__init__.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | 1
|
2021-05-17T01:42:29.000Z
|
2021-05-17T01:42:29.000Z
|
research/audioset/__init__.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | null | null | null |
research/audioset/__init__.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | null | null | null |
from . import vggish_input
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c3821f8a8e8b077665b13dca62ea546d539875a4
| 55
|
py
|
Python
|
examples/sc4/gen_software_report/sccsv/__init__.py
|
travisamacker/OldpySecurityCenter
|
f0b10b1bcd4fd23a8d4d09ca6774cdf5e1cfd880
|
[
"BSD-3-Clause"
] | 92
|
2015-03-18T00:57:03.000Z
|
2021-09-07T14:09:43.000Z
|
examples/sc4/gen_software_report/sccsv/__init__.py
|
travisamacker/OldpySecurityCenter
|
f0b10b1bcd4fd23a8d4d09ca6774cdf5e1cfd880
|
[
"BSD-3-Clause"
] | 46
|
2015-06-22T16:25:36.000Z
|
2018-10-29T16:56:57.000Z
|
examples/sc4/gen_software_report/sccsv/__init__.py
|
travisamacker/OldpySecurityCenter
|
f0b10b1bcd4fd23a8d4d09ca6774cdf5e1cfd880
|
[
"BSD-3-Clause"
] | 37
|
2015-05-07T20:02:26.000Z
|
2021-09-07T14:09:46.000Z
|
import fields
import generator
import debug
import mail
| 13.75
| 16
| 0.872727
| 8
| 55
| 6
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127273
| 55
| 4
| 17
| 13.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5efd65d2a040bc6da4573b7b62cc1e8c39223be6
| 94
|
py
|
Python
|
proxychains_conf_generator/__init__.py
|
neoctobers/py_proxychains_conf_generator
|
0db4edfcd8b954355b28fd78222a59cb71d65a2c
|
[
"MIT"
] | 1
|
2019-03-30T05:44:24.000Z
|
2019-03-30T05:44:24.000Z
|
proxychains_conf_generator/__init__.py
|
neoctobers/py_proxychains_conf_generator
|
0db4edfcd8b954355b28fd78222a59cb71d65a2c
|
[
"MIT"
] | null | null | null |
proxychains_conf_generator/__init__.py
|
neoctobers/py_proxychains_conf_generator
|
0db4edfcd8b954355b28fd78222a59cb71d65a2c
|
[
"MIT"
] | null | null | null |
# coding:utf-8
from .proxychains_conf_generator import *
# Package name string — presumably the distribution name; TODO confirm against setup.py.
name = 'proxychains-conf-generator'
| 18.8
| 41
| 0.787234
| 12
| 94
| 6
| 0.75
| 0.416667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0.106383
| 94
| 4
| 42
| 23.5
| 0.845238
| 0.12766
| 0
| 0
| 0
| 0
| 0.325
| 0.325
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6f06fbc6f8b789b0779867419bcd1eea2f5571a1
| 359,004
|
py
|
Python
|
sdk/python/pulumi_azure_native/machinelearningservices/_inputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/machinelearningservices/_inputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/machinelearningservices/_inputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'ACIServiceCreateRequestDataCollectionArgs',
'ACIServiceCreateRequestEncryptionPropertiesArgs',
'ACIServiceCreateRequestVnetConfigurationArgs',
'AKSPropertiesArgs',
'AKSServiceCreateRequestAutoScalerArgs',
'AKSServiceCreateRequestDataCollectionArgs',
'AKSServiceCreateRequestLivenessProbeRequirementsArgs',
'AKSArgs',
'AksNetworkingConfigurationArgs',
'AmlComputePropertiesArgs',
'AmlComputeArgs',
'AmlTokenArgs',
'AssignedUserArgs',
'AutoScaleSettingsArgs',
'BanditPolicyArgs',
'BatchDeploymentArgs',
'BatchEndpointArgs',
'BatchOutputConfigurationArgs',
'BatchRetrySettingsArgs',
'CodeConfigurationArgs',
'CodeContainerArgs',
'CodeVersionArgs',
'CommandJobArgs',
'ComputeBindingArgs',
'ComputeConfigurationArgs',
'ComputeInstancePropertiesArgs',
'ComputeInstanceSshSettingsArgs',
'ComputeInstanceArgs',
'ContainerResourceRequirementsArgs',
'CosmosDbSettingsArgs',
'CreateServiceRequestEnvironmentImageRequestArgs',
'CreateServiceRequestKeysArgs',
'DataContainerArgs',
'DataFactoryArgs',
'DataLakeAnalyticsPropertiesArgs',
'DataLakeAnalyticsArgs',
'DataPathAssetReferenceArgs',
'DataVersionArgs',
'DatabricksPropertiesArgs',
'DatabricksArgs',
'DatasetCreateRequestDataPathArgs',
'DatasetCreateRequestParametersArgs',
'DatasetCreateRequestPathArgs',
'DatasetCreateRequestQueryArgs',
'DatasetCreateRequestRegistrationArgs',
'DatasetCreateRequestTimeSeriesArgs',
'DatasetReferenceArgs',
'DockerBuildArgs',
'DockerImagePlatformArgs',
'DockerImageArgs',
'EncryptionPropertyArgs',
'EndpointAuthKeysArgs',
'EnvironmentContainerArgs',
'EnvironmentImageRequestEnvironmentReferenceArgs',
'EnvironmentImageRequestEnvironmentArgs',
'EnvironmentSpecificationVersionArgs',
'FlavorDataArgs',
'HDInsightPropertiesArgs',
'HDInsightArgs',
'IdAssetReferenceArgs',
'IdentityForCmkArgs',
'IdentityArgs',
'ImageAssetArgs',
'InferenceContainerPropertiesArgs',
'InputDataBindingArgs',
'K8sOnlineDeploymentArgs',
'KeyVaultPropertiesArgs',
'LabelCategoryArgs',
'LabelClassArgs',
'LabelingDatasetConfigurationArgs',
'LabelingJobImagePropertiesArgs',
'LabelingJobInstructionsArgs',
'LabelingJobPropertiesArgs',
'LinkedServicePropsArgs',
'LinkedWorkspacePropsArgs',
'MLAssistConfigurationArgs',
'ManagedIdentityArgs',
'ManagedOnlineDeploymentArgs',
'ManualScaleSettingsArgs',
'MedianStoppingPolicyArgs',
'ModelContainerArgs',
'ModelDockerSectionBaseImageRegistryArgs',
'ModelEnvironmentDefinitionDockerArgs',
'ModelEnvironmentDefinitionPythonArgs',
'ModelEnvironmentDefinitionRArgs',
'ModelEnvironmentDefinitionSparkArgs',
'ModelVersionArgs',
'ModelArgs',
'MpiArgs',
'ObjectiveArgs',
'OnlineEndpointArgs',
'OnlineRequestSettingsArgs',
'OutputDataBindingArgs',
'OutputPathAssetReferenceArgs',
'PersonalComputeInstanceSettingsArgs',
'PrivateLinkServiceConnectionStateArgs',
'ProbeSettingsArgs',
'PyTorchArgs',
'RCranPackageArgs',
'RGitHubPackageArgs',
'ResourceIdentityArgs',
'ResourceIdArgs',
'RouteArgs',
'ScaleSettingsArgs',
'ScriptReferenceArgs',
'ScriptsToExecuteArgs',
'ServiceManagedResourcesSettingsArgs',
'SetupScriptsArgs',
'SharedPrivateLinkResourceArgs',
'SkuArgs',
'SparkMavenPackageArgs',
'SslConfigurationArgs',
'SweepJobArgs',
'TensorFlowArgs',
'TrialComponentArgs',
'TruncationSelectionPolicyArgs',
'UserAccountCredentialsArgs',
'UserAssignedIdentityMetaArgs',
'VirtualMachineImageArgs',
'VirtualMachinePropertiesArgs',
'VirtualMachineSshCredentialsArgs',
'VirtualMachineArgs',
]
@pulumi.input_type
class ACIServiceCreateRequestDataCollectionArgs:
    """Input args for the data collection options of an ACI service create request (auto-generated by the Pulumi SDK Generator — do not edit by hand)."""
    def __init__(__self__, *,
                 event_hub_enabled: Optional[pulumi.Input[bool]] = None,
                 storage_enabled: Optional[pulumi.Input[bool]] = None):
        """
        Details of the data collection options specified.
        :param pulumi.Input[bool] event_hub_enabled: Option for enabling/disabling Event Hub.
        :param pulumi.Input[bool] storage_enabled: Option for enabling/disabling storage.
        """
        if event_hub_enabled is not None:
            pulumi.set(__self__, "event_hub_enabled", event_hub_enabled)
        if storage_enabled is not None:
            pulumi.set(__self__, "storage_enabled", storage_enabled)
    @property
    @pulumi.getter(name="eventHubEnabled")
    def event_hub_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Option for enabling/disabling Event Hub.
        """
        return pulumi.get(self, "event_hub_enabled")
    @event_hub_enabled.setter
    def event_hub_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "event_hub_enabled", value)
    @property
    @pulumi.getter(name="storageEnabled")
    def storage_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Option for enabling/disabling storage.
        """
        return pulumi.get(self, "storage_enabled")
    @storage_enabled.setter
    def storage_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "storage_enabled", value)
@pulumi.input_type
class ACIServiceCreateRequestEncryptionPropertiesArgs:
    """Input args for the Key Vault encryption properties of an ACI service create request; all three fields are required (auto-generated by the Pulumi SDK Generator — do not edit by hand)."""
    def __init__(__self__, *,
                 key_name: pulumi.Input[str],
                 key_version: pulumi.Input[str],
                 vault_base_url: pulumi.Input[str]):
        """
        The encryption properties.
        :param pulumi.Input[str] key_name: Encryption Key name
        :param pulumi.Input[str] key_version: Encryption Key Version
        :param pulumi.Input[str] vault_base_url: vault base Url
        """
        pulumi.set(__self__, "key_name", key_name)
        pulumi.set(__self__, "key_version", key_version)
        pulumi.set(__self__, "vault_base_url", vault_base_url)
    @property
    @pulumi.getter(name="keyName")
    def key_name(self) -> pulumi.Input[str]:
        """
        Encryption Key name
        """
        return pulumi.get(self, "key_name")
    @key_name.setter
    def key_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "key_name", value)
    @property
    @pulumi.getter(name="keyVersion")
    def key_version(self) -> pulumi.Input[str]:
        """
        Encryption Key Version
        """
        return pulumi.get(self, "key_version")
    @key_version.setter
    def key_version(self, value: pulumi.Input[str]):
        pulumi.set(self, "key_version", value)
    @property
    @pulumi.getter(name="vaultBaseUrl")
    def vault_base_url(self) -> pulumi.Input[str]:
        """
        vault base Url
        """
        return pulumi.get(self, "vault_base_url")
    @vault_base_url.setter
    def vault_base_url(self, value: pulumi.Input[str]):
        pulumi.set(self, "vault_base_url", value)
@pulumi.input_type
class ACIServiceCreateRequestVnetConfigurationArgs:
    """Input args for the optional virtual-network configuration of an ACI service create request (auto-generated by the Pulumi SDK Generator — do not edit by hand)."""
    def __init__(__self__, *,
                 subnet_name: Optional[pulumi.Input[str]] = None,
                 vnet_name: Optional[pulumi.Input[str]] = None):
        """
        The virtual network configuration.
        :param pulumi.Input[str] subnet_name: The name of the virtual network subnet.
        :param pulumi.Input[str] vnet_name: The name of the virtual network.
        """
        if subnet_name is not None:
            pulumi.set(__self__, "subnet_name", subnet_name)
        if vnet_name is not None:
            pulumi.set(__self__, "vnet_name", vnet_name)
    @property
    @pulumi.getter(name="subnetName")
    def subnet_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the virtual network subnet.
        """
        return pulumi.get(self, "subnet_name")
    @subnet_name.setter
    def subnet_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_name", value)
    @property
    @pulumi.getter(name="vnetName")
    def vnet_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the virtual network.
        """
        return pulumi.get(self, "vnet_name")
    @vnet_name.setter
    def vnet_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vnet_name", value)
@pulumi.input_type
class AKSPropertiesArgs:
    """Input args for AKS compute properties (auto-generated by the Pulumi SDK Generator — do not edit by hand)."""
    def __init__(__self__, *,
                 agent_count: Optional[pulumi.Input[int]] = None,
                 agent_vm_size: Optional[pulumi.Input[str]] = None,
                 aks_networking_configuration: Optional[pulumi.Input['AksNetworkingConfigurationArgs']] = None,
                 cluster_fqdn: Optional[pulumi.Input[str]] = None,
                 cluster_purpose: Optional[pulumi.Input[Union[str, 'ClusterPurpose']]] = None,
                 ssl_configuration: Optional[pulumi.Input['SslConfigurationArgs']] = None):
        """
        AKS properties
        :param pulumi.Input[int] agent_count: Number of agents
        :param pulumi.Input[str] agent_vm_size: Agent virtual machine size
        :param pulumi.Input['AksNetworkingConfigurationArgs'] aks_networking_configuration: AKS networking configuration for vnet
        :param pulumi.Input[str] cluster_fqdn: Cluster full qualified domain name
        :param pulumi.Input[Union[str, 'ClusterPurpose']] cluster_purpose: Intended usage of the cluster
        :param pulumi.Input['SslConfigurationArgs'] ssl_configuration: SSL configuration
        """
        if agent_count is not None:
            pulumi.set(__self__, "agent_count", agent_count)
        if agent_vm_size is not None:
            pulumi.set(__self__, "agent_vm_size", agent_vm_size)
        if aks_networking_configuration is not None:
            pulumi.set(__self__, "aks_networking_configuration", aks_networking_configuration)
        if cluster_fqdn is not None:
            pulumi.set(__self__, "cluster_fqdn", cluster_fqdn)
        # Generator-supplied default: an omitted cluster_purpose is coerced to 'FastProd'.
        if cluster_purpose is None:
            cluster_purpose = 'FastProd'
        if cluster_purpose is not None:
            pulumi.set(__self__, "cluster_purpose", cluster_purpose)
        if ssl_configuration is not None:
            pulumi.set(__self__, "ssl_configuration", ssl_configuration)
    @property
    @pulumi.getter(name="agentCount")
    def agent_count(self) -> Optional[pulumi.Input[int]]:
        """
        Number of agents
        """
        return pulumi.get(self, "agent_count")
    @agent_count.setter
    def agent_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "agent_count", value)
    @property
    @pulumi.getter(name="agentVmSize")
    def agent_vm_size(self) -> Optional[pulumi.Input[str]]:
        """
        Agent virtual machine size
        """
        return pulumi.get(self, "agent_vm_size")
    @agent_vm_size.setter
    def agent_vm_size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "agent_vm_size", value)
    @property
    @pulumi.getter(name="aksNetworkingConfiguration")
    def aks_networking_configuration(self) -> Optional[pulumi.Input['AksNetworkingConfigurationArgs']]:
        """
        AKS networking configuration for vnet
        """
        return pulumi.get(self, "aks_networking_configuration")
    @aks_networking_configuration.setter
    def aks_networking_configuration(self, value: Optional[pulumi.Input['AksNetworkingConfigurationArgs']]):
        pulumi.set(self, "aks_networking_configuration", value)
    @property
    @pulumi.getter(name="clusterFqdn")
    def cluster_fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Cluster full qualified domain name
        """
        return pulumi.get(self, "cluster_fqdn")
    @cluster_fqdn.setter
    def cluster_fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_fqdn", value)
    @property
    @pulumi.getter(name="clusterPurpose")
    def cluster_purpose(self) -> Optional[pulumi.Input[Union[str, 'ClusterPurpose']]]:
        """
        Intended usage of the cluster
        """
        return pulumi.get(self, "cluster_purpose")
    @cluster_purpose.setter
    def cluster_purpose(self, value: Optional[pulumi.Input[Union[str, 'ClusterPurpose']]]):
        pulumi.set(self, "cluster_purpose", value)
    @property
    @pulumi.getter(name="sslConfiguration")
    def ssl_configuration(self) -> Optional[pulumi.Input['SslConfigurationArgs']]:
        """
        SSL configuration
        """
        return pulumi.get(self, "ssl_configuration")
    @ssl_configuration.setter
    def ssl_configuration(self, value: Optional[pulumi.Input['SslConfigurationArgs']]):
        pulumi.set(self, "ssl_configuration", value)
@pulumi.input_type
class AKSServiceCreateRequestAutoScalerArgs:
    """Input args for the auto-scaler settings of an AKS service create request; every field is optional (auto-generated by the Pulumi SDK Generator — do not edit by hand)."""
    def __init__(__self__, *,
                 autoscale_enabled: Optional[pulumi.Input[bool]] = None,
                 max_replicas: Optional[pulumi.Input[int]] = None,
                 min_replicas: Optional[pulumi.Input[int]] = None,
                 refresh_period_in_seconds: Optional[pulumi.Input[int]] = None,
                 target_utilization: Optional[pulumi.Input[int]] = None):
        """
        The auto scaler properties.
        :param pulumi.Input[bool] autoscale_enabled: Option to enable/disable auto scaling.
        :param pulumi.Input[int] max_replicas: The maximum number of replicas in the cluster.
        :param pulumi.Input[int] min_replicas: The minimum number of replicas to scale down to.
        :param pulumi.Input[int] refresh_period_in_seconds: The amount of seconds to wait between auto scale updates.
        :param pulumi.Input[int] target_utilization: The target utilization percentage to use for determining whether to scale the cluster.
        """
        if autoscale_enabled is not None:
            pulumi.set(__self__, "autoscale_enabled", autoscale_enabled)
        if max_replicas is not None:
            pulumi.set(__self__, "max_replicas", max_replicas)
        if min_replicas is not None:
            pulumi.set(__self__, "min_replicas", min_replicas)
        if refresh_period_in_seconds is not None:
            pulumi.set(__self__, "refresh_period_in_seconds", refresh_period_in_seconds)
        if target_utilization is not None:
            pulumi.set(__self__, "target_utilization", target_utilization)
    @property
    @pulumi.getter(name="autoscaleEnabled")
    def autoscale_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Option to enable/disable auto scaling.
        """
        return pulumi.get(self, "autoscale_enabled")
    @autoscale_enabled.setter
    def autoscale_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "autoscale_enabled", value)
    @property
    @pulumi.getter(name="maxReplicas")
    def max_replicas(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum number of replicas in the cluster.
        """
        return pulumi.get(self, "max_replicas")
    @max_replicas.setter
    def max_replicas(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_replicas", value)
    @property
    @pulumi.getter(name="minReplicas")
    def min_replicas(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum number of replicas to scale down to.
        """
        return pulumi.get(self, "min_replicas")
    @min_replicas.setter
    def min_replicas(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_replicas", value)
    @property
    @pulumi.getter(name="refreshPeriodInSeconds")
    def refresh_period_in_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of seconds to wait between auto scale updates.
        """
        return pulumi.get(self, "refresh_period_in_seconds")
    @refresh_period_in_seconds.setter
    def refresh_period_in_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "refresh_period_in_seconds", value)
    @property
    @pulumi.getter(name="targetUtilization")
    def target_utilization(self) -> Optional[pulumi.Input[int]]:
        """
        The target utilization percentage to use for determining whether to scale the cluster.
        """
        return pulumi.get(self, "target_utilization")
    @target_utilization.setter
    def target_utilization(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "target_utilization", value)
@pulumi.input_type
class AKSServiceCreateRequestDataCollectionArgs:
    """Input args for the data collection options of an AKS service create request; structurally identical to the ACI variant, duplicated by the generator (auto-generated by the Pulumi SDK Generator — do not edit by hand)."""
    def __init__(__self__, *,
                 event_hub_enabled: Optional[pulumi.Input[bool]] = None,
                 storage_enabled: Optional[pulumi.Input[bool]] = None):
        """
        Details of the data collection options specified.
        :param pulumi.Input[bool] event_hub_enabled: Option for enabling/disabling Event Hub.
        :param pulumi.Input[bool] storage_enabled: Option for enabling/disabling storage.
        """
        if event_hub_enabled is not None:
            pulumi.set(__self__, "event_hub_enabled", event_hub_enabled)
        if storage_enabled is not None:
            pulumi.set(__self__, "storage_enabled", storage_enabled)
    @property
    @pulumi.getter(name="eventHubEnabled")
    def event_hub_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Option for enabling/disabling Event Hub.
        """
        return pulumi.get(self, "event_hub_enabled")
    @event_hub_enabled.setter
    def event_hub_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "event_hub_enabled", value)
    @property
    @pulumi.getter(name="storageEnabled")
    def storage_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Option for enabling/disabling storage.
        """
        return pulumi.get(self, "storage_enabled")
    @storage_enabled.setter
    def storage_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "storage_enabled", value)
@pulumi.input_type
class AKSServiceCreateRequestLivenessProbeRequirementsArgs:
    """Input args for the liveness-probe requirements of an AKS service create request; every field is optional (auto-generated by the Pulumi SDK Generator — do not edit by hand)."""
    def __init__(__self__, *,
                 failure_threshold: Optional[pulumi.Input[int]] = None,
                 initial_delay_seconds: Optional[pulumi.Input[int]] = None,
                 period_seconds: Optional[pulumi.Input[int]] = None,
                 success_threshold: Optional[pulumi.Input[int]] = None,
                 timeout_seconds: Optional[pulumi.Input[int]] = None):
        """
        The liveness probe requirements.
        :param pulumi.Input[int] failure_threshold: The number of failures to allow before returning an unhealthy status.
        :param pulumi.Input[int] initial_delay_seconds: The delay before the first probe in seconds.
        :param pulumi.Input[int] period_seconds: The length of time between probes in seconds.
        :param pulumi.Input[int] success_threshold: The number of successful probes before returning a healthy status.
        :param pulumi.Input[int] timeout_seconds: The probe timeout in seconds.
        """
        if failure_threshold is not None:
            pulumi.set(__self__, "failure_threshold", failure_threshold)
        if initial_delay_seconds is not None:
            pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
        if period_seconds is not None:
            pulumi.set(__self__, "period_seconds", period_seconds)
        if success_threshold is not None:
            pulumi.set(__self__, "success_threshold", success_threshold)
        if timeout_seconds is not None:
            pulumi.set(__self__, "timeout_seconds", timeout_seconds)
    @property
    @pulumi.getter(name="failureThreshold")
    def failure_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The number of failures to allow before returning an unhealthy status.
        """
        return pulumi.get(self, "failure_threshold")
    @failure_threshold.setter
    def failure_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "failure_threshold", value)
    @property
    @pulumi.getter(name="initialDelaySeconds")
    def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The delay before the first probe in seconds.
        """
        return pulumi.get(self, "initial_delay_seconds")
    @initial_delay_seconds.setter
    def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "initial_delay_seconds", value)
    @property
    @pulumi.getter(name="periodSeconds")
    def period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The length of time between probes in seconds.
        """
        return pulumi.get(self, "period_seconds")
    @period_seconds.setter
    def period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period_seconds", value)
    @property
    @pulumi.getter(name="successThreshold")
    def success_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The number of successful probes before returning a healthy status.
        """
        return pulumi.get(self, "success_threshold")
    @success_threshold.setter
    def success_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "success_threshold", value)
    @property
    @pulumi.getter(name="timeoutSeconds")
    def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The probe timeout in seconds.
        """
        return pulumi.get(self, "timeout_seconds")
    @timeout_seconds.setter
    def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class AKSArgs:
    """Input args for a Machine Learning compute backed by AKS (auto-generated by the Pulumi SDK Generator — do not edit by hand)."""
    def __init__(__self__, *,
                 compute_type: pulumi.Input[str],
                 compute_location: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input['AKSPropertiesArgs']] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        A Machine Learning compute based on AKS.
        :param pulumi.Input[str] compute_type: The type of compute
               Expected value is 'AKS'.
        :param pulumi.Input[str] compute_location: Location for the underlying compute
        :param pulumi.Input[str] description: The description of the Machine Learning compute.
        :param pulumi.Input['AKSPropertiesArgs'] properties: AKS properties
        :param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
        """
        # Discriminator field: the generator pins it to the literal 'AKS',
        # ignoring whatever value the caller passed for compute_type.
        pulumi.set(__self__, "compute_type", 'AKS')
        if compute_location is not None:
            pulumi.set(__self__, "compute_location", compute_location)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
    @property
    @pulumi.getter(name="computeType")
    def compute_type(self) -> pulumi.Input[str]:
        """
        The type of compute
        Expected value is 'AKS'.
        """
        return pulumi.get(self, "compute_type")
    @compute_type.setter
    def compute_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "compute_type", value)
    @property
    @pulumi.getter(name="computeLocation")
    def compute_location(self) -> Optional[pulumi.Input[str]]:
        """
        Location for the underlying compute
        """
        return pulumi.get(self, "compute_location")
    @compute_location.setter
    def compute_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "compute_location", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the Machine Learning compute.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input['AKSPropertiesArgs']]:
        """
        AKS properties
        """
        return pulumi.get(self, "properties")
    @properties.setter
    def properties(self, value: Optional[pulumi.Input['AKSPropertiesArgs']]):
        pulumi.set(self, "properties", value)
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource id of the underlying compute
        """
        return pulumi.get(self, "resource_id")
    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
@pulumi.input_type
class AksNetworkingConfigurationArgs:
    """Input args for advanced AKS networking configuration; every field is optional (auto-generated by the Pulumi SDK Generator — do not edit by hand)."""
    def __init__(__self__, *,
                 dns_service_ip: Optional[pulumi.Input[str]] = None,
                 docker_bridge_cidr: Optional[pulumi.Input[str]] = None,
                 service_cidr: Optional[pulumi.Input[str]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None):
        """
        Advance configuration for AKS networking
        :param pulumi.Input[str] dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr.
        :param pulumi.Input[str] docker_bridge_cidr: A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
        :param pulumi.Input[str] service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.
        :param pulumi.Input[str] subnet_id: Virtual network subnet resource ID the compute nodes belong to
        """
        if dns_service_ip is not None:
            pulumi.set(__self__, "dns_service_ip", dns_service_ip)
        if docker_bridge_cidr is not None:
            pulumi.set(__self__, "docker_bridge_cidr", docker_bridge_cidr)
        if service_cidr is not None:
            pulumi.set(__self__, "service_cidr", service_cidr)
        if subnet_id is not None:
            pulumi.set(__self__, "subnet_id", subnet_id)
    @property
    @pulumi.getter(name="dnsServiceIP")
    def dns_service_ip(self) -> Optional[pulumi.Input[str]]:
        """
        An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr.
        """
        return pulumi.get(self, "dns_service_ip")
    @dns_service_ip.setter
    def dns_service_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dns_service_ip", value)
    @property
    @pulumi.getter(name="dockerBridgeCidr")
    def docker_bridge_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
        """
        return pulumi.get(self, "docker_bridge_cidr")
    @docker_bridge_cidr.setter
    def docker_bridge_cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "docker_bridge_cidr", value)
    @property
    @pulumi.getter(name="serviceCidr")
    def service_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.
        """
        return pulumi.get(self, "service_cidr")
    @service_cidr.setter
    def service_cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_cidr", value)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        Virtual network subnet resource ID the compute nodes belong to
        """
        return pulumi.get(self, "subnet_id")
    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)
@pulumi.input_type
class AmlComputePropertiesArgs:
def __init__(__self__, *,
enable_node_public_ip: Optional[pulumi.Input[bool]] = None,
isolated_network: Optional[pulumi.Input[bool]] = None,
os_type: Optional[pulumi.Input[Union[str, 'OsType']]] = None,
remote_login_port_public_access: Optional[pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']]] = None,
scale_settings: Optional[pulumi.Input['ScaleSettingsArgs']] = None,
subnet: Optional[pulumi.Input['ResourceIdArgs']] = None,
user_account_credentials: Optional[pulumi.Input['UserAccountCredentialsArgs']] = None,
virtual_machine_image: Optional[pulumi.Input['VirtualMachineImageArgs']] = None,
vm_priority: Optional[pulumi.Input[Union[str, 'VmPriority']]] = None,
vm_size: Optional[pulumi.Input[str]] = None):
"""
AML Compute properties
:param pulumi.Input[bool] enable_node_public_ip: Enable or disable node public IP address provisioning. Possible values are: Possible values are: true - Indicates that the compute nodes will have public IPs provisioned. false - Indicates that the compute nodes will have a private endpoint and no public IPs.
:param pulumi.Input[bool] isolated_network: Network is isolated or not
:param pulumi.Input[Union[str, 'OsType']] os_type: Compute OS Type
:param pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']] remote_login_port_public_access: State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined, else is open all public nodes. It can be default only during cluster creation time, after creation it will be either enabled or disabled.
:param pulumi.Input['ScaleSettingsArgs'] scale_settings: Scale settings for AML Compute
:param pulumi.Input['ResourceIdArgs'] subnet: Virtual network subnet resource ID the compute nodes belong to.
:param pulumi.Input['UserAccountCredentialsArgs'] user_account_credentials: Credentials for an administrator user account that will be created on each compute node.
:param pulumi.Input['VirtualMachineImageArgs'] virtual_machine_image: Virtual Machine image for AML Compute - windows only
:param pulumi.Input[Union[str, 'VmPriority']] vm_priority: Virtual Machine priority
:param pulumi.Input[str] vm_size: Virtual Machine Size
"""
if enable_node_public_ip is None:
enable_node_public_ip = True
if enable_node_public_ip is not None:
pulumi.set(__self__, "enable_node_public_ip", enable_node_public_ip)
if isolated_network is not None:
pulumi.set(__self__, "isolated_network", isolated_network)
if os_type is None:
os_type = 'Linux'
if os_type is not None:
pulumi.set(__self__, "os_type", os_type)
if remote_login_port_public_access is None:
remote_login_port_public_access = 'NotSpecified'
if remote_login_port_public_access is not None:
pulumi.set(__self__, "remote_login_port_public_access", remote_login_port_public_access)
if scale_settings is not None:
pulumi.set(__self__, "scale_settings", scale_settings)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
if user_account_credentials is not None:
pulumi.set(__self__, "user_account_credentials", user_account_credentials)
if virtual_machine_image is not None:
pulumi.set(__self__, "virtual_machine_image", virtual_machine_image)
if vm_priority is not None:
pulumi.set(__self__, "vm_priority", vm_priority)
if vm_size is not None:
pulumi.set(__self__, "vm_size", vm_size)
@property
@pulumi.getter(name="enableNodePublicIp")
def enable_node_public_ip(self) -> Optional[pulumi.Input[bool]]:
"""
Enable or disable node public IP address provisioning. Possible values are: Possible values are: true - Indicates that the compute nodes will have public IPs provisioned. false - Indicates that the compute nodes will have a private endpoint and no public IPs.
"""
return pulumi.get(self, "enable_node_public_ip")
@enable_node_public_ip.setter
def enable_node_public_ip(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_node_public_ip", value)
@property
@pulumi.getter(name="isolatedNetwork")
def isolated_network(self) -> Optional[pulumi.Input[bool]]:
"""
Network is isolated or not
"""
return pulumi.get(self, "isolated_network")
@isolated_network.setter
def isolated_network(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "isolated_network", value)
    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[pulumi.Input[Union[str, 'OsType']]]:
        """
        Compute OS Type
        """
        return pulumi.get(self, "os_type")
    @os_type.setter
    def os_type(self, value: Optional[pulumi.Input[Union[str, 'OsType']]]):
        """Set the compute OS type (the constructor defaults it to 'Linux' when omitted)."""
        pulumi.set(self, "os_type", value)
    @property
    @pulumi.getter(name="remoteLoginPortPublicAccess")
    def remote_login_port_public_access(self) -> Optional[pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']]]:
        """
        State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined, else is open all public nodes. It can be default only during cluster creation time, after creation it will be either enabled or disabled.
        """
        return pulumi.get(self, "remote_login_port_public_access")
    @remote_login_port_public_access.setter
    def remote_login_port_public_access(self, value: Optional[pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']]]):
        """Set the public SSH port state (the constructor defaults it to 'NotSpecified' when omitted)."""
        pulumi.set(self, "remote_login_port_public_access", value)
    @property
    @pulumi.getter(name="scaleSettings")
    def scale_settings(self) -> Optional[pulumi.Input['ScaleSettingsArgs']]:
        """
        Scale settings for AML Compute
        """
        return pulumi.get(self, "scale_settings")
    @scale_settings.setter
    def scale_settings(self, value: Optional[pulumi.Input['ScaleSettingsArgs']]):
        """Set the scale settings for AML Compute."""
        pulumi.set(self, "scale_settings", value)
    @property
    @pulumi.getter
    def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:
        """
        Virtual network subnet resource ID the compute nodes belong to.
        """
        return pulumi.get(self, "subnet")
    @subnet.setter
    def subnet(self, value: Optional[pulumi.Input['ResourceIdArgs']]):
        """Set the virtual network subnet resource ID for the compute nodes."""
        pulumi.set(self, "subnet", value)
    @property
    @pulumi.getter(name="userAccountCredentials")
    def user_account_credentials(self) -> Optional[pulumi.Input['UserAccountCredentialsArgs']]:
        """
        Credentials for an administrator user account that will be created on each compute node.
        """
        return pulumi.get(self, "user_account_credentials")
    @user_account_credentials.setter
    def user_account_credentials(self, value: Optional[pulumi.Input['UserAccountCredentialsArgs']]):
        """Set the administrator account credentials created on each compute node."""
        pulumi.set(self, "user_account_credentials", value)
    @property
    @pulumi.getter(name="virtualMachineImage")
    def virtual_machine_image(self) -> Optional[pulumi.Input['VirtualMachineImageArgs']]:
        """
        Virtual Machine image for AML Compute - windows only
        """
        return pulumi.get(self, "virtual_machine_image")
    @virtual_machine_image.setter
    def virtual_machine_image(self, value: Optional[pulumi.Input['VirtualMachineImageArgs']]):
        """Set the Virtual Machine image for AML Compute (Windows only)."""
        pulumi.set(self, "virtual_machine_image", value)
    @property
    @pulumi.getter(name="vmPriority")
    def vm_priority(self) -> Optional[pulumi.Input[Union[str, 'VmPriority']]]:
        """
        Virtual Machine priority
        """
        return pulumi.get(self, "vm_priority")
    @vm_priority.setter
    def vm_priority(self, value: Optional[pulumi.Input[Union[str, 'VmPriority']]]):
        """Set the Virtual Machine priority."""
        pulumi.set(self, "vm_priority", value)
    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> Optional[pulumi.Input[str]]:
        """
        Virtual Machine Size
        """
        return pulumi.get(self, "vm_size")
    @vm_size.setter
    def vm_size(self, value: Optional[pulumi.Input[str]]):
        """Set the Virtual Machine size."""
        pulumi.set(self, "vm_size", value)
@pulumi.input_type
class AmlComputeArgs:
    def __init__(__self__, *,
                 compute_type: pulumi.Input[str],
                 compute_location: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input['AmlComputePropertiesArgs']] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        An Azure Machine Learning compute.
        :param pulumi.Input[str] compute_type: The type of compute
               Expected value is 'AmlCompute'.
        :param pulumi.Input[str] compute_location: Location for the underlying compute
        :param pulumi.Input[str] description: The description of the Machine Learning compute.
        :param pulumi.Input['AmlComputePropertiesArgs'] properties: AML Compute properties
        :param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
        """
        # The discriminator is always stored as 'AmlCompute'; the compute_type
        # argument is accepted for signature compatibility only.
        pulumi.set(__self__, "compute_type", 'AmlCompute')
        if compute_location is not None:
            pulumi.set(__self__, "compute_location", compute_location)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
    @property
    @pulumi.getter(name="computeType")
    def compute_type(self) -> pulumi.Input[str]:
        """
        The type of compute
        Expected value is 'AmlCompute'.
        """
        return pulumi.get(self, "compute_type")
    @compute_type.setter
    def compute_type(self, value: pulumi.Input[str]):
        """Set the compute type discriminator."""
        pulumi.set(self, "compute_type", value)
    @property
    @pulumi.getter(name="computeLocation")
    def compute_location(self) -> Optional[pulumi.Input[str]]:
        """
        Location for the underlying compute
        """
        return pulumi.get(self, "compute_location")
    @compute_location.setter
    def compute_location(self, value: Optional[pulumi.Input[str]]):
        """Set the location of the underlying compute."""
        pulumi.set(self, "compute_location", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the Machine Learning compute.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        """Set the description of the Machine Learning compute."""
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input['AmlComputePropertiesArgs']]:
        """
        AML Compute properties
        """
        return pulumi.get(self, "properties")
    @properties.setter
    def properties(self, value: Optional[pulumi.Input['AmlComputePropertiesArgs']]):
        """Set the AML Compute properties."""
        pulumi.set(self, "properties", value)
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource id of the underlying compute
        """
        return pulumi.get(self, "resource_id")
    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        """Set the ARM resource id of the underlying compute."""
        pulumi.set(self, "resource_id", value)
@pulumi.input_type
class AmlTokenArgs:
    def __init__(__self__, *,
                 identity_type: pulumi.Input[str]):
        """
        AML Token identity configuration.

        :param pulumi.Input[str] identity_type: Enum to determine identity framework.
               Expected value is 'AMLToken'.
        """
        # The discriminator is fixed regardless of the argument supplied;
        # the parameter exists only for signature compatibility.
        pulumi.set(__self__, "identity_type", 'AMLToken')

    @property
    @pulumi.getter(name="identityType")
    def identity_type(self) -> pulumi.Input[str]:
        """
        Enum to determine identity framework.
        Expected value is 'AMLToken'.
        """
        return pulumi.get(self, "identity_type")

    @identity_type.setter
    def identity_type(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "identity_type", new_value)
@pulumi.input_type
class AssignedUserArgs:
    def __init__(__self__, *,
                 object_id: pulumi.Input[str],
                 tenant_id: pulumi.Input[str]):
        """
        A user that can be assigned to a compute instance.

        :param pulumi.Input[str] object_id: User's AAD Object Id.
        :param pulumi.Input[str] tenant_id: User's AAD Tenant Id.
        """
        # Both fields are required, so store them unconditionally.
        for field_name, field_value in (("object_id", object_id),
                                        ("tenant_id", tenant_id)):
            pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> pulumi.Input[str]:
        """
        User's AAD Object Id.
        """
        return pulumi.get(self, "object_id")

    @object_id.setter
    def object_id(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "object_id", new_value)

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> pulumi.Input[str]:
        """
        User's AAD Tenant Id.
        """
        return pulumi.get(self, "tenant_id")

    @tenant_id.setter
    def tenant_id(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "tenant_id", new_value)
@pulumi.input_type
class AutoScaleSettingsArgs:
    def __init__(__self__, *,
                 scale_type: pulumi.Input[str],
                 max_instances: Optional[pulumi.Input[int]] = None,
                 min_instances: Optional[pulumi.Input[int]] = None,
                 polling_interval: Optional[pulumi.Input[str]] = None,
                 target_utilization_percentage: Optional[pulumi.Input[int]] = None):
        """
        Auto scale settings for a deployment.

        :param pulumi.Input[str] scale_type:
               Expected value is 'Auto'.
        :param pulumi.Input[int] max_instances: Maximum number of instances for this deployment.
        :param pulumi.Input[int] min_instances: Minimum number of instances for this deployment.
        :param pulumi.Input[str] polling_interval: The polling interval in ISO 8601 format. Only supports duration with precision as low as Seconds.
        :param pulumi.Input[int] target_utilization_percentage: Target CPU usage for the autoscaler.
        """
        # The discriminator is always stored as 'Auto'; the scale_type
        # argument is accepted for signature compatibility only.
        pulumi.set(__self__, "scale_type", 'Auto')
        if max_instances is not None:
            pulumi.set(__self__, "max_instances", max_instances)
        if min_instances is not None:
            pulumi.set(__self__, "min_instances", min_instances)
        if polling_interval is not None:
            pulumi.set(__self__, "polling_interval", polling_interval)
        if target_utilization_percentage is not None:
            pulumi.set(__self__, "target_utilization_percentage", target_utilization_percentage)
    @property
    @pulumi.getter(name="scaleType")
    def scale_type(self) -> pulumi.Input[str]:
        """
        Expected value is 'Auto'.
        """
        return pulumi.get(self, "scale_type")
    @scale_type.setter
    def scale_type(self, value: pulumi.Input[str]):
        """Set the scale type discriminator."""
        pulumi.set(self, "scale_type", value)
    @property
    @pulumi.getter(name="maxInstances")
    def max_instances(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum number of instances for this deployment.
        """
        return pulumi.get(self, "max_instances")
    @max_instances.setter
    def max_instances(self, value: Optional[pulumi.Input[int]]):
        """Set the maximum number of instances for this deployment."""
        pulumi.set(self, "max_instances", value)
    @property
    @pulumi.getter(name="minInstances")
    def min_instances(self) -> Optional[pulumi.Input[int]]:
        """
        Minimum number of instances for this deployment.
        """
        return pulumi.get(self, "min_instances")
    @min_instances.setter
    def min_instances(self, value: Optional[pulumi.Input[int]]):
        """Set the minimum number of instances for this deployment."""
        pulumi.set(self, "min_instances", value)
    @property
    @pulumi.getter(name="pollingInterval")
    def polling_interval(self) -> Optional[pulumi.Input[str]]:
        """
        The polling interval in ISO 8601 format. Only supports duration with precision as low as Seconds.
        """
        return pulumi.get(self, "polling_interval")
    @polling_interval.setter
    def polling_interval(self, value: Optional[pulumi.Input[str]]):
        """Set the autoscaler polling interval (ISO 8601 duration string)."""
        pulumi.set(self, "polling_interval", value)
    @property
    @pulumi.getter(name="targetUtilizationPercentage")
    def target_utilization_percentage(self) -> Optional[pulumi.Input[int]]:
        """
        Target CPU usage for the autoscaler.
        """
        return pulumi.get(self, "target_utilization_percentage")
    @target_utilization_percentage.setter
    def target_utilization_percentage(self, value: Optional[pulumi.Input[int]]):
        """Set the target CPU usage for the autoscaler."""
        pulumi.set(self, "target_utilization_percentage", value)
@pulumi.input_type
class BanditPolicyArgs:
    def __init__(__self__, *,
                 policy_type: pulumi.Input[str],
                 delay_evaluation: Optional[pulumi.Input[int]] = None,
                 evaluation_interval: Optional[pulumi.Input[int]] = None,
                 slack_amount: Optional[pulumi.Input[float]] = None,
                 slack_factor: Optional[pulumi.Input[float]] = None):
        """
        Defines an early termination policy based on slack criteria, and a frequency and delay interval for evaluation.
        :param pulumi.Input[str] policy_type:
               Expected value is 'Bandit'.
        :param pulumi.Input[int] delay_evaluation: Number of intervals by which to delay the first evaluation.
        :param pulumi.Input[int] evaluation_interval: Interval (number of runs) between policy evaluations.
        :param pulumi.Input[float] slack_amount: Absolute distance allowed from the best performing run.
        :param pulumi.Input[float] slack_factor: Ratio of the allowed distance from the best performing run.
        """
        # The discriminator is always stored as 'Bandit'; the policy_type
        # argument is accepted for signature compatibility only.
        pulumi.set(__self__, "policy_type", 'Bandit')
        if delay_evaluation is not None:
            pulumi.set(__self__, "delay_evaluation", delay_evaluation)
        if evaluation_interval is not None:
            pulumi.set(__self__, "evaluation_interval", evaluation_interval)
        if slack_amount is not None:
            pulumi.set(__self__, "slack_amount", slack_amount)
        if slack_factor is not None:
            pulumi.set(__self__, "slack_factor", slack_factor)
    @property
    @pulumi.getter(name="policyType")
    def policy_type(self) -> pulumi.Input[str]:
        """
        Expected value is 'Bandit'.
        """
        return pulumi.get(self, "policy_type")
    @policy_type.setter
    def policy_type(self, value: pulumi.Input[str]):
        """Set the policy type discriminator."""
        pulumi.set(self, "policy_type", value)
    @property
    @pulumi.getter(name="delayEvaluation")
    def delay_evaluation(self) -> Optional[pulumi.Input[int]]:
        """
        Number of intervals by which to delay the first evaluation.
        """
        return pulumi.get(self, "delay_evaluation")
    @delay_evaluation.setter
    def delay_evaluation(self, value: Optional[pulumi.Input[int]]):
        """Set the number of intervals by which to delay the first evaluation."""
        pulumi.set(self, "delay_evaluation", value)
    @property
    @pulumi.getter(name="evaluationInterval")
    def evaluation_interval(self) -> Optional[pulumi.Input[int]]:
        """
        Interval (number of runs) between policy evaluations.
        """
        return pulumi.get(self, "evaluation_interval")
    @evaluation_interval.setter
    def evaluation_interval(self, value: Optional[pulumi.Input[int]]):
        """Set the interval (number of runs) between policy evaluations."""
        pulumi.set(self, "evaluation_interval", value)
    @property
    @pulumi.getter(name="slackAmount")
    def slack_amount(self) -> Optional[pulumi.Input[float]]:
        """
        Absolute distance allowed from the best performing run.
        """
        return pulumi.get(self, "slack_amount")
    @slack_amount.setter
    def slack_amount(self, value: Optional[pulumi.Input[float]]):
        """Set the absolute distance allowed from the best performing run."""
        pulumi.set(self, "slack_amount", value)
    @property
    @pulumi.getter(name="slackFactor")
    def slack_factor(self) -> Optional[pulumi.Input[float]]:
        """
        Ratio of the allowed distance from the best performing run.
        """
        return pulumi.get(self, "slack_factor")
    @slack_factor.setter
    def slack_factor(self, value: Optional[pulumi.Input[float]]):
        """Set the ratio of the allowed distance from the best performing run."""
        pulumi.set(self, "slack_factor", value)
@pulumi.input_type
class BatchDeploymentArgs:
    def __init__(__self__, *,
                 code_configuration: Optional[pulumi.Input['CodeConfigurationArgs']] = None,
                 compute: Optional[pulumi.Input['ComputeConfigurationArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 environment_id: Optional[pulumi.Input[str]] = None,
                 environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 error_threshold: Optional[pulumi.Input[int]] = None,
                 logging_level: Optional[pulumi.Input[Union[str, 'BatchLoggingLevel']]] = None,
                 mini_batch_size: Optional[pulumi.Input[float]] = None,
                 model: Optional[pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']]] = None,
                 output_configuration: Optional[pulumi.Input['BatchOutputConfigurationArgs']] = None,
                 partition_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 retry_settings: Optional[pulumi.Input['BatchRetrySettingsArgs']] = None):
        """
        Batch inference settings per deployment.
        :param pulumi.Input['CodeConfigurationArgs'] code_configuration: Code configuration for the endpoint deployment.
        :param pulumi.Input['ComputeConfigurationArgs'] compute: Configuration for compute binding.
        :param pulumi.Input[str] description: Description of the endpoint deployment.
        :param pulumi.Input[str] environment_id: ARM resource ID of the environment specification for the endpoint deployment.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: Environment variables configuration for the deployment.
        :param pulumi.Input[int] error_threshold: Error threshold, if the error count for the entire input goes above this value,
               the batch inference will be aborted. Range is [-1, int.MaxValue].
               For FileDataset, this value is the count of file failures.
               For TabularDataset, this value is the count of record failures.
               If set to -1 (the lower bound), all failures during batch inference will be ignored.
        :param pulumi.Input[Union[str, 'BatchLoggingLevel']] logging_level: Logging level for batch inference operation.
        :param pulumi.Input[float] mini_batch_size: Size of the mini-batch passed to each batch invocation.
               For FileDataset, this is the number of files per mini-batch.
               For TabularDataset, this is the size of the records in bytes, per mini-batch.
        :param pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']] model: Reference to the model asset for the endpoint deployment.
        :param pulumi.Input['BatchOutputConfigurationArgs'] output_configuration: Output configuration for the batch inference operation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] partition_keys: Partition keys list used for Named partitioning.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: Property dictionary. Properties can be added, but not removed or altered.
        :param pulumi.Input['BatchRetrySettingsArgs'] retry_settings: Retry Settings for the batch inference operation.
        """
        # All fields are optional; only caller-supplied values are stored.
        if code_configuration is not None:
            pulumi.set(__self__, "code_configuration", code_configuration)
        if compute is not None:
            pulumi.set(__self__, "compute", compute)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if environment_id is not None:
            pulumi.set(__self__, "environment_id", environment_id)
        if environment_variables is not None:
            pulumi.set(__self__, "environment_variables", environment_variables)
        if error_threshold is not None:
            pulumi.set(__self__, "error_threshold", error_threshold)
        if logging_level is not None:
            pulumi.set(__self__, "logging_level", logging_level)
        if mini_batch_size is not None:
            pulumi.set(__self__, "mini_batch_size", mini_batch_size)
        if model is not None:
            pulumi.set(__self__, "model", model)
        if output_configuration is not None:
            pulumi.set(__self__, "output_configuration", output_configuration)
        if partition_keys is not None:
            pulumi.set(__self__, "partition_keys", partition_keys)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if retry_settings is not None:
            pulumi.set(__self__, "retry_settings", retry_settings)
    @property
    @pulumi.getter(name="codeConfiguration")
    def code_configuration(self) -> Optional[pulumi.Input['CodeConfigurationArgs']]:
        """
        Code configuration for the endpoint deployment.
        """
        return pulumi.get(self, "code_configuration")
    @code_configuration.setter
    def code_configuration(self, value: Optional[pulumi.Input['CodeConfigurationArgs']]):
        """Set the code configuration for the endpoint deployment."""
        pulumi.set(self, "code_configuration", value)
    @property
    @pulumi.getter
    def compute(self) -> Optional[pulumi.Input['ComputeConfigurationArgs']]:
        """
        Configuration for compute binding.
        """
        return pulumi.get(self, "compute")
    @compute.setter
    def compute(self, value: Optional[pulumi.Input['ComputeConfigurationArgs']]):
        """Set the configuration for compute binding."""
        pulumi.set(self, "compute", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of the endpoint deployment.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        """Set the description of the endpoint deployment."""
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="environmentId")
    def environment_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource ID of the environment specification for the endpoint deployment.
        """
        return pulumi.get(self, "environment_id")
    @environment_id.setter
    def environment_id(self, value: Optional[pulumi.Input[str]]):
        """Set the ARM resource ID of the environment specification."""
        pulumi.set(self, "environment_id", value)
    @property
    @pulumi.getter(name="environmentVariables")
    def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Environment variables configuration for the deployment.
        """
        return pulumi.get(self, "environment_variables")
    @environment_variables.setter
    def environment_variables(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        """Set the environment variables configuration for the deployment."""
        pulumi.set(self, "environment_variables", value)
    @property
    @pulumi.getter(name="errorThreshold")
    def error_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        Error threshold, if the error count for the entire input goes above this value,
        the batch inference will be aborted. Range is [-1, int.MaxValue].
        For FileDataset, this value is the count of file failures.
        For TabularDataset, this value is the count of record failures.
        If set to -1 (the lower bound), all failures during batch inference will be ignored.
        """
        return pulumi.get(self, "error_threshold")
    @error_threshold.setter
    def error_threshold(self, value: Optional[pulumi.Input[int]]):
        """Set the error threshold for the batch inference operation."""
        pulumi.set(self, "error_threshold", value)
    @property
    @pulumi.getter(name="loggingLevel")
    def logging_level(self) -> Optional[pulumi.Input[Union[str, 'BatchLoggingLevel']]]:
        """
        Logging level for batch inference operation.
        """
        return pulumi.get(self, "logging_level")
    @logging_level.setter
    def logging_level(self, value: Optional[pulumi.Input[Union[str, 'BatchLoggingLevel']]]):
        """Set the logging level for the batch inference operation."""
        pulumi.set(self, "logging_level", value)
    @property
    @pulumi.getter(name="miniBatchSize")
    def mini_batch_size(self) -> Optional[pulumi.Input[float]]:
        """
        Size of the mini-batch passed to each batch invocation.
        For FileDataset, this is the number of files per mini-batch.
        For TabularDataset, this is the size of the records in bytes, per mini-batch.
        """
        return pulumi.get(self, "mini_batch_size")
    @mini_batch_size.setter
    def mini_batch_size(self, value: Optional[pulumi.Input[float]]):
        """Set the size of the mini-batch passed to each batch invocation."""
        pulumi.set(self, "mini_batch_size", value)
    @property
    @pulumi.getter
    def model(self) -> Optional[pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']]]:
        """
        Reference to the model asset for the endpoint deployment.
        """
        return pulumi.get(self, "model")
    @model.setter
    def model(self, value: Optional[pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']]]):
        """Set the reference to the model asset for the endpoint deployment."""
        pulumi.set(self, "model", value)
    @property
    @pulumi.getter(name="outputConfiguration")
    def output_configuration(self) -> Optional[pulumi.Input['BatchOutputConfigurationArgs']]:
        """
        Output configuration for the batch inference operation.
        """
        return pulumi.get(self, "output_configuration")
    @output_configuration.setter
    def output_configuration(self, value: Optional[pulumi.Input['BatchOutputConfigurationArgs']]):
        """Set the output configuration for the batch inference operation."""
        pulumi.set(self, "output_configuration", value)
    @property
    @pulumi.getter(name="partitionKeys")
    def partition_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Partition keys list used for Named partitioning.
        """
        return pulumi.get(self, "partition_keys")
    @partition_keys.setter
    def partition_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        """Set the partition keys list used for Named partitioning."""
        pulumi.set(self, "partition_keys", value)
    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Property dictionary. Properties can be added, but not removed or altered.
        """
        return pulumi.get(self, "properties")
    @properties.setter
    def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        """Set the property dictionary."""
        pulumi.set(self, "properties", value)
    @property
    @pulumi.getter(name="retrySettings")
    def retry_settings(self) -> Optional[pulumi.Input['BatchRetrySettingsArgs']]:
        """
        Retry Settings for the batch inference operation.
        """
        return pulumi.get(self, "retry_settings")
    @retry_settings.setter
    def retry_settings(self, value: Optional[pulumi.Input['BatchRetrySettingsArgs']]):
        """Set the retry settings for the batch inference operation."""
        pulumi.set(self, "retry_settings", value)
@pulumi.input_type
class BatchEndpointArgs:
    def __init__(__self__, *,
                 auth_mode: Optional[pulumi.Input[Union[str, 'EndpointAuthMode']]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 keys: Optional[pulumi.Input['EndpointAuthKeysArgs']] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 traffic: Optional[pulumi.Input[Mapping[str, pulumi.Input[int]]]] = None):
        """
        Batch endpoint configuration.
        :param pulumi.Input[Union[str, 'EndpointAuthMode']] auth_mode: Enum to determine endpoint authentication mode.
        :param pulumi.Input[str] description: Description of the inference endpoint.
        :param pulumi.Input['EndpointAuthKeysArgs'] keys: EndpointAuthKeys to set initially on an Endpoint.
               This property will always be returned as null. AuthKey values must be retrieved using the ListKeys API.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: Property dictionary. Properties can be added, but not removed or altered.
        :param pulumi.Input[Mapping[str, pulumi.Input[int]]] traffic: Traffic rules on how the traffic will be routed across deployments.
        """
        # All fields are optional; only caller-supplied values are stored.
        if auth_mode is not None:
            pulumi.set(__self__, "auth_mode", auth_mode)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if keys is not None:
            pulumi.set(__self__, "keys", keys)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if traffic is not None:
            pulumi.set(__self__, "traffic", traffic)
    @property
    @pulumi.getter(name="authMode")
    def auth_mode(self) -> Optional[pulumi.Input[Union[str, 'EndpointAuthMode']]]:
        """
        Enum to determine endpoint authentication mode.
        """
        return pulumi.get(self, "auth_mode")
    @auth_mode.setter
    def auth_mode(self, value: Optional[pulumi.Input[Union[str, 'EndpointAuthMode']]]):
        """Set the endpoint authentication mode."""
        pulumi.set(self, "auth_mode", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of the inference endpoint.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        """Set the description of the inference endpoint."""
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def keys(self) -> Optional[pulumi.Input['EndpointAuthKeysArgs']]:
        """
        EndpointAuthKeys to set initially on an Endpoint.
        This property will always be returned as null. AuthKey values must be retrieved using the ListKeys API.
        """
        return pulumi.get(self, "keys")
    @keys.setter
    def keys(self, value: Optional[pulumi.Input['EndpointAuthKeysArgs']]):
        """Set the EndpointAuthKeys to apply initially on the endpoint."""
        pulumi.set(self, "keys", value)
    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Property dictionary. Properties can be added, but not removed or altered.
        """
        return pulumi.get(self, "properties")
    @properties.setter
    def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        """Set the property dictionary."""
        pulumi.set(self, "properties", value)
    @property
    @pulumi.getter
    def traffic(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[int]]]]:
        """
        Traffic rules on how the traffic will be routed across deployments.
        """
        return pulumi.get(self, "traffic")
    @traffic.setter
    def traffic(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[int]]]]):
        """Set the traffic rules for routing across deployments."""
        pulumi.set(self, "traffic", value)
@pulumi.input_type
class BatchOutputConfigurationArgs:
    def __init__(__self__, *,
                 append_row_file_name: Optional[pulumi.Input[str]] = None,
                 output_action: Optional[pulumi.Input[Union[str, 'BatchOutputAction']]] = None):
        """
        Batch inference output configuration.

        :param pulumi.Input[str] append_row_file_name: Customized output file name for append_row output action.
        :param pulumi.Input[Union[str, 'BatchOutputAction']] output_action: Indicates how the output will be organized.
        """
        # Persist only the fields the caller explicitly supplied.
        for field_name, field_value in (("append_row_file_name", append_row_file_name),
                                        ("output_action", output_action)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="appendRowFileName")
    def append_row_file_name(self) -> Optional[pulumi.Input[str]]:
        """
        Customized output file name for append_row output action.
        """
        return pulumi.get(self, "append_row_file_name")

    @append_row_file_name.setter
    def append_row_file_name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "append_row_file_name", new_value)

    @property
    @pulumi.getter(name="outputAction")
    def output_action(self) -> Optional[pulumi.Input[Union[str, 'BatchOutputAction']]]:
        """
        Indicates how the output will be organized.
        """
        return pulumi.get(self, "output_action")

    @output_action.setter
    def output_action(self, new_value: Optional[pulumi.Input[Union[str, 'BatchOutputAction']]]):
        pulumi.set(self, "output_action", new_value)
@pulumi.input_type
class BatchRetrySettingsArgs:
    def __init__(__self__, *,
                 max_retries: Optional[pulumi.Input[int]] = None,
                 timeout: Optional[pulumi.Input[str]] = None):
        """
        Retry settings for a batch inference operation.

        :param pulumi.Input[int] max_retries: Maximum retry count for a mini-batch
        :param pulumi.Input[str] timeout: Invocation timeout for a mini-batch, in ISO 8601 format.
        """
        # Persist only the fields the caller explicitly supplied.
        for field_name, field_value in (("max_retries", max_retries),
                                        ("timeout", timeout)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="maxRetries")
    def max_retries(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum retry count for a mini-batch
        """
        return pulumi.get(self, "max_retries")

    @max_retries.setter
    def max_retries(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_retries", new_value)

    @property
    @pulumi.getter
    def timeout(self) -> Optional[pulumi.Input[str]]:
        """
        Invocation timeout for a mini-batch, in ISO 8601 format.
        """
        return pulumi.get(self, "timeout")

    @timeout.setter
    def timeout(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "timeout", new_value)
@pulumi.input_type
class CodeConfigurationArgs:
    def __init__(__self__, *,
                 scoring_script: pulumi.Input[str],
                 code_id: Optional[pulumi.Input[str]] = None):
        """
        Configuration for a scoring code asset.

        :param pulumi.Input[str] scoring_script: The script to execute on startup. eg. "score.py"
        :param pulumi.Input[str] code_id: ARM resource ID of the code asset.
        """
        # scoring_script is required; code_id is stored only when supplied.
        fields = {"scoring_script": scoring_script}
        if code_id is not None:
            fields["code_id"] = code_id
        for field_name, field_value in fields.items():
            pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="scoringScript")
    def scoring_script(self) -> pulumi.Input[str]:
        """
        The script to execute on startup. eg. "score.py"
        """
        return pulumi.get(self, "scoring_script")

    @scoring_script.setter
    def scoring_script(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "scoring_script", new_value)

    @property
    @pulumi.getter(name="codeId")
    def code_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource ID of the code asset.
        """
        return pulumi.get(self, "code_id")

    @code_id.setter
    def code_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "code_id", new_value)
@pulumi.input_type
class CodeContainerArgs:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Container for code asset versions.

        :param pulumi.Input[str] description: The asset description text.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The asset property dictionary.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tag dictionary. Tags can be added, removed, and updated.
        """
        # Persist only the fields the caller explicitly supplied.
        supplied = (("description", description),
                    ("properties", properties),
                    ("tags", tags))
        for field_name, field_value in supplied:
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The asset description text.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", new_value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The asset property dictionary.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", new_value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tag dictionary. Tags can be added, removed, and updated.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", new_value)
@pulumi.input_type
class CodeVersionArgs:
    def __init__(__self__, *,
                 path: pulumi.Input[str],
                 datastore_id: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 is_anonymous: Optional[pulumi.Input[bool]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Code asset version details.

        :param pulumi.Input[str] path: The path of the file/directory in the datastore.
        :param pulumi.Input[str] datastore_id: ARM resource ID of the datastore where the asset is located.
        :param pulumi.Input[str] description: The asset description text.
        :param pulumi.Input[bool] is_anonymous: If the name version are system generated (anonymous registration).
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The asset property dictionary.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tag dictionary. Tags can be added, removed, and updated.
        """
        # `path` is the only required argument; everything else is opt-in.
        pulumi.set(__self__, "path", path)
        for arg_name, arg_value in (
                ("datastore_id", datastore_id),
                ("description", description),
                ("is_anonymous", is_anonymous),
                ("properties", properties),
                ("tags", tags)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter
    def path(self) -> pulumi.Input[str]:
        """The path of the file/directory in the datastore."""
        return pulumi.get(self, "path")

    @path.setter
    def path(self, val: pulumi.Input[str]):
        pulumi.set(self, "path", val)

    @property
    @pulumi.getter(name="datastoreId")
    def datastore_id(self) -> Optional[pulumi.Input[str]]:
        """ARM resource ID of the datastore where the asset is located."""
        return pulumi.get(self, "datastore_id")

    @datastore_id.setter
    def datastore_id(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datastore_id", val)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """The asset description text."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", val)

    @property
    @pulumi.getter(name="isAnonymous")
    def is_anonymous(self) -> Optional[pulumi.Input[bool]]:
        """If the name version are system generated (anonymous registration)."""
        return pulumi.get(self, "is_anonymous")

    @is_anonymous.setter
    def is_anonymous(self, val: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_anonymous", val)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """The asset property dictionary."""
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, val: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", val)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Tag dictionary. Tags can be added, removed, and updated."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, val: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", val)
@pulumi.input_type
class CommandJobArgs:
    def __init__(__self__, *,
                 command: pulumi.Input[str],
                 compute: pulumi.Input['ComputeConfigurationArgs'],
                 job_type: pulumi.Input[str],
                 code_id: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 distribution: Optional[pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']]] = None,
                 environment_id: Optional[pulumi.Input[str]] = None,
                 environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 experiment_name: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input[Union['AmlTokenArgs', 'ManagedIdentityArgs']]] = None,
                 input_data_bindings: Optional[pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]]] = None,
                 output_data_bindings: Optional[pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]]] = None,
                 priority: Optional[pulumi.Input[int]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 timeout: Optional[pulumi.Input[str]] = None):
        """
        Command job definition.

        :param pulumi.Input[str] command: The command to execute on startup of the job. eg. "python train.py"
        :param pulumi.Input['ComputeConfigurationArgs'] compute: Compute binding for the job.
        :param pulumi.Input[str] job_type: Enum to determine the type of job.
               Expected value is 'Command'.
        :param pulumi.Input[str] code_id: ARM resource ID of the code asset.
        :param pulumi.Input[str] description: The asset description text.
        :param pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']] distribution: Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
        :param pulumi.Input[str] environment_id: The ARM resource ID of the Environment specification for the job.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: Environment variables included in the job.
        :param pulumi.Input[str] experiment_name: The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
        :param pulumi.Input[Union['AmlTokenArgs', 'ManagedIdentityArgs']] identity: Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, or null.
               Defaults to AmlToken if null.
        :param pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]] input_data_bindings: Mapping of input data bindings used in the job.
        :param pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]] output_data_bindings: Mapping of output data bindings used in the job.
        :param pulumi.Input[int] priority: Job priority for scheduling policy. Only applies to AMLCompute.
               Private preview feature and only available to users on the allow list.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The asset property dictionary.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tag dictionary. Tags can be added, removed, and updated.
        :param pulumi.Input[str] timeout: The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
        """
        pulumi.set(__self__, "command", command)
        pulumi.set(__self__, "compute", compute)
        # job_type is a polymorphic discriminator: it is always pinned to
        # 'Command' regardless of the value passed in.
        pulumi.set(__self__, "job_type", 'Command')
        # Persist only the optional arguments the caller actually supplied.
        for arg_name, arg_value in (
                ("code_id", code_id),
                ("description", description),
                ("distribution", distribution),
                ("environment_id", environment_id),
                ("environment_variables", environment_variables),
                ("experiment_name", experiment_name),
                ("identity", identity),
                ("input_data_bindings", input_data_bindings),
                ("output_data_bindings", output_data_bindings),
                ("priority", priority),
                ("properties", properties),
                ("tags", tags),
                ("timeout", timeout)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter
    def command(self) -> pulumi.Input[str]:
        """The command to execute on startup of the job. eg. "python train.py" """
        return pulumi.get(self, "command")

    @command.setter
    def command(self, val: pulumi.Input[str]):
        pulumi.set(self, "command", val)

    @property
    @pulumi.getter
    def compute(self) -> pulumi.Input['ComputeConfigurationArgs']:
        """Compute binding for the job."""
        return pulumi.get(self, "compute")

    @compute.setter
    def compute(self, val: pulumi.Input['ComputeConfigurationArgs']):
        pulumi.set(self, "compute", val)

    @property
    @pulumi.getter(name="jobType")
    def job_type(self) -> pulumi.Input[str]:
        """
        Enum to determine the type of job.
        Expected value is 'Command'.
        """
        return pulumi.get(self, "job_type")

    @job_type.setter
    def job_type(self, val: pulumi.Input[str]):
        pulumi.set(self, "job_type", val)

    @property
    @pulumi.getter(name="codeId")
    def code_id(self) -> Optional[pulumi.Input[str]]:
        """ARM resource ID of the code asset."""
        return pulumi.get(self, "code_id")

    @code_id.setter
    def code_id(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "code_id", val)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """The asset description text."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", val)

    @property
    @pulumi.getter
    def distribution(self) -> Optional[pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']]]:
        """Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null."""
        return pulumi.get(self, "distribution")

    @distribution.setter
    def distribution(self, val: Optional[pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']]]):
        pulumi.set(self, "distribution", val)

    @property
    @pulumi.getter(name="environmentId")
    def environment_id(self) -> Optional[pulumi.Input[str]]:
        """The ARM resource ID of the Environment specification for the job."""
        return pulumi.get(self, "environment_id")

    @environment_id.setter
    def environment_id(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "environment_id", val)

    @property
    @pulumi.getter(name="environmentVariables")
    def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Environment variables included in the job."""
        return pulumi.get(self, "environment_variables")

    @environment_variables.setter
    def environment_variables(self, val: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "environment_variables", val)

    @property
    @pulumi.getter(name="experimentName")
    def experiment_name(self) -> Optional[pulumi.Input[str]]:
        """The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment."""
        return pulumi.get(self, "experiment_name")

    @experiment_name.setter
    def experiment_name(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "experiment_name", val)

    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input[Union['AmlTokenArgs', 'ManagedIdentityArgs']]]:
        """
        Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, or null.
        Defaults to AmlToken if null.
        """
        return pulumi.get(self, "identity")

    @identity.setter
    def identity(self, val: Optional[pulumi.Input[Union['AmlTokenArgs', 'ManagedIdentityArgs']]]):
        pulumi.set(self, "identity", val)

    @property
    @pulumi.getter(name="inputDataBindings")
    def input_data_bindings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]]]:
        """Mapping of input data bindings used in the job."""
        return pulumi.get(self, "input_data_bindings")

    @input_data_bindings.setter
    def input_data_bindings(self, val: Optional[pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]]]):
        pulumi.set(self, "input_data_bindings", val)

    @property
    @pulumi.getter(name="outputDataBindings")
    def output_data_bindings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]]]:
        """Mapping of output data bindings used in the job."""
        return pulumi.get(self, "output_data_bindings")

    @output_data_bindings.setter
    def output_data_bindings(self, val: Optional[pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]]]):
        pulumi.set(self, "output_data_bindings", val)

    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[int]]:
        """
        Job priority for scheduling policy. Only applies to AMLCompute.
        Private preview feature and only available to users on the allow list.
        """
        return pulumi.get(self, "priority")

    @priority.setter
    def priority(self, val: Optional[pulumi.Input[int]]):
        pulumi.set(self, "priority", val)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """The asset property dictionary."""
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, val: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", val)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Tag dictionary. Tags can be added, removed, and updated."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, val: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", val)

    @property
    @pulumi.getter
    def timeout(self) -> Optional[pulumi.Input[str]]:
        """The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds."""
        return pulumi.get(self, "timeout")

    @timeout.setter
    def timeout(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "timeout", val)
@pulumi.input_type
class ComputeBindingArgs:
    def __init__(__self__, *,
                 compute_id: Optional[pulumi.Input[str]] = None,
                 node_count: Optional[pulumi.Input[int]] = None):
        """
        Compute binding definition.

        :param pulumi.Input[str] compute_id: ID of the compute resource.
        :param pulumi.Input[int] node_count: Number of nodes.
        """
        # Persist only the arguments the caller actually supplied.
        for arg_name, arg_value in (("compute_id", compute_id),
                                    ("node_count", node_count)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="computeId")
    def compute_id(self) -> Optional[pulumi.Input[str]]:
        """ID of the compute resource."""
        return pulumi.get(self, "compute_id")

    @compute_id.setter
    def compute_id(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "compute_id", val)

    @property
    @pulumi.getter(name="nodeCount")
    def node_count(self) -> Optional[pulumi.Input[int]]:
        """Number of nodes."""
        return pulumi.get(self, "node_count")

    @node_count.setter
    def node_count(self, val: Optional[pulumi.Input[int]]):
        pulumi.set(self, "node_count", val)
@pulumi.input_type
class ComputeConfigurationArgs:
    def __init__(__self__, *,
                 instance_count: Optional[pulumi.Input[int]] = None,
                 instance_type: Optional[pulumi.Input[str]] = None,
                 is_local: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 target: Optional[pulumi.Input[str]] = None):
        """
        Configuration for compute binding.

        :param pulumi.Input[int] instance_count: Number of instances or nodes.
        :param pulumi.Input[str] instance_type: SKU type to run on.
        :param pulumi.Input[bool] is_local: Set to true for jobs running on local compute.
        :param pulumi.Input[str] location: Location for virtual cluster run.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: Additional properties.
        :param pulumi.Input[str] target: ARM resource ID of the compute resource.
        """
        # Persist only the arguments the caller actually supplied.
        for arg_name, arg_value in (
                ("instance_count", instance_count),
                ("instance_type", instance_type),
                ("is_local", is_local),
                ("location", location),
                ("properties", properties),
                ("target", target)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="instanceCount")
    def instance_count(self) -> Optional[pulumi.Input[int]]:
        """Number of instances or nodes."""
        return pulumi.get(self, "instance_count")

    @instance_count.setter
    def instance_count(self, val: Optional[pulumi.Input[int]]):
        pulumi.set(self, "instance_count", val)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> Optional[pulumi.Input[str]]:
        """SKU type to run on."""
        return pulumi.get(self, "instance_type")

    @instance_type.setter
    def instance_type(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_type", val)

    @property
    @pulumi.getter(name="isLocal")
    def is_local(self) -> Optional[pulumi.Input[bool]]:
        """Set to true for jobs running on local compute."""
        return pulumi.get(self, "is_local")

    @is_local.setter
    def is_local(self, val: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_local", val)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """Location for virtual cluster run."""
        return pulumi.get(self, "location")

    @location.setter
    def location(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", val)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Additional properties."""
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, val: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", val)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """ARM resource ID of the compute resource."""
        return pulumi.get(self, "target")

    @target.setter
    def target(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", val)
@pulumi.input_type
class ComputeInstancePropertiesArgs:
    def __init__(__self__, *,
                 application_sharing_policy: Optional[pulumi.Input[Union[str, 'ApplicationSharingPolicy']]] = None,
                 compute_instance_authorization_type: Optional[pulumi.Input[Union[str, 'ComputeInstanceAuthorizationType']]] = None,
                 personal_compute_instance_settings: Optional[pulumi.Input['PersonalComputeInstanceSettingsArgs']] = None,
                 setup_scripts: Optional[pulumi.Input['SetupScriptsArgs']] = None,
                 ssh_settings: Optional[pulumi.Input['ComputeInstanceSshSettingsArgs']] = None,
                 subnet: Optional[pulumi.Input['ResourceIdArgs']] = None,
                 vm_size: Optional[pulumi.Input[str]] = None):
        """
        Compute Instance properties

        :param pulumi.Input[Union[str, 'ApplicationSharingPolicy']] application_sharing_policy: Policy for sharing applications on this compute instance among users of parent workspace. If Personal, only the creator can access applications on this compute instance. When Shared, any workspace user can access applications on this instance depending on his/her assigned role.
        :param pulumi.Input[Union[str, 'ComputeInstanceAuthorizationType']] compute_instance_authorization_type: The Compute Instance Authorization type. Available values are personal (default).
        :param pulumi.Input['PersonalComputeInstanceSettingsArgs'] personal_compute_instance_settings: Settings for a personal compute instance.
        :param pulumi.Input['SetupScriptsArgs'] setup_scripts: Details of customized scripts to execute for setting up the cluster.
        :param pulumi.Input['ComputeInstanceSshSettingsArgs'] ssh_settings: Specifies policy and settings for SSH access.
        :param pulumi.Input['ResourceIdArgs'] subnet: Virtual network subnet resource ID the compute nodes belong to.
        :param pulumi.Input[str] vm_size: Virtual Machine Size
        """
        # These two fields are always set, falling back to the service
        # defaults when the caller leaves them unspecified.
        pulumi.set(__self__, "application_sharing_policy",
                   'Shared' if application_sharing_policy is None else application_sharing_policy)
        pulumi.set(__self__, "compute_instance_authorization_type",
                   'personal' if compute_instance_authorization_type is None else compute_instance_authorization_type)
        # The remaining fields are persisted only when supplied.
        for arg_name, arg_value in (
                ("personal_compute_instance_settings", personal_compute_instance_settings),
                ("setup_scripts", setup_scripts),
                ("ssh_settings", ssh_settings),
                ("subnet", subnet),
                ("vm_size", vm_size)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="applicationSharingPolicy")
    def application_sharing_policy(self) -> Optional[pulumi.Input[Union[str, 'ApplicationSharingPolicy']]]:
        """
        Policy for sharing applications on this compute instance among users of parent workspace. If Personal, only the creator can access applications on this compute instance. When Shared, any workspace user can access applications on this instance depending on his/her assigned role.
        """
        return pulumi.get(self, "application_sharing_policy")

    @application_sharing_policy.setter
    def application_sharing_policy(self, val: Optional[pulumi.Input[Union[str, 'ApplicationSharingPolicy']]]):
        pulumi.set(self, "application_sharing_policy", val)

    @property
    @pulumi.getter(name="computeInstanceAuthorizationType")
    def compute_instance_authorization_type(self) -> Optional[pulumi.Input[Union[str, 'ComputeInstanceAuthorizationType']]]:
        """The Compute Instance Authorization type. Available values are personal (default)."""
        return pulumi.get(self, "compute_instance_authorization_type")

    @compute_instance_authorization_type.setter
    def compute_instance_authorization_type(self, val: Optional[pulumi.Input[Union[str, 'ComputeInstanceAuthorizationType']]]):
        pulumi.set(self, "compute_instance_authorization_type", val)

    @property
    @pulumi.getter(name="personalComputeInstanceSettings")
    def personal_compute_instance_settings(self) -> Optional[pulumi.Input['PersonalComputeInstanceSettingsArgs']]:
        """Settings for a personal compute instance."""
        return pulumi.get(self, "personal_compute_instance_settings")

    @personal_compute_instance_settings.setter
    def personal_compute_instance_settings(self, val: Optional[pulumi.Input['PersonalComputeInstanceSettingsArgs']]):
        pulumi.set(self, "personal_compute_instance_settings", val)

    @property
    @pulumi.getter(name="setupScripts")
    def setup_scripts(self) -> Optional[pulumi.Input['SetupScriptsArgs']]:
        """Details of customized scripts to execute for setting up the cluster."""
        return pulumi.get(self, "setup_scripts")

    @setup_scripts.setter
    def setup_scripts(self, val: Optional[pulumi.Input['SetupScriptsArgs']]):
        pulumi.set(self, "setup_scripts", val)

    @property
    @pulumi.getter(name="sshSettings")
    def ssh_settings(self) -> Optional[pulumi.Input['ComputeInstanceSshSettingsArgs']]:
        """Specifies policy and settings for SSH access."""
        return pulumi.get(self, "ssh_settings")

    @ssh_settings.setter
    def ssh_settings(self, val: Optional[pulumi.Input['ComputeInstanceSshSettingsArgs']]):
        pulumi.set(self, "ssh_settings", val)

    @property
    @pulumi.getter
    def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:
        """Virtual network subnet resource ID the compute nodes belong to."""
        return pulumi.get(self, "subnet")

    @subnet.setter
    def subnet(self, val: Optional[pulumi.Input['ResourceIdArgs']]):
        pulumi.set(self, "subnet", val)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> Optional[pulumi.Input[str]]:
        """Virtual Machine Size"""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vm_size", val)
@pulumi.input_type
class ComputeInstanceSshSettingsArgs:
    def __init__(__self__, *,
                 admin_public_key: Optional[pulumi.Input[str]] = None,
                 ssh_public_access: Optional[pulumi.Input[Union[str, 'SshPublicAccess']]] = None):
        """
        Specifies policy and settings for SSH access.

        :param pulumi.Input[str] admin_public_key: Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t rsa -b 2048" to generate your SSH key pairs.
        :param pulumi.Input[Union[str, 'SshPublicAccess']] ssh_public_access: State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the public ssh port is open and accessible according to the VNet/subnet policy if applicable.
        """
        if admin_public_key is not None:
            pulumi.set(__self__, "admin_public_key", admin_public_key)
        # Always recorded; SSH access is closed by default.
        pulumi.set(__self__, "ssh_public_access",
                   'Disabled' if ssh_public_access is None else ssh_public_access)

    @property
    @pulumi.getter(name="adminPublicKey")
    def admin_public_key(self) -> Optional[pulumi.Input[str]]:
        """Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t rsa -b 2048" to generate your SSH key pairs."""
        return pulumi.get(self, "admin_public_key")

    @admin_public_key.setter
    def admin_public_key(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "admin_public_key", val)

    @property
    @pulumi.getter(name="sshPublicAccess")
    def ssh_public_access(self) -> Optional[pulumi.Input[Union[str, 'SshPublicAccess']]]:
        """
        State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the public ssh port is open and accessible according to the VNet/subnet policy if applicable.
        """
        return pulumi.get(self, "ssh_public_access")

    @ssh_public_access.setter
    def ssh_public_access(self, val: Optional[pulumi.Input[Union[str, 'SshPublicAccess']]]):
        pulumi.set(self, "ssh_public_access", val)
@pulumi.input_type
class ComputeInstanceArgs:
    def __init__(__self__, *,
                 compute_type: pulumi.Input[str],
                 compute_location: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input['ComputeInstancePropertiesArgs']] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        An Azure Machine Learning compute instance.

        :param pulumi.Input[str] compute_type: The type of compute
               Expected value is 'ComputeInstance'.
        :param pulumi.Input[str] compute_location: Location for the underlying compute
        :param pulumi.Input[str] description: The description of the Machine Learning compute.
        :param pulumi.Input['ComputeInstancePropertiesArgs'] properties: Compute Instance properties
        :param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
        """
        # compute_type is a polymorphic discriminator: it is always pinned to
        # 'ComputeInstance' regardless of the value passed in.
        pulumi.set(__self__, "compute_type", 'ComputeInstance')
        # Persist only the optional arguments the caller actually supplied.
        for arg_name, arg_value in (
                ("compute_location", compute_location),
                ("description", description),
                ("properties", properties),
                ("resource_id", resource_id)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="computeType")
    def compute_type(self) -> pulumi.Input[str]:
        """
        The type of compute
        Expected value is 'ComputeInstance'.
        """
        return pulumi.get(self, "compute_type")

    @compute_type.setter
    def compute_type(self, val: pulumi.Input[str]):
        pulumi.set(self, "compute_type", val)

    @property
    @pulumi.getter(name="computeLocation")
    def compute_location(self) -> Optional[pulumi.Input[str]]:
        """Location for the underlying compute"""
        return pulumi.get(self, "compute_location")

    @compute_location.setter
    def compute_location(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "compute_location", val)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """The description of the Machine Learning compute."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", val)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input['ComputeInstancePropertiesArgs']]:
        """Compute Instance properties"""
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, val: Optional[pulumi.Input['ComputeInstancePropertiesArgs']]):
        pulumi.set(self, "properties", val)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """ARM resource id of the underlying compute"""
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", val)
@pulumi.input_type
class ContainerResourceRequirementsArgs:
    def __init__(__self__, *,
                 cpu: Optional[pulumi.Input[float]] = None,
                 cpu_limit: Optional[pulumi.Input[float]] = None,
                 fpga: Optional[pulumi.Input[int]] = None,
                 gpu: Optional[pulumi.Input[int]] = None,
                 memory_in_gb: Optional[pulumi.Input[float]] = None,
                 memory_in_gb_limit: Optional[pulumi.Input[float]] = None):
        """
        The resource requirements for the container (cpu and memory).

        :param pulumi.Input[float] cpu: The minimum amount of CPU cores to be used by the container. More info:
               https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        :param pulumi.Input[float] cpu_limit: The maximum amount of CPU cores allowed to be used by the container. More info:
               https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        :param pulumi.Input[int] fpga: The number of FPGA PCIE devices exposed to the container. Must be multiple of 2.
        :param pulumi.Input[int] gpu: The number of GPU cores in the container.
        :param pulumi.Input[float] memory_in_gb: The minimum amount of memory (in GB) to be used by the container. More info:
               https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        :param pulumi.Input[float] memory_in_gb_limit: The maximum amount of memory (in GB) allowed to be used by the container. More info:
               https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        # Persist only the arguments the caller actually supplied.
        for arg_name, arg_value in (
                ("cpu", cpu),
                ("cpu_limit", cpu_limit),
                ("fpga", fpga),
                ("gpu", gpu),
                ("memory_in_gb", memory_in_gb),
                ("memory_in_gb_limit", memory_in_gb_limit)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter
    def cpu(self) -> Optional[pulumi.Input[float]]:
        """
        The minimum amount of CPU cores to be used by the container. More info:
        https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        return pulumi.get(self, "cpu")

    @cpu.setter
    def cpu(self, val: Optional[pulumi.Input[float]]):
        pulumi.set(self, "cpu", val)

    @property
    @pulumi.getter(name="cpuLimit")
    def cpu_limit(self) -> Optional[pulumi.Input[float]]:
        """
        The maximum amount of CPU cores allowed to be used by the container. More info:
        https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        return pulumi.get(self, "cpu_limit")

    @cpu_limit.setter
    def cpu_limit(self, val: Optional[pulumi.Input[float]]):
        pulumi.set(self, "cpu_limit", val)

    @property
    @pulumi.getter
    def fpga(self) -> Optional[pulumi.Input[int]]:
        """The number of FPGA PCIE devices exposed to the container. Must be multiple of 2."""
        return pulumi.get(self, "fpga")

    @fpga.setter
    def fpga(self, val: Optional[pulumi.Input[int]]):
        pulumi.set(self, "fpga", val)

    @property
    @pulumi.getter
    def gpu(self) -> Optional[pulumi.Input[int]]:
        """The number of GPU cores in the container."""
        return pulumi.get(self, "gpu")

    @gpu.setter
    def gpu(self, val: Optional[pulumi.Input[int]]):
        pulumi.set(self, "gpu", val)

    @property
    @pulumi.getter(name="memoryInGB")
    def memory_in_gb(self) -> Optional[pulumi.Input[float]]:
        """
        The minimum amount of memory (in GB) to be used by the container. More info:
        https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        return pulumi.get(self, "memory_in_gb")

    @memory_in_gb.setter
    def memory_in_gb(self, val: Optional[pulumi.Input[float]]):
        pulumi.set(self, "memory_in_gb", val)

    @property
    @pulumi.getter(name="memoryInGBLimit")
    def memory_in_gb_limit(self) -> Optional[pulumi.Input[float]]:
        """
        The maximum amount of memory (in GB) allowed to be used by the container. More info:
        https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        return pulumi.get(self, "memory_in_gb_limit")

    @memory_in_gb_limit.setter
    def memory_in_gb_limit(self, val: Optional[pulumi.Input[float]]):
        pulumi.set(self, "memory_in_gb_limit", val)
@pulumi.input_type
class CosmosDbSettingsArgs:
    def __init__(__self__, *,
                 collections_throughput: Optional[pulumi.Input[int]] = None):
        """
        Settings for the workspace's Cosmos DB backing store.

        :param pulumi.Input[int] collections_throughput: The throughput of the collections in cosmosdb database
        """
        if collections_throughput is None:
            return  # nothing supplied; leave the field unset
        pulumi.set(__self__, "collections_throughput", collections_throughput)

    @property
    @pulumi.getter(name="collectionsThroughput")
    def collections_throughput(self) -> Optional[pulumi.Input[int]]:
        """The throughput of the collections in cosmosdb database"""
        return pulumi.get(self, "collections_throughput")

    @collections_throughput.setter
    def collections_throughput(self, val: Optional[pulumi.Input[int]]):
        pulumi.set(self, "collections_throughput", val)
@pulumi.input_type
class CreateServiceRequestEnvironmentImageRequestArgs:
    def __init__(__self__, *,
                 assets: Optional[pulumi.Input[Sequence[pulumi.Input['ImageAssetArgs']]]] = None,
                 driver_program: Optional[pulumi.Input[str]] = None,
                 environment: Optional[pulumi.Input['EnvironmentImageRequestEnvironmentArgs']] = None,
                 environment_reference: Optional[pulumi.Input['EnvironmentImageRequestEnvironmentReferenceArgs']] = None,
                 model_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 models: Optional[pulumi.Input[Sequence[pulumi.Input['ModelArgs']]]] = None):
        """
        The Environment, models and assets needed for inferencing.

        :param pulumi.Input[Sequence[pulumi.Input['ImageAssetArgs']]] assets: The list of assets.
        :param pulumi.Input[str] driver_program: The name of the driver file.
        :param pulumi.Input['EnvironmentImageRequestEnvironmentArgs'] environment: The details of the AZURE ML environment.
        :param pulumi.Input['EnvironmentImageRequestEnvironmentReferenceArgs'] environment_reference: The unique identifying details of the AZURE ML environment.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] model_ids: The list of model Ids.
        :param pulumi.Input[Sequence[pulumi.Input['ModelArgs']]] models: The list of models.
        """
        # Record only the arguments the caller actually supplied; unset
        # arguments are left absent rather than stored as None.
        for attr, arg in (
                ("assets", assets),
                ("driver_program", driver_program),
                ("environment", environment),
                ("environment_reference", environment_reference),
                ("model_ids", model_ids),
                ("models", models)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def assets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ImageAssetArgs']]]]:
        """
        The list of assets.
        """
        return pulumi.get(self, "assets")

    @assets.setter
    def assets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ImageAssetArgs']]]]):
        pulumi.set(self, "assets", value)

    @property
    @pulumi.getter(name="driverProgram")
    def driver_program(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the driver file.
        """
        return pulumi.get(self, "driver_program")

    @driver_program.setter
    def driver_program(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "driver_program", value)

    @property
    @pulumi.getter
    def environment(self) -> Optional[pulumi.Input['EnvironmentImageRequestEnvironmentArgs']]:
        """
        The details of the AZURE ML environment.
        """
        return pulumi.get(self, "environment")

    @environment.setter
    def environment(self, value: Optional[pulumi.Input['EnvironmentImageRequestEnvironmentArgs']]):
        pulumi.set(self, "environment", value)

    @property
    @pulumi.getter(name="environmentReference")
    def environment_reference(self) -> Optional[pulumi.Input['EnvironmentImageRequestEnvironmentReferenceArgs']]:
        """
        The unique identifying details of the AZURE ML environment.
        """
        return pulumi.get(self, "environment_reference")

    @environment_reference.setter
    def environment_reference(self, value: Optional[pulumi.Input['EnvironmentImageRequestEnvironmentReferenceArgs']]):
        pulumi.set(self, "environment_reference", value)

    @property
    @pulumi.getter(name="modelIds")
    def model_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of model Ids.
        """
        return pulumi.get(self, "model_ids")

    @model_ids.setter
    def model_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "model_ids", value)

    @property
    @pulumi.getter
    def models(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ModelArgs']]]]:
        """
        The list of models.
        """
        return pulumi.get(self, "models")

    @models.setter
    def models(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ModelArgs']]]]):
        pulumi.set(self, "models", value)
@pulumi.input_type
class CreateServiceRequestKeysArgs:
    def __init__(__self__, *,
                 primary_key: Optional[pulumi.Input[str]] = None,
                 secondary_key: Optional[pulumi.Input[str]] = None):
        """
        The authentication keys.

        :param pulumi.Input[str] primary_key: The primary key.
        :param pulumi.Input[str] secondary_key: The secondary key.
        """
        # Only keys that were actually provided are stored.
        for attr, arg in (("primary_key", primary_key),
                          ("secondary_key", secondary_key)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> Optional[pulumi.Input[str]]:
        """
        The primary key.
        """
        return pulumi.get(self, "primary_key")

    @primary_key.setter
    def primary_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_key", value)

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> Optional[pulumi.Input[str]]:
        """
        The secondary key.
        """
        return pulumi.get(self, "secondary_key")

    @secondary_key.setter
    def secondary_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secondary_key", value)
@pulumi.input_type
class DataContainerArgs:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Container for data asset versions.

        :param pulumi.Input[str] description: The asset description text.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The asset property dictionary.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tag dictionary. Tags can be added, removed, and updated.
        """
        # All arguments are optional; skip the ones the caller left unset.
        for attr, arg in (("description", description),
                          ("properties", properties),
                          ("tags", tags)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The asset description text.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The asset property dictionary.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tag dictionary. Tags can be added, removed, and updated.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class DataFactoryArgs:
    def __init__(__self__, *,
                 compute_type: pulumi.Input[str],
                 compute_location: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        A DataFactory compute.

        :param pulumi.Input[str] compute_type: The type of compute
               Expected value is 'DataFactory'.
        :param pulumi.Input[str] compute_location: Location for the underlying compute
        :param pulumi.Input[str] description: The description of the Machine Learning compute.
        :param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
        """
        # The discriminator is always forced to 'DataFactory', regardless of
        # what the caller passed for compute_type.
        pulumi.set(__self__, "compute_type", 'DataFactory')
        for attr, arg in (("compute_location", compute_location),
                          ("description", description),
                          ("resource_id", resource_id)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="computeType")
    def compute_type(self) -> pulumi.Input[str]:
        """
        The type of compute
        Expected value is 'DataFactory'.
        """
        return pulumi.get(self, "compute_type")

    @compute_type.setter
    def compute_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "compute_type", value)

    @property
    @pulumi.getter(name="computeLocation")
    def compute_location(self) -> Optional[pulumi.Input[str]]:
        """
        Location for the underlying compute
        """
        return pulumi.get(self, "compute_location")

    @compute_location.setter
    def compute_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "compute_location", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the Machine Learning compute.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource id of the underlying compute
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
@pulumi.input_type
class DataLakeAnalyticsPropertiesArgs:
    def __init__(__self__, *,
                 data_lake_store_account_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] data_lake_store_account_name: DataLake Store Account Name
        """
        # Guard clause: nothing is stored when the account name is unset.
        if data_lake_store_account_name is None:
            return
        pulumi.set(__self__, "data_lake_store_account_name", data_lake_store_account_name)

    @property
    @pulumi.getter(name="dataLakeStoreAccountName")
    def data_lake_store_account_name(self) -> Optional[pulumi.Input[str]]:
        """
        DataLake Store Account Name
        """
        return pulumi.get(self, "data_lake_store_account_name")

    @data_lake_store_account_name.setter
    def data_lake_store_account_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data_lake_store_account_name", value)
@pulumi.input_type
class DataLakeAnalyticsArgs:
    def __init__(__self__, *,
                 compute_type: pulumi.Input[str],
                 compute_location: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input['DataLakeAnalyticsPropertiesArgs']] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        A DataLakeAnalytics compute.
        :param pulumi.Input[str] compute_type: The type of compute
        Expected value is 'DataLakeAnalytics'.
        :param pulumi.Input[str] compute_location: Location for the underlying compute
        :param pulumi.Input[str] description: The description of the Machine Learning compute.
        :param pulumi.Input['DataLakeAnalyticsPropertiesArgs'] properties: Data Lake Analytics specific properties.
        :param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
        """
        # The discriminator is hard-coded; the caller's compute_type value is ignored.
        pulumi.set(__self__, "compute_type", 'DataLakeAnalytics')
        if compute_location is not None:
            pulumi.set(__self__, "compute_location", compute_location)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)

    @property
    @pulumi.getter(name="computeType")
    def compute_type(self) -> pulumi.Input[str]:
        """
        The type of compute
        Expected value is 'DataLakeAnalytics'.
        """
        return pulumi.get(self, "compute_type")

    @compute_type.setter
    def compute_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "compute_type", value)

    @property
    @pulumi.getter(name="computeLocation")
    def compute_location(self) -> Optional[pulumi.Input[str]]:
        """
        Location for the underlying compute
        """
        return pulumi.get(self, "compute_location")

    @compute_location.setter
    def compute_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "compute_location", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the Machine Learning compute.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input['DataLakeAnalyticsPropertiesArgs']]:
        """
        Data Lake Analytics specific properties.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input['DataLakeAnalyticsPropertiesArgs']]):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource id of the underlying compute
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
@pulumi.input_type
class DataPathAssetReferenceArgs:
    def __init__(__self__, *,
                 reference_type: pulumi.Input[str],
                 datastore_id: Optional[pulumi.Input[str]] = None,
                 path: Optional[pulumi.Input[str]] = None):
        """
        Reference to an asset via its path in a datastore.

        :param pulumi.Input[str] reference_type: Enum to determine which reference method to use for an asset.
               Expected value is 'DataPath'.
        :param pulumi.Input[str] datastore_id: ARM resource ID of the datastore where the asset is located.
        :param pulumi.Input[str] path: The path of the file/directory in the datastore.
        """
        # The discriminator is pinned to 'DataPath'; reference_type from the
        # caller is not consulted.
        pulumi.set(__self__, "reference_type", 'DataPath')
        for attr, arg in (("datastore_id", datastore_id), ("path", path)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="referenceType")
    def reference_type(self) -> pulumi.Input[str]:
        """
        Enum to determine which reference method to use for an asset.
        Expected value is 'DataPath'.
        """
        return pulumi.get(self, "reference_type")

    @reference_type.setter
    def reference_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "reference_type", value)

    @property
    @pulumi.getter(name="datastoreId")
    def datastore_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource ID of the datastore where the asset is located.
        """
        return pulumi.get(self, "datastore_id")

    @datastore_id.setter
    def datastore_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datastore_id", value)

    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        """
        The path of the file/directory in the datastore.
        """
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)
@pulumi.input_type
class DataVersionArgs:
    def __init__(__self__, *,
                 path: pulumi.Input[str],
                 dataset_type: Optional[pulumi.Input[Union[str, 'DatasetType']]] = None,
                 datastore_id: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 is_anonymous: Optional[pulumi.Input[bool]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Data asset version details.

        :param pulumi.Input[str] path: The path of the file/directory in the datastore.
        :param pulumi.Input[Union[str, 'DatasetType']] dataset_type: The Format of dataset.
        :param pulumi.Input[str] datastore_id: ARM resource ID of the datastore where the asset is located.
        :param pulumi.Input[str] description: The asset description text.
        :param pulumi.Input[bool] is_anonymous: If the name version are system generated (anonymous registration).
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The asset property dictionary.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tag dictionary. Tags can be added, removed, and updated.
        """
        # "path" is the only required field; the rest are stored when supplied.
        pulumi.set(__self__, "path", path)
        for attr, arg in (
                ("dataset_type", dataset_type),
                ("datastore_id", datastore_id),
                ("description", description),
                ("is_anonymous", is_anonymous),
                ("properties", properties),
                ("tags", tags)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def path(self) -> pulumi.Input[str]:
        """
        The path of the file/directory in the datastore.
        """
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: pulumi.Input[str]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter(name="datasetType")
    def dataset_type(self) -> Optional[pulumi.Input[Union[str, 'DatasetType']]]:
        """
        The Format of dataset.
        """
        return pulumi.get(self, "dataset_type")

    @dataset_type.setter
    def dataset_type(self, value: Optional[pulumi.Input[Union[str, 'DatasetType']]]):
        pulumi.set(self, "dataset_type", value)

    @property
    @pulumi.getter(name="datastoreId")
    def datastore_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource ID of the datastore where the asset is located.
        """
        return pulumi.get(self, "datastore_id")

    @datastore_id.setter
    def datastore_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datastore_id", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The asset description text.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="isAnonymous")
    def is_anonymous(self) -> Optional[pulumi.Input[bool]]:
        """
        If the name version are system generated (anonymous registration).
        """
        return pulumi.get(self, "is_anonymous")

    @is_anonymous.setter
    def is_anonymous(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_anonymous", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The asset property dictionary.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tag dictionary. Tags can be added, removed, and updated.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class DatabricksPropertiesArgs:
    def __init__(__self__, *,
                 databricks_access_token: Optional[pulumi.Input[str]] = None,
                 workspace_url: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] databricks_access_token: Databricks access token
        :param pulumi.Input[str] workspace_url: Workspace Url
        """
        # Both fields are optional; record only what was given.
        for attr, arg in (("databricks_access_token", databricks_access_token),
                          ("workspace_url", workspace_url)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="databricksAccessToken")
    def databricks_access_token(self) -> Optional[pulumi.Input[str]]:
        """
        Databricks access token
        """
        return pulumi.get(self, "databricks_access_token")

    @databricks_access_token.setter
    def databricks_access_token(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "databricks_access_token", value)

    @property
    @pulumi.getter(name="workspaceUrl")
    def workspace_url(self) -> Optional[pulumi.Input[str]]:
        """
        Workspace Url
        """
        return pulumi.get(self, "workspace_url")

    @workspace_url.setter
    def workspace_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workspace_url", value)
@pulumi.input_type
class DatabricksArgs:
    def __init__(__self__, *,
                 compute_type: pulumi.Input[str],
                 compute_location: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input['DatabricksPropertiesArgs']] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        A Databricks compute.
        :param pulumi.Input[str] compute_type: The type of compute
        Expected value is 'Databricks'.
        :param pulumi.Input[str] compute_location: Location for the underlying compute
        :param pulumi.Input[str] description: The description of the Machine Learning compute.
        :param pulumi.Input['DatabricksPropertiesArgs'] properties: Databricks specific properties.
        :param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
        """
        # The discriminator is hard-coded; the caller's compute_type value is ignored.
        pulumi.set(__self__, "compute_type", 'Databricks')
        if compute_location is not None:
            pulumi.set(__self__, "compute_location", compute_location)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)

    @property
    @pulumi.getter(name="computeType")
    def compute_type(self) -> pulumi.Input[str]:
        """
        The type of compute
        Expected value is 'Databricks'.
        """
        return pulumi.get(self, "compute_type")

    @compute_type.setter
    def compute_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "compute_type", value)

    @property
    @pulumi.getter(name="computeLocation")
    def compute_location(self) -> Optional[pulumi.Input[str]]:
        """
        Location for the underlying compute
        """
        return pulumi.get(self, "compute_location")

    @compute_location.setter
    def compute_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "compute_location", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the Machine Learning compute.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input['DatabricksPropertiesArgs']]:
        """
        Databricks specific properties.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input['DatabricksPropertiesArgs']]):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource id of the underlying compute
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
@pulumi.input_type
class DatasetCreateRequestDataPathArgs:
    def __init__(__self__, *,
                 datastore_name: Optional[pulumi.Input[str]] = None,
                 relative_path: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] datastore_name: The datastore name.
        :param pulumi.Input[str] relative_path: Path within the datastore.
        """
        # Store only arguments that were actually provided.
        for attr, arg in (("datastore_name", datastore_name),
                          ("relative_path", relative_path)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="datastoreName")
    def datastore_name(self) -> Optional[pulumi.Input[str]]:
        """
        The datastore name.
        """
        return pulumi.get(self, "datastore_name")

    @datastore_name.setter
    def datastore_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datastore_name", value)

    @property
    @pulumi.getter(name="relativePath")
    def relative_path(self) -> Optional[pulumi.Input[str]]:
        """
        Path within the datastore.
        """
        return pulumi.get(self, "relative_path")

    @relative_path.setter
    def relative_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "relative_path", value)
@pulumi.input_type
class DatasetCreateRequestParametersArgs:
    def __init__(__self__, *,
                 header: Optional[pulumi.Input[Union[str, 'Header']]] = None,
                 include_path: Optional[pulumi.Input[bool]] = None,
                 partition_format: Optional[pulumi.Input[str]] = None,
                 path: Optional[pulumi.Input['DatasetCreateRequestPathArgs']] = None,
                 query: Optional[pulumi.Input['DatasetCreateRequestQueryArgs']] = None,
                 separator: Optional[pulumi.Input[str]] = None,
                 source_type: Optional[pulumi.Input[Union[str, 'SourceType']]] = None):
        """
        :param pulumi.Input[Union[str, 'Header']] header: Header type.
        :param pulumi.Input[bool] include_path: Boolean to keep path information as column in the dataset. Defaults to False. This is useful when reading multiple files, and want to know which file a particular record originated from, or to keep useful information in file path.
        :param pulumi.Input[str] partition_format: The partition information of each path will be extracted into columns based on the specified format. Format part '{column_name}' creates string column, and '{column_name:yyyy/MM/dd/HH/mm/ss}' creates datetime column, where 'yyyy', 'MM', 'dd', 'HH', 'mm' and 'ss' are used to extract year, month, day, hour, minute and second for the datetime type. The format should start from the position of first partition key until the end of file path. For example, given the path '../USA/2019/01/01/data.parquet' where the partition is by country/region and time, partition_format='/{CountryOrRegion}/{PartitionDate:yyyy/MM/dd}/data.csv' creates a string column 'CountryOrRegion' with the value 'USA' and a datetime column 'PartitionDate' with the value '2019-01-01'.
        :param pulumi.Input['DatasetCreateRequestPathArgs'] path: The dataset path description.
        :param pulumi.Input['DatasetCreateRequestQueryArgs'] query: The dataset query description.
        :param pulumi.Input[str] separator: The separator used to split columns for 'delimited_files' sourceType.
        :param pulumi.Input[Union[str, 'SourceType']] source_type: Data source type.
        """
        if header is not None:
            pulumi.set(__self__, "header", header)
        # include_path defaults to False and is therefore always stored;
        # the generated code re-tested it for None after defaulting, which
        # was a dead (always-true) condition.
        if include_path is None:
            include_path = False
        pulumi.set(__self__, "include_path", include_path)
        if partition_format is not None:
            pulumi.set(__self__, "partition_format", partition_format)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if query is not None:
            pulumi.set(__self__, "query", query)
        if separator is not None:
            pulumi.set(__self__, "separator", separator)
        if source_type is not None:
            pulumi.set(__self__, "source_type", source_type)

    @property
    @pulumi.getter
    def header(self) -> Optional[pulumi.Input[Union[str, 'Header']]]:
        """
        Header type.
        """
        return pulumi.get(self, "header")

    @header.setter
    def header(self, value: Optional[pulumi.Input[Union[str, 'Header']]]):
        pulumi.set(self, "header", value)

    @property
    @pulumi.getter(name="includePath")
    def include_path(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean to keep path information as column in the dataset. Defaults to False. This is useful when reading multiple files, and want to know which file a particular record originated from, or to keep useful information in file path.
        """
        return pulumi.get(self, "include_path")

    @include_path.setter
    def include_path(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "include_path", value)

    @property
    @pulumi.getter(name="partitionFormat")
    def partition_format(self) -> Optional[pulumi.Input[str]]:
        """
        The partition information of each path will be extracted into columns based on the specified format. Format part '{column_name}' creates string column, and '{column_name:yyyy/MM/dd/HH/mm/ss}' creates datetime column, where 'yyyy', 'MM', 'dd', 'HH', 'mm' and 'ss' are used to extract year, month, day, hour, minute and second for the datetime type. The format should start from the position of first partition key until the end of file path. For example, given the path '../USA/2019/01/01/data.parquet' where the partition is by country/region and time, partition_format='/{CountryOrRegion}/{PartitionDate:yyyy/MM/dd}/data.csv' creates a string column 'CountryOrRegion' with the value 'USA' and a datetime column 'PartitionDate' with the value '2019-01-01'.
        """
        return pulumi.get(self, "partition_format")

    @partition_format.setter
    def partition_format(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partition_format", value)

    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input['DatasetCreateRequestPathArgs']]:
        """
        The dataset path description.
        """
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: Optional[pulumi.Input['DatasetCreateRequestPathArgs']]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter
    def query(self) -> Optional[pulumi.Input['DatasetCreateRequestQueryArgs']]:
        """
        The dataset query description.
        """
        return pulumi.get(self, "query")

    @query.setter
    def query(self, value: Optional[pulumi.Input['DatasetCreateRequestQueryArgs']]):
        pulumi.set(self, "query", value)

    @property
    @pulumi.getter
    def separator(self) -> Optional[pulumi.Input[str]]:
        """
        The separator used to split columns for 'delimited_files' sourceType.
        """
        return pulumi.get(self, "separator")

    @separator.setter
    def separator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "separator", value)

    @property
    @pulumi.getter(name="sourceType")
    def source_type(self) -> Optional[pulumi.Input[Union[str, 'SourceType']]]:
        """
        Data source type.
        """
        return pulumi.get(self, "source_type")

    @source_type.setter
    def source_type(self, value: Optional[pulumi.Input[Union[str, 'SourceType']]]):
        pulumi.set(self, "source_type", value)
@pulumi.input_type
class DatasetCreateRequestPathArgs:
    def __init__(__self__, *,
                 data_path: Optional[pulumi.Input['DatasetCreateRequestDataPathArgs']] = None,
                 http_url: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input['DatasetCreateRequestDataPathArgs'] data_path: The datastore-relative data path.
        :param pulumi.Input[str] http_url: The Http URL.
        """
        # Record only arguments that were actually provided.
        for attr, arg in (("data_path", data_path), ("http_url", http_url)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="dataPath")
    def data_path(self) -> Optional[pulumi.Input['DatasetCreateRequestDataPathArgs']]:
        """
        The datastore-relative data path.
        """
        return pulumi.get(self, "data_path")

    @data_path.setter
    def data_path(self, value: Optional[pulumi.Input['DatasetCreateRequestDataPathArgs']]):
        pulumi.set(self, "data_path", value)

    @property
    @pulumi.getter(name="httpUrl")
    def http_url(self) -> Optional[pulumi.Input[str]]:
        """
        The Http URL.
        """
        return pulumi.get(self, "http_url")

    @http_url.setter
    def http_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "http_url", value)
@pulumi.input_type
class DatasetCreateRequestQueryArgs:
    def __init__(__self__, *,
                 datastore_name: Optional[pulumi.Input[str]] = None,
                 query: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] datastore_name: The SQL/PostgreSQL/MySQL datastore name.
        :param pulumi.Input[str] query: SQL query.
        """
        if datastore_name is not None:
            pulumi.set(__self__, "datastore_name", datastore_name)
        if query is not None:
            pulumi.set(__self__, "query", query)

    @property
    @pulumi.getter(name="datastoreName")
    def datastore_name(self) -> Optional[pulumi.Input[str]]:
        """
        The SQL/PostgreSQL/MySQL datastore name.
        """
        return pulumi.get(self, "datastore_name")

    @datastore_name.setter
    def datastore_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datastore_name", value)

    @property
    @pulumi.getter
    def query(self) -> Optional[pulumi.Input[str]]:
        """
        SQL query.
        """
        return pulumi.get(self, "query")

    @query.setter
    def query(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "query", value)
@pulumi.input_type
class DatasetCreateRequestRegistrationArgs:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[str] description: The description for the dataset.
        :param pulumi.Input[str] name: The name of the dataset.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags associated with the dataset.
        """
        # All arguments are optional; skip the ones the caller left unset.
        for attr, arg in (("description", description),
                          ("name", name),
                          ("tags", tags)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description for the dataset.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the dataset.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tags associated with the dataset.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class DatasetCreateRequestTimeSeriesArgs:
    def __init__(__self__, *,
                 coarse_grain_timestamp: Optional[pulumi.Input[str]] = None,
                 fine_grain_timestamp: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] coarse_grain_timestamp: Column name to be used as CoarseGrainTimestamp. Can only be used if 'fineGrainTimestamp' is specified and cannot be same as 'fineGrainTimestamp'.
        :param pulumi.Input[str] fine_grain_timestamp: Column name to be used as FineGrainTimestamp
        """
        # Record only the timestamp columns the caller specified.
        for attr, arg in (("coarse_grain_timestamp", coarse_grain_timestamp),
                          ("fine_grain_timestamp", fine_grain_timestamp)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="coarseGrainTimestamp")
    def coarse_grain_timestamp(self) -> Optional[pulumi.Input[str]]:
        """
        Column name to be used as CoarseGrainTimestamp. Can only be used if 'fineGrainTimestamp' is specified and cannot be same as 'fineGrainTimestamp'.
        """
        return pulumi.get(self, "coarse_grain_timestamp")

    @coarse_grain_timestamp.setter
    def coarse_grain_timestamp(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "coarse_grain_timestamp", value)

    @property
    @pulumi.getter(name="fineGrainTimestamp")
    def fine_grain_timestamp(self) -> Optional[pulumi.Input[str]]:
        """
        Column name to be used as FineGrainTimestamp
        """
        return pulumi.get(self, "fine_grain_timestamp")

    @fine_grain_timestamp.setter
    def fine_grain_timestamp(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fine_grain_timestamp", value)
@pulumi.input_type
class DatasetReferenceArgs:
    """
    The dataset reference object.
    """
    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The dataset reference object.
        :param pulumi.Input[str] id: The id of the dataset reference.
        :param pulumi.Input[str] name: The name of the dataset reference.
        """
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the dataset reference.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the dataset reference.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class DockerBuildArgs:
    """
    Class to represent configuration settings for Docker Build.
    This is the 'Build' variant of the docker specification union type
    (see also DockerImageArgs for the 'Image' variant).
    """
    def __init__(__self__, *,
                 docker_specification_type: pulumi.Input[str],
                 dockerfile: pulumi.Input[str],
                 context: Optional[pulumi.Input[str]] = None,
                 platform: Optional[pulumi.Input['DockerImagePlatformArgs']] = None):
        """
        Class to represent configuration settings for Docker Build
        :param pulumi.Input[str] docker_specification_type: Enum to determine docker specification type. Must be either Build or Image.
               Expected value is 'Build'.
        :param pulumi.Input[str] dockerfile: Docker command line instructions to assemble an image.
               <seealso href="https://repo2docker.readthedocs.io/en/latest/config_files.html#dockerfile-advanced-environments" />
        :param pulumi.Input[str] context: Path to a snapshot of the Docker Context. This property is only valid if Dockerfile is specified.
               The path is relative to the asset path which must contain a single Blob URI value.
               <seealso href="https://docs.docker.com/engine/context/working-with-contexts/" />
        :param pulumi.Input['DockerImagePlatformArgs'] platform: The platform information of the docker image.
        """
        # Discriminator field: always stored as 'Build' — the caller-supplied
        # value of docker_specification_type is intentionally ignored.
        pulumi.set(__self__, "docker_specification_type", 'Build')
        pulumi.set(__self__, "dockerfile", dockerfile)
        if context is not None:
            pulumi.set(__self__, "context", context)
        if platform is not None:
            pulumi.set(__self__, "platform", platform)

    @property
    @pulumi.getter(name="dockerSpecificationType")
    def docker_specification_type(self) -> pulumi.Input[str]:
        """
        Enum to determine docker specification type. Must be either Build or Image.
        Expected value is 'Build'.
        """
        return pulumi.get(self, "docker_specification_type")

    @docker_specification_type.setter
    def docker_specification_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "docker_specification_type", value)

    @property
    @pulumi.getter
    def dockerfile(self) -> pulumi.Input[str]:
        """
        Docker command line instructions to assemble an image.
        <seealso href="https://repo2docker.readthedocs.io/en/latest/config_files.html#dockerfile-advanced-environments" />
        """
        return pulumi.get(self, "dockerfile")

    @dockerfile.setter
    def dockerfile(self, value: pulumi.Input[str]):
        pulumi.set(self, "dockerfile", value)

    @property
    @pulumi.getter
    def context(self) -> Optional[pulumi.Input[str]]:
        """
        Path to a snapshot of the Docker Context. This property is only valid if Dockerfile is specified.
        The path is relative to the asset path which must contain a single Blob URI value.
        <seealso href="https://docs.docker.com/engine/context/working-with-contexts/" />
        """
        return pulumi.get(self, "context")

    @context.setter
    def context(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "context", value)

    @property
    @pulumi.getter
    def platform(self) -> Optional[pulumi.Input['DockerImagePlatformArgs']]:
        """
        The platform information of the docker image.
        """
        return pulumi.get(self, "platform")

    @platform.setter
    def platform(self, value: Optional[pulumi.Input['DockerImagePlatformArgs']]):
        pulumi.set(self, "platform", value)
@pulumi.input_type
class DockerImagePlatformArgs:
    """
    Platform information for a docker image (currently only the OS type).
    """
    def __init__(__self__, *,
                 operating_system_type: Optional[pulumi.Input[Union[str, 'OperatingSystemType']]] = None):
        """
        :param pulumi.Input[Union[str, 'OperatingSystemType']] operating_system_type: The OS type the Environment.
        """
        if operating_system_type is not None:
            pulumi.set(__self__, "operating_system_type", operating_system_type)

    @property
    @pulumi.getter(name="operatingSystemType")
    def operating_system_type(self) -> Optional[pulumi.Input[Union[str, 'OperatingSystemType']]]:
        """
        The OS type the Environment.
        """
        return pulumi.get(self, "operating_system_type")

    @operating_system_type.setter
    def operating_system_type(self, value: Optional[pulumi.Input[Union[str, 'OperatingSystemType']]]):
        pulumi.set(self, "operating_system_type", value)
@pulumi.input_type
class DockerImageArgs:
    """
    Class to represent configuration settings for a Docker Image.
    This is the 'Image' variant of the docker specification union type
    (see also DockerBuildArgs for the 'Build' variant).
    """
    def __init__(__self__, *,
                 docker_image_uri: pulumi.Input[str],
                 docker_specification_type: pulumi.Input[str],
                 platform: Optional[pulumi.Input['DockerImagePlatformArgs']] = None):
        """
        Class to represent configuration settings for a Docker Image
        :param pulumi.Input[str] docker_image_uri: Image name of a custom base image.
               <seealso href="https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-custom-docker-image#use-a-custom-base-image" />
        :param pulumi.Input[str] docker_specification_type: Enum to determine docker specification type. Must be either Build or Image.
               Expected value is 'Image'.
        :param pulumi.Input['DockerImagePlatformArgs'] platform: The platform information of the docker image.
        """
        pulumi.set(__self__, "docker_image_uri", docker_image_uri)
        # Discriminator field: always stored as 'Image' — the caller-supplied
        # value of docker_specification_type is intentionally ignored.
        pulumi.set(__self__, "docker_specification_type", 'Image')
        if platform is not None:
            pulumi.set(__self__, "platform", platform)

    @property
    @pulumi.getter(name="dockerImageUri")
    def docker_image_uri(self) -> pulumi.Input[str]:
        """
        Image name of a custom base image.
        <seealso href="https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-custom-docker-image#use-a-custom-base-image" />
        """
        return pulumi.get(self, "docker_image_uri")

    @docker_image_uri.setter
    def docker_image_uri(self, value: pulumi.Input[str]):
        pulumi.set(self, "docker_image_uri", value)

    @property
    @pulumi.getter(name="dockerSpecificationType")
    def docker_specification_type(self) -> pulumi.Input[str]:
        """
        Enum to determine docker specification type. Must be either Build or Image.
        Expected value is 'Image'.
        """
        return pulumi.get(self, "docker_specification_type")

    @docker_specification_type.setter
    def docker_specification_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "docker_specification_type", value)

    @property
    @pulumi.getter
    def platform(self) -> Optional[pulumi.Input['DockerImagePlatformArgs']]:
        """
        The platform information of the docker image.
        """
        return pulumi.get(self, "platform")

    @platform.setter
    def platform(self, value: Optional[pulumi.Input['DockerImagePlatformArgs']]):
        pulumi.set(self, "platform", value)
@pulumi.input_type
class EncryptionPropertyArgs:
    """
    Workspace encryption settings: customer key vault properties, encryption
    status, and an optional identity used to access the key vault.
    """
    def __init__(__self__, *,
                 key_vault_properties: pulumi.Input['KeyVaultPropertiesArgs'],
                 status: pulumi.Input[Union[str, 'EncryptionStatus']],
                 identity: Optional[pulumi.Input['IdentityForCmkArgs']] = None):
        """
        :param pulumi.Input['KeyVaultPropertiesArgs'] key_vault_properties: Customer Key vault properties.
        :param pulumi.Input[Union[str, 'EncryptionStatus']] status: Indicates whether or not the encryption is enabled for the workspace.
        :param pulumi.Input['IdentityForCmkArgs'] identity: The identity that will be used to access the key vault for encryption at rest.
        """
        pulumi.set(__self__, "key_vault_properties", key_vault_properties)
        pulumi.set(__self__, "status", status)
        if identity is not None:
            pulumi.set(__self__, "identity", identity)

    @property
    @pulumi.getter(name="keyVaultProperties")
    def key_vault_properties(self) -> pulumi.Input['KeyVaultPropertiesArgs']:
        """
        Customer Key vault properties.
        """
        return pulumi.get(self, "key_vault_properties")

    @key_vault_properties.setter
    def key_vault_properties(self, value: pulumi.Input['KeyVaultPropertiesArgs']):
        pulumi.set(self, "key_vault_properties", value)

    @property
    @pulumi.getter
    def status(self) -> pulumi.Input[Union[str, 'EncryptionStatus']]:
        """
        Indicates whether or not the encryption is enabled for the workspace.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: pulumi.Input[Union[str, 'EncryptionStatus']]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['IdentityForCmkArgs']]:
        """
        The identity that will be used to access the key vault for encryption at rest.
        """
        return pulumi.get(self, "identity")

    @identity.setter
    def identity(self, value: Optional[pulumi.Input['IdentityForCmkArgs']]):
        pulumi.set(self, "identity", value)
@pulumi.input_type
class EndpointAuthKeysArgs:
    """
    Keys for endpoint authentication.
    """
    def __init__(__self__, *,
                 primary_key: Optional[pulumi.Input[str]] = None,
                 secondary_key: Optional[pulumi.Input[str]] = None):
        """
        Keys for endpoint authentication.
        :param pulumi.Input[str] primary_key: The primary key.
        :param pulumi.Input[str] secondary_key: The secondary key.
        """
        if primary_key is not None:
            pulumi.set(__self__, "primary_key", primary_key)
        if secondary_key is not None:
            pulumi.set(__self__, "secondary_key", secondary_key)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> Optional[pulumi.Input[str]]:
        """
        The primary key.
        """
        return pulumi.get(self, "primary_key")

    @primary_key.setter
    def primary_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_key", value)

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> Optional[pulumi.Input[str]]:
        """
        The secondary key.
        """
        return pulumi.get(self, "secondary_key")

    @secondary_key.setter
    def secondary_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secondary_key", value)
@pulumi.input_type
class EnvironmentContainerArgs:
    """
    Container for environment specification versions.
    """
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Container for environment specification versions.
        :param pulumi.Input[str] description: The asset description text.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The asset property dictionary.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tag dictionary. Tags can be added, removed, and updated.
        """
        if description is not None:
            pulumi.set(__self__, "description", description)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The asset description text.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The asset property dictionary.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tag dictionary. Tags can be added, removed, and updated.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class EnvironmentImageRequestEnvironmentReferenceArgs:
    """
    The unique identifying details (name and version) of the AZURE ML environment.
    """
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        The unique identifying details of the AZURE ML environment.
        :param pulumi.Input[str] name: Name of the environment.
        :param pulumi.Input[str] version: Version of the environment.
        """
        if name is not None:
            pulumi.set(__self__, "name", name)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the environment.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        Version of the environment.
        """
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class EnvironmentImageRequestEnvironmentArgs:
    """
    The details of the AZURE ML environment: Docker, Python, R, and Spark
    settings, environment variables, inferencing stack version, name and version.
    """
    def __init__(__self__, *,
                 docker: Optional[pulumi.Input['ModelEnvironmentDefinitionDockerArgs']] = None,
                 environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 inferencing_stack_version: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 python: Optional[pulumi.Input['ModelEnvironmentDefinitionPythonArgs']] = None,
                 r: Optional[pulumi.Input['ModelEnvironmentDefinitionRArgs']] = None,
                 spark: Optional[pulumi.Input['ModelEnvironmentDefinitionSparkArgs']] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        The details of the AZURE ML environment.
        :param pulumi.Input['ModelEnvironmentDefinitionDockerArgs'] docker: The definition of a Docker container.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: Definition of environment variables to be defined in the environment.
        :param pulumi.Input[str] inferencing_stack_version: The inferencing stack version added to the image. To avoid adding an inferencing stack, do not set this value. Valid values: "latest".
        :param pulumi.Input[str] name: The name of the environment.
        :param pulumi.Input['ModelEnvironmentDefinitionPythonArgs'] python: Settings for a Python environment.
        :param pulumi.Input['ModelEnvironmentDefinitionRArgs'] r: Settings for a R environment.
        :param pulumi.Input['ModelEnvironmentDefinitionSparkArgs'] spark: The configuration for a Spark environment.
        :param pulumi.Input[str] version: The environment version.
        """
        if docker is not None:
            pulumi.set(__self__, "docker", docker)
        if environment_variables is not None:
            pulumi.set(__self__, "environment_variables", environment_variables)
        if inferencing_stack_version is not None:
            pulumi.set(__self__, "inferencing_stack_version", inferencing_stack_version)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if python is not None:
            pulumi.set(__self__, "python", python)
        if r is not None:
            pulumi.set(__self__, "r", r)
        if spark is not None:
            pulumi.set(__self__, "spark", spark)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def docker(self) -> Optional[pulumi.Input['ModelEnvironmentDefinitionDockerArgs']]:
        """
        The definition of a Docker container.
        """
        return pulumi.get(self, "docker")

    @docker.setter
    def docker(self, value: Optional[pulumi.Input['ModelEnvironmentDefinitionDockerArgs']]):
        pulumi.set(self, "docker", value)

    @property
    @pulumi.getter(name="environmentVariables")
    def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Definition of environment variables to be defined in the environment.
        """
        return pulumi.get(self, "environment_variables")

    @environment_variables.setter
    def environment_variables(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "environment_variables", value)

    @property
    @pulumi.getter(name="inferencingStackVersion")
    def inferencing_stack_version(self) -> Optional[pulumi.Input[str]]:
        """
        The inferencing stack version added to the image. To avoid adding an inferencing stack, do not set this value. Valid values: "latest".
        """
        return pulumi.get(self, "inferencing_stack_version")

    @inferencing_stack_version.setter
    def inferencing_stack_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "inferencing_stack_version", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the environment.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def python(self) -> Optional[pulumi.Input['ModelEnvironmentDefinitionPythonArgs']]:
        """
        Settings for a Python environment.
        """
        return pulumi.get(self, "python")

    @python.setter
    def python(self, value: Optional[pulumi.Input['ModelEnvironmentDefinitionPythonArgs']]):
        pulumi.set(self, "python", value)

    @property
    @pulumi.getter
    def r(self) -> Optional[pulumi.Input['ModelEnvironmentDefinitionRArgs']]:
        """
        Settings for a R environment.
        """
        return pulumi.get(self, "r")

    @r.setter
    def r(self, value: Optional[pulumi.Input['ModelEnvironmentDefinitionRArgs']]):
        pulumi.set(self, "r", value)

    @property
    @pulumi.getter
    def spark(self) -> Optional[pulumi.Input['ModelEnvironmentDefinitionSparkArgs']]:
        """
        The configuration for a Spark environment.
        """
        return pulumi.get(self, "spark")

    @spark.setter
    def spark(self, value: Optional[pulumi.Input['ModelEnvironmentDefinitionSparkArgs']]):
        pulumi.set(self, "spark", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        The environment version.
        """
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class EnvironmentSpecificationVersionArgs:
    """
    Environment specification version details.
    <see href="https://repo2docker.readthedocs.io/en/latest/specification.html" />
    """
    def __init__(__self__, *,
                 conda_file: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 docker: Optional[pulumi.Input[Union['DockerBuildArgs', 'DockerImageArgs']]] = None,
                 inference_container_properties: Optional[pulumi.Input['InferenceContainerPropertiesArgs']] = None,
                 is_anonymous: Optional[pulumi.Input[bool]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Environment specification version details.
        <see href="https://repo2docker.readthedocs.io/en/latest/specification.html" />
        :param pulumi.Input[str] conda_file: Standard configuration file used by Conda that lets you install any kind of package, including Python, R, and C/C++ packages.
               <see href="https://repo2docker.readthedocs.io/en/latest/config_files.html#environment-yml-install-a-conda-environment" />
        :param pulumi.Input[str] description: The asset description text.
        :param pulumi.Input[Union['DockerBuildArgs', 'DockerImageArgs']] docker: Configuration settings for Docker.
        :param pulumi.Input['InferenceContainerPropertiesArgs'] inference_container_properties: Defines configuration specific to inference.
        :param pulumi.Input[bool] is_anonymous: If the name version are system generated (anonymous registration).
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The asset property dictionary.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tag dictionary. Tags can be added, removed, and updated.
        """
        if conda_file is not None:
            pulumi.set(__self__, "conda_file", conda_file)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if docker is not None:
            pulumi.set(__self__, "docker", docker)
        if inference_container_properties is not None:
            pulumi.set(__self__, "inference_container_properties", inference_container_properties)
        if is_anonymous is not None:
            pulumi.set(__self__, "is_anonymous", is_anonymous)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="condaFile")
    def conda_file(self) -> Optional[pulumi.Input[str]]:
        """
        Standard configuration file used by Conda that lets you install any kind of package, including Python, R, and C/C++ packages.
        <see href="https://repo2docker.readthedocs.io/en/latest/config_files.html#environment-yml-install-a-conda-environment" />
        """
        return pulumi.get(self, "conda_file")

    @conda_file.setter
    def conda_file(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "conda_file", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The asset description text.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def docker(self) -> Optional[pulumi.Input[Union['DockerBuildArgs', 'DockerImageArgs']]]:
        """
        Configuration settings for Docker.
        """
        return pulumi.get(self, "docker")

    @docker.setter
    def docker(self, value: Optional[pulumi.Input[Union['DockerBuildArgs', 'DockerImageArgs']]]):
        pulumi.set(self, "docker", value)

    @property
    @pulumi.getter(name="inferenceContainerProperties")
    def inference_container_properties(self) -> Optional[pulumi.Input['InferenceContainerPropertiesArgs']]:
        """
        Defines configuration specific to inference.
        """
        return pulumi.get(self, "inference_container_properties")

    @inference_container_properties.setter
    def inference_container_properties(self, value: Optional[pulumi.Input['InferenceContainerPropertiesArgs']]):
        pulumi.set(self, "inference_container_properties", value)

    @property
    @pulumi.getter(name="isAnonymous")
    def is_anonymous(self) -> Optional[pulumi.Input[bool]]:
        """
        If the name version are system generated (anonymous registration).
        """
        return pulumi.get(self, "is_anonymous")

    @is_anonymous.setter
    def is_anonymous(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_anonymous", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The asset property dictionary.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tag dictionary. Tags can be added, removed, and updated.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class FlavorDataArgs:
    """
    Wrapper around a model flavor's data dictionary.
    """
    def __init__(__self__, *,
                 data: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] data: Model flavor-specific data.
        """
        if data is not None:
            pulumi.set(__self__, "data", data)

    @property
    @pulumi.getter
    def data(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Model flavor-specific data.
        """
        return pulumi.get(self, "data")

    @data.setter
    def data(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "data", value)
@pulumi.input_type
class HDInsightPropertiesArgs:
    """
    Connection properties of an HDInsight compute: master node address,
    administrator credentials, and SSH port.
    """
    def __init__(__self__, *,
                 address: Optional[pulumi.Input[str]] = None,
                 administrator_account: Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']] = None,
                 ssh_port: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] address: Public IP address of the master node of the cluster.
        :param pulumi.Input['VirtualMachineSshCredentialsArgs'] administrator_account: Admin credentials for master node of the cluster
        :param pulumi.Input[int] ssh_port: Port open for ssh connections on the master node of the cluster.
        """
        if address is not None:
            pulumi.set(__self__, "address", address)
        if administrator_account is not None:
            pulumi.set(__self__, "administrator_account", administrator_account)
        if ssh_port is not None:
            pulumi.set(__self__, "ssh_port", ssh_port)

    @property
    @pulumi.getter
    def address(self) -> Optional[pulumi.Input[str]]:
        """
        Public IP address of the master node of the cluster.
        """
        return pulumi.get(self, "address")

    @address.setter
    def address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "address", value)

    @property
    @pulumi.getter(name="administratorAccount")
    def administrator_account(self) -> Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']]:
        """
        Admin credentials for master node of the cluster
        """
        return pulumi.get(self, "administrator_account")

    @administrator_account.setter
    def administrator_account(self, value: Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']]):
        pulumi.set(self, "administrator_account", value)

    @property
    @pulumi.getter(name="sshPort")
    def ssh_port(self) -> Optional[pulumi.Input[int]]:
        """
        Port open for ssh connections on the master node of the cluster.
        """
        return pulumi.get(self, "ssh_port")

    @ssh_port.setter
    def ssh_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ssh_port", value)
@pulumi.input_type
class HDInsightArgs:
    """
    A HDInsight compute.
    """
    def __init__(__self__, *,
                 compute_type: pulumi.Input[str],
                 compute_location: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input['HDInsightPropertiesArgs']] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        A HDInsight compute.
        :param pulumi.Input[str] compute_type: The type of compute
               Expected value is 'HDInsight'.
        :param pulumi.Input[str] compute_location: Location for the underlying compute
        :param pulumi.Input[str] description: The description of the Machine Learning compute.
        :param pulumi.Input['HDInsightPropertiesArgs'] properties: HDInsight compute properties (address, admin account, SSH port).
        :param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
        """
        # Discriminator field: always stored as 'HDInsight' — the caller-supplied
        # value of compute_type is intentionally ignored.
        pulumi.set(__self__, "compute_type", 'HDInsight')
        if compute_location is not None:
            pulumi.set(__self__, "compute_location", compute_location)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)

    @property
    @pulumi.getter(name="computeType")
    def compute_type(self) -> pulumi.Input[str]:
        """
        The type of compute
        Expected value is 'HDInsight'.
        """
        return pulumi.get(self, "compute_type")

    @compute_type.setter
    def compute_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "compute_type", value)

    @property
    @pulumi.getter(name="computeLocation")
    def compute_location(self) -> Optional[pulumi.Input[str]]:
        """
        Location for the underlying compute
        """
        return pulumi.get(self, "compute_location")

    @compute_location.setter
    def compute_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "compute_location", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the Machine Learning compute.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input['HDInsightPropertiesArgs']]:
        """
        HDInsight compute properties (address, admin account, SSH port).
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input['HDInsightPropertiesArgs']]):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource id of the underlying compute
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
@pulumi.input_type
class IdAssetReferenceArgs:
    """
    Reference to an asset via its ARM resource ID.
    """
    def __init__(__self__, *,
                 asset_id: pulumi.Input[str],
                 reference_type: pulumi.Input[str]):
        """
        Reference to an asset via its ARM resource ID.
        :param pulumi.Input[str] asset_id: ARM resource ID of the asset.
        :param pulumi.Input[str] reference_type: Enum to determine which reference method to use for an asset.
               Expected value is 'Id'.
        """
        pulumi.set(__self__, "asset_id", asset_id)
        # Discriminator field: always stored as 'Id' — the caller-supplied
        # value of reference_type is intentionally ignored.
        pulumi.set(__self__, "reference_type", 'Id')

    @property
    @pulumi.getter(name="assetId")
    def asset_id(self) -> pulumi.Input[str]:
        """
        ARM resource ID of the asset.
        """
        return pulumi.get(self, "asset_id")

    @asset_id.setter
    def asset_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "asset_id", value)

    @property
    @pulumi.getter(name="referenceType")
    def reference_type(self) -> pulumi.Input[str]:
        """
        Enum to determine which reference method to use for an asset.
        Expected value is 'Id'.
        """
        return pulumi.get(self, "reference_type")

    @reference_type.setter
    def reference_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "reference_type", value)
@pulumi.input_type
class IdentityForCmkArgs:
    """
    Identity that will be used to access key vault for encryption at rest.
    """
    def __init__(__self__, *,
                 user_assigned_identity: pulumi.Input[str]):
        """
        Identity that will be used to access key vault for encryption at rest
        :param pulumi.Input[str] user_assigned_identity: The ArmId of the user assigned identity that will be used to access the customer managed key vault
        """
        pulumi.set(__self__, "user_assigned_identity", user_assigned_identity)

    @property
    @pulumi.getter(name="userAssignedIdentity")
    def user_assigned_identity(self) -> pulumi.Input[str]:
        """
        The ArmId of the user assigned identity that will be used to access the customer managed key vault
        """
        return pulumi.get(self, "user_assigned_identity")

    @user_assigned_identity.setter
    def user_assigned_identity(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_assigned_identity", value)
@pulumi.input_type
class IdentityArgs:
    """
    Identity for the resource.
    """
    def __init__(__self__, *,
                 type: Optional[pulumi.Input['ResourceIdentityType']] = None,
                 user_assigned_identities: Optional[pulumi.Input[Mapping[str, Any]]] = None):
        """
        Identity for the resource.
        :param pulumi.Input['ResourceIdentityType'] type: The identity type.
        :param pulumi.Input[Mapping[str, Any]] user_assigned_identities: The user assigned identities associated with the resource.
        """
        if type is not None:
            pulumi.set(__self__, "type", type)
        if user_assigned_identities is not None:
            pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
        """
        The identity type.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="userAssignedIdentities")
    def user_assigned_identities(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        The user assigned identities associated with the resource.
        """
        return pulumi.get(self, "user_assigned_identities")

    @user_assigned_identities.setter
    def user_assigned_identities(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "user_assigned_identities", value)
@pulumi.input_type
class ImageAssetArgs:
    """
    An Image asset.
    """
    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None,
                 mime_type: Optional[pulumi.Input[str]] = None,
                 unpack: Optional[pulumi.Input[bool]] = None,
                 url: Optional[pulumi.Input[str]] = None):
        """
        An Image asset.
        :param pulumi.Input[str] id: The Asset Id.
        :param pulumi.Input[str] mime_type: The mime type.
        :param pulumi.Input[bool] unpack: Whether the Asset is unpacked.
        :param pulumi.Input[str] url: The Url of the Asset.
        """
        if id is not None:
            pulumi.set(__self__, "id", id)
        if mime_type is not None:
            pulumi.set(__self__, "mime_type", mime_type)
        if unpack is not None:
            pulumi.set(__self__, "unpack", unpack)
        if url is not None:
            pulumi.set(__self__, "url", url)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The Asset Id.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="mimeType")
    def mime_type(self) -> Optional[pulumi.Input[str]]:
        """
        The mime type.
        """
        return pulumi.get(self, "mime_type")

    @mime_type.setter
    def mime_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mime_type", value)

    @property
    @pulumi.getter
    def unpack(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the Asset is unpacked.
        """
        return pulumi.get(self, "unpack")

    @unpack.setter
    def unpack(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unpack", value)

    @property
    @pulumi.getter
    def url(self) -> Optional[pulumi.Input[str]]:
        """
        The Url of the Asset.
        """
        return pulumi.get(self, "url")

    @url.setter
    def url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "url", value)
@pulumi.input_type
class InferenceContainerPropertiesArgs:
    def __init__(__self__, *,
                 liveness_route: Optional[pulumi.Input['RouteArgs']] = None,
                 readiness_route: Optional[pulumi.Input['RouteArgs']] = None,
                 scoring_route: Optional[pulumi.Input['RouteArgs']] = None):
        """
        :param pulumi.Input['RouteArgs'] liveness_route: The route to check the liveness of the inference server container.
        :param pulumi.Input['RouteArgs'] readiness_route: The route to check the readiness of the inference server container.
        :param pulumi.Input['RouteArgs'] scoring_route: The port to send the scoring requests to, within the inference server container.
        """
        # All three routes are optional; skip any that were left unset.
        for arg_name, arg_value in (
                ("liveness_route", liveness_route),
                ("readiness_route", readiness_route),
                ("scoring_route", scoring_route)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="livenessRoute")
    def liveness_route(self) -> Optional[pulumi.Input['RouteArgs']]:
        """The route to check the liveness of the inference server container."""
        return pulumi.get(self, "liveness_route")

    @liveness_route.setter
    def liveness_route(self, new_value: Optional[pulumi.Input['RouteArgs']]):
        pulumi.set(self, "liveness_route", new_value)

    @property
    @pulumi.getter(name="readinessRoute")
    def readiness_route(self) -> Optional[pulumi.Input['RouteArgs']]:
        """The route to check the readiness of the inference server container."""
        return pulumi.get(self, "readiness_route")

    @readiness_route.setter
    def readiness_route(self, new_value: Optional[pulumi.Input['RouteArgs']]):
        pulumi.set(self, "readiness_route", new_value)

    @property
    @pulumi.getter(name="scoringRoute")
    def scoring_route(self) -> Optional[pulumi.Input['RouteArgs']]:
        """The port to send the scoring requests to, within the inference server container."""
        return pulumi.get(self, "scoring_route")

    @scoring_route.setter
    def scoring_route(self, new_value: Optional[pulumi.Input['RouteArgs']]):
        pulumi.set(self, "scoring_route", new_value)
@pulumi.input_type
class InputDataBindingArgs:
    def __init__(__self__, *,
                 data_id: Optional[pulumi.Input[str]] = None,
                 mode: Optional[pulumi.Input[Union[str, 'DataBindingMode']]] = None,
                 path_on_compute: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] data_id: ARM resource ID of the registered dataVersion.
        :param pulumi.Input[Union[str, 'DataBindingMode']] mode: Mechanism for accessing the data artifact.
        :param pulumi.Input[str] path_on_compute: Location of data inside the container process.
        """
        # Record only the bindings that were explicitly given.
        for arg_name, arg_value in (
                ("data_id", data_id),
                ("mode", mode),
                ("path_on_compute", path_on_compute)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="dataId")
    def data_id(self) -> Optional[pulumi.Input[str]]:
        """ARM resource ID of the registered dataVersion."""
        return pulumi.get(self, "data_id")

    @data_id.setter
    def data_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data_id", new_value)

    @property
    @pulumi.getter
    def mode(self) -> Optional[pulumi.Input[Union[str, 'DataBindingMode']]]:
        """Mechanism for accessing the data artifact."""
        return pulumi.get(self, "mode")

    @mode.setter
    def mode(self, new_value: Optional[pulumi.Input[Union[str, 'DataBindingMode']]]):
        pulumi.set(self, "mode", new_value)

    @property
    @pulumi.getter(name="pathOnCompute")
    def path_on_compute(self) -> Optional[pulumi.Input[str]]:
        """Location of data inside the container process."""
        return pulumi.get(self, "path_on_compute")

    @path_on_compute.setter
    def path_on_compute(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path_on_compute", new_value)
@pulumi.input_type
class K8sOnlineDeploymentArgs:
    def __init__(__self__, *,
                 endpoint_compute_type: pulumi.Input[str],
                 app_insights_enabled: Optional[pulumi.Input[bool]] = None,
                 code_configuration: Optional[pulumi.Input['CodeConfigurationArgs']] = None,
                 container_resource_requirements: Optional[pulumi.Input['ContainerResourceRequirementsArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 environment_id: Optional[pulumi.Input[str]] = None,
                 environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 liveness_probe: Optional[pulumi.Input['ProbeSettingsArgs']] = None,
                 model: Optional[pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 request_settings: Optional[pulumi.Input['OnlineRequestSettingsArgs']] = None,
                 scale_settings: Optional[pulumi.Input[Union['AutoScaleSettingsArgs', 'ManualScaleSettingsArgs']]] = None):
        """
        :param pulumi.Input[str] endpoint_compute_type: Enum to determine endpoint compute type.
               Expected value is 'K8S'.
        :param pulumi.Input[bool] app_insights_enabled: If true, enables Application Insights logging.
        :param pulumi.Input['CodeConfigurationArgs'] code_configuration: Code configuration for the endpoint deployment.
        :param pulumi.Input['ContainerResourceRequirementsArgs'] container_resource_requirements: Resource requirements for each container instance within an online deployment.
        :param pulumi.Input[str] description: Description of the endpoint deployment.
        :param pulumi.Input[str] environment_id: ARM resource ID of the environment specification for the endpoint deployment.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: Environment variables configuration for the deployment.
        :param pulumi.Input['ProbeSettingsArgs'] liveness_probe: Deployment container liveness/readiness probe configuration.
        :param pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']] model: Reference to the model asset for the endpoint deployment.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: Property dictionary. Properties can be added, but not removed or altered.
        :param pulumi.Input['OnlineRequestSettingsArgs'] request_settings: Online deployment scoring requests configuration.
        :param pulumi.Input[Union['AutoScaleSettingsArgs', 'ManualScaleSettingsArgs']] scale_settings: Online deployment scaling configuration.
        """
        # Discriminator for this deployment flavor: always 'K8S', regardless of
        # the value passed in (the parameter exists only for interface symmetry).
        pulumi.set(__self__, "endpoint_compute_type", 'K8S')
        # Forward every optional argument that the caller supplied.
        for arg_name, arg_value in (
                ("app_insights_enabled", app_insights_enabled),
                ("code_configuration", code_configuration),
                ("container_resource_requirements", container_resource_requirements),
                ("description", description),
                ("environment_id", environment_id),
                ("environment_variables", environment_variables),
                ("liveness_probe", liveness_probe),
                ("model", model),
                ("properties", properties),
                ("request_settings", request_settings),
                ("scale_settings", scale_settings)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="endpointComputeType")
    def endpoint_compute_type(self) -> pulumi.Input[str]:
        """
        Enum to determine endpoint compute type.
        Expected value is 'K8S'.
        """
        return pulumi.get(self, "endpoint_compute_type")

    @endpoint_compute_type.setter
    def endpoint_compute_type(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "endpoint_compute_type", new_value)

    @property
    @pulumi.getter(name="appInsightsEnabled")
    def app_insights_enabled(self) -> Optional[pulumi.Input[bool]]:
        """If true, enables Application Insights logging."""
        return pulumi.get(self, "app_insights_enabled")

    @app_insights_enabled.setter
    def app_insights_enabled(self, new_value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "app_insights_enabled", new_value)

    @property
    @pulumi.getter(name="codeConfiguration")
    def code_configuration(self) -> Optional[pulumi.Input['CodeConfigurationArgs']]:
        """Code configuration for the endpoint deployment."""
        return pulumi.get(self, "code_configuration")

    @code_configuration.setter
    def code_configuration(self, new_value: Optional[pulumi.Input['CodeConfigurationArgs']]):
        pulumi.set(self, "code_configuration", new_value)

    @property
    @pulumi.getter(name="containerResourceRequirements")
    def container_resource_requirements(self) -> Optional[pulumi.Input['ContainerResourceRequirementsArgs']]:
        """Resource requirements for each container instance within an online deployment."""
        return pulumi.get(self, "container_resource_requirements")

    @container_resource_requirements.setter
    def container_resource_requirements(self, new_value: Optional[pulumi.Input['ContainerResourceRequirementsArgs']]):
        pulumi.set(self, "container_resource_requirements", new_value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Description of the endpoint deployment."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", new_value)

    @property
    @pulumi.getter(name="environmentId")
    def environment_id(self) -> Optional[pulumi.Input[str]]:
        """ARM resource ID of the environment specification for the endpoint deployment."""
        return pulumi.get(self, "environment_id")

    @environment_id.setter
    def environment_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "environment_id", new_value)

    @property
    @pulumi.getter(name="environmentVariables")
    def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Environment variables configuration for the deployment."""
        return pulumi.get(self, "environment_variables")

    @environment_variables.setter
    def environment_variables(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "environment_variables", new_value)

    @property
    @pulumi.getter(name="livenessProbe")
    def liveness_probe(self) -> Optional[pulumi.Input['ProbeSettingsArgs']]:
        """Deployment container liveness/readiness probe configuration."""
        return pulumi.get(self, "liveness_probe")

    @liveness_probe.setter
    def liveness_probe(self, new_value: Optional[pulumi.Input['ProbeSettingsArgs']]):
        pulumi.set(self, "liveness_probe", new_value)

    @property
    @pulumi.getter
    def model(self) -> Optional[pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']]]:
        """Reference to the model asset for the endpoint deployment."""
        return pulumi.get(self, "model")

    @model.setter
    def model(self, new_value: Optional[pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']]]):
        pulumi.set(self, "model", new_value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Property dictionary. Properties can be added, but not removed or altered."""
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", new_value)

    @property
    @pulumi.getter(name="requestSettings")
    def request_settings(self) -> Optional[pulumi.Input['OnlineRequestSettingsArgs']]:
        """Online deployment scoring requests configuration."""
        return pulumi.get(self, "request_settings")

    @request_settings.setter
    def request_settings(self, new_value: Optional[pulumi.Input['OnlineRequestSettingsArgs']]):
        pulumi.set(self, "request_settings", new_value)

    @property
    @pulumi.getter(name="scaleSettings")
    def scale_settings(self) -> Optional[pulumi.Input[Union['AutoScaleSettingsArgs', 'ManualScaleSettingsArgs']]]:
        """Online deployment scaling configuration."""
        return pulumi.get(self, "scale_settings")

    @scale_settings.setter
    def scale_settings(self, new_value: Optional[pulumi.Input[Union['AutoScaleSettingsArgs', 'ManualScaleSettingsArgs']]]):
        pulumi.set(self, "scale_settings", new_value)
@pulumi.input_type
class KeyVaultPropertiesArgs:
    def __init__(__self__, *,
                 key_identifier: pulumi.Input[str],
                 key_vault_arm_id: pulumi.Input[str],
                 identity_client_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] key_identifier: Key vault uri to access the encryption key.
        :param pulumi.Input[str] key_vault_arm_id: The ArmId of the keyVault where the customer owned encryption key is present.
        :param pulumi.Input[str] identity_client_id: For future use - The client id of the identity which will be used to access key vault.
        """
        # Required fields are always stored.
        for required_name, required_value in (
                ("key_identifier", key_identifier),
                ("key_vault_arm_id", key_vault_arm_id)):
            pulumi.set(__self__, required_name, required_value)
        # Optional field: stored only when supplied.
        if identity_client_id is not None:
            pulumi.set(__self__, "identity_client_id", identity_client_id)

    @property
    @pulumi.getter(name="keyIdentifier")
    def key_identifier(self) -> pulumi.Input[str]:
        """Key vault uri to access the encryption key."""
        return pulumi.get(self, "key_identifier")

    @key_identifier.setter
    def key_identifier(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "key_identifier", new_value)

    @property
    @pulumi.getter(name="keyVaultArmId")
    def key_vault_arm_id(self) -> pulumi.Input[str]:
        """The ArmId of the keyVault where the customer owned encryption key is present."""
        return pulumi.get(self, "key_vault_arm_id")

    @key_vault_arm_id.setter
    def key_vault_arm_id(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "key_vault_arm_id", new_value)

    @property
    @pulumi.getter(name="identityClientId")
    def identity_client_id(self) -> Optional[pulumi.Input[str]]:
        """For future use - The client id of the identity which will be used to access key vault."""
        return pulumi.get(self, "identity_client_id")

    @identity_client_id.setter
    def identity_client_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "identity_client_id", new_value)
@pulumi.input_type
class LabelCategoryArgs:
    def __init__(__self__, *,
                 classes: pulumi.Input[Mapping[str, pulumi.Input['LabelClassArgs']]],
                 allow_multi_select: Optional[pulumi.Input[bool]] = None,
                 display_name: Optional[pulumi.Input[str]] = None):
        """
        Represents a category of labels in a labeling job.

        :param pulumi.Input[Mapping[str, pulumi.Input['LabelClassArgs']]] classes: Dictionary of label classes in this category.
        :param pulumi.Input[bool] allow_multi_select: Indicates whether it is allowed to select multiple classes in this category.
        :param pulumi.Input[str] display_name: Display name of the label category.
        """
        # "classes" is mandatory; the remaining arguments are optional.
        pulumi.set(__self__, "classes", classes)
        for arg_name, arg_value in (
                ("allow_multi_select", allow_multi_select),
                ("display_name", display_name)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter
    def classes(self) -> pulumi.Input[Mapping[str, pulumi.Input['LabelClassArgs']]]:
        """Dictionary of label classes in this category."""
        return pulumi.get(self, "classes")

    @classes.setter
    def classes(self, new_value: pulumi.Input[Mapping[str, pulumi.Input['LabelClassArgs']]]):
        pulumi.set(self, "classes", new_value)

    @property
    @pulumi.getter(name="allowMultiSelect")
    def allow_multi_select(self) -> Optional[pulumi.Input[bool]]:
        """Indicates whether it is allowed to select multiple classes in this category."""
        return pulumi.get(self, "allow_multi_select")

    @allow_multi_select.setter
    def allow_multi_select(self, new_value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_multi_select", new_value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """Display name of the label category."""
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", new_value)
@pulumi.input_type
class LabelClassArgs:
    def __init__(__self__, *,
                 display_name: Optional[pulumi.Input[str]] = None,
                 subclasses: Optional[pulumi.Input[Mapping[str, pulumi.Input['LabelClassArgs']]]] = None):
        """
        Represents a label or a category of labels in a labeling job.

        :param pulumi.Input[str] display_name: Display name of the label class.
        :param pulumi.Input[Mapping[str, pulumi.Input['LabelClassArgs']]] subclasses: Dictionary of subclasses of the label class.
        """
        # Both fields are optional; persist only supplied values.
        for arg_name, arg_value in (
                ("display_name", display_name),
                ("subclasses", subclasses)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """Display name of the label class."""
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", new_value)

    @property
    @pulumi.getter
    def subclasses(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['LabelClassArgs']]]]:
        """Dictionary of subclasses of the label class."""
        return pulumi.get(self, "subclasses")

    @subclasses.setter
    def subclasses(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input['LabelClassArgs']]]]):
        pulumi.set(self, "subclasses", new_value)
@pulumi.input_type
class LabelingDatasetConfigurationArgs:
    def __init__(__self__, *,
                 asset_name: pulumi.Input[str],
                 dataset_version: pulumi.Input[str],
                 enable_incremental_dataset_refresh: Optional[pulumi.Input[bool]] = None):
        """
        Represents configuration of dataset used in a labeling job.

        :param pulumi.Input[str] asset_name: Name of the data asset to perform labeling.
        :param pulumi.Input[str] dataset_version: AML dataset version.
        :param pulumi.Input[bool] enable_incremental_dataset_refresh: Indicates whether to enable incremental dataset refresh.
        """
        # Required configuration values.
        for required_name, required_value in (
                ("asset_name", asset_name),
                ("dataset_version", dataset_version)):
            pulumi.set(__self__, required_name, required_value)
        # Optional flag: only stored when explicitly provided.
        if enable_incremental_dataset_refresh is not None:
            pulumi.set(__self__, "enable_incremental_dataset_refresh", enable_incremental_dataset_refresh)

    @property
    @pulumi.getter(name="assetName")
    def asset_name(self) -> pulumi.Input[str]:
        """Name of the data asset to perform labeling."""
        return pulumi.get(self, "asset_name")

    @asset_name.setter
    def asset_name(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "asset_name", new_value)

    @property
    @pulumi.getter(name="datasetVersion")
    def dataset_version(self) -> pulumi.Input[str]:
        """AML dataset version."""
        return pulumi.get(self, "dataset_version")

    @dataset_version.setter
    def dataset_version(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "dataset_version", new_value)

    @property
    @pulumi.getter(name="enableIncrementalDatasetRefresh")
    def enable_incremental_dataset_refresh(self) -> Optional[pulumi.Input[bool]]:
        """Indicates whether to enable incremental dataset refresh."""
        return pulumi.get(self, "enable_incremental_dataset_refresh")

    @enable_incremental_dataset_refresh.setter
    def enable_incremental_dataset_refresh(self, new_value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_incremental_dataset_refresh", new_value)
@pulumi.input_type
class LabelingJobImagePropertiesArgs:
    def __init__(__self__, *,
                 media_type: pulumi.Input[Union[str, 'MediaType']],
                 annotation_type: Optional[pulumi.Input[Union[str, 'ImageAnnotationType']]] = None):
        """
        :param pulumi.Input[Union[str, 'MediaType']] media_type: Media type of data asset.
        :param pulumi.Input[Union[str, 'ImageAnnotationType']] annotation_type: Annotation type of image labeling tasks.
        """
        # Media type is mandatory; the annotation type may be omitted.
        pulumi.set(__self__, "media_type", media_type)
        if annotation_type is not None:
            pulumi.set(__self__, "annotation_type", annotation_type)

    @property
    @pulumi.getter(name="mediaType")
    def media_type(self) -> pulumi.Input[Union[str, 'MediaType']]:
        """Media type of data asset."""
        return pulumi.get(self, "media_type")

    @media_type.setter
    def media_type(self, new_value: pulumi.Input[Union[str, 'MediaType']]):
        pulumi.set(self, "media_type", new_value)

    @property
    @pulumi.getter(name="annotationType")
    def annotation_type(self) -> Optional[pulumi.Input[Union[str, 'ImageAnnotationType']]]:
        """Annotation type of image labeling tasks."""
        return pulumi.get(self, "annotation_type")

    @annotation_type.setter
    def annotation_type(self, new_value: Optional[pulumi.Input[Union[str, 'ImageAnnotationType']]]):
        pulumi.set(self, "annotation_type", new_value)
@pulumi.input_type
class LabelingJobInstructionsArgs:
    def __init__(__self__, *,
                 uri: Optional[pulumi.Input[str]] = None):
        """
        Instructions for a labeling job.

        :param pulumi.Input[str] uri: The link to a page with detailed labeling instructions for labelers.
        """
        # Single optional field; skip storage entirely when absent.
        if uri is not None:
            pulumi.set(__self__, "uri", uri)

    @property
    @pulumi.getter
    def uri(self) -> Optional[pulumi.Input[str]]:
        """The link to a page with detailed labeling instructions for labelers."""
        return pulumi.get(self, "uri")

    @uri.setter
    def uri(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "uri", new_value)
@pulumi.input_type
class LabelingJobPropertiesArgs:
    def __init__(__self__, *,
                 dataset_configuration: pulumi.Input['LabelingDatasetConfigurationArgs'],
                 job_instructions: pulumi.Input['LabelingJobInstructionsArgs'],
                 label_categories: pulumi.Input[Mapping[str, pulumi.Input['LabelCategoryArgs']]],
                 labeling_job_media_properties: pulumi.Input['LabelingJobImagePropertiesArgs'],
                 ml_assist_configuration: Optional[pulumi.Input['MLAssistConfigurationArgs']] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Definition of a labeling job.

        :param pulumi.Input['LabelingDatasetConfigurationArgs'] dataset_configuration: Dataset configuration for the job.
        :param pulumi.Input['LabelingJobInstructionsArgs'] job_instructions: Instructions for the job.
        :param pulumi.Input[Mapping[str, pulumi.Input['LabelCategoryArgs']]] label_categories: Label categories of the job.
        :param pulumi.Input['LabelingJobImagePropertiesArgs'] labeling_job_media_properties: Media specific properties in a labeling job.
        :param pulumi.Input['MLAssistConfigurationArgs'] ml_assist_configuration: Machine learning assisted configuration for the job.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The job property dictionary. Properties can be added, but not removed or altered.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The job tag dictionary. Tags can be added, removed, and updated.
        """
        # Mandatory job definition pieces.
        for required_name, required_value in (
                ("dataset_configuration", dataset_configuration),
                ("job_instructions", job_instructions),
                ("label_categories", label_categories),
                ("labeling_job_media_properties", labeling_job_media_properties)):
            pulumi.set(__self__, required_name, required_value)
        # Optional extras, stored only when provided.
        for arg_name, arg_value in (
                ("ml_assist_configuration", ml_assist_configuration),
                ("properties", properties),
                ("tags", tags)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="datasetConfiguration")
    def dataset_configuration(self) -> pulumi.Input['LabelingDatasetConfigurationArgs']:
        """Dataset configuration for the job."""
        return pulumi.get(self, "dataset_configuration")

    @dataset_configuration.setter
    def dataset_configuration(self, new_value: pulumi.Input['LabelingDatasetConfigurationArgs']):
        pulumi.set(self, "dataset_configuration", new_value)

    @property
    @pulumi.getter(name="jobInstructions")
    def job_instructions(self) -> pulumi.Input['LabelingJobInstructionsArgs']:
        """Instructions for the job."""
        return pulumi.get(self, "job_instructions")

    @job_instructions.setter
    def job_instructions(self, new_value: pulumi.Input['LabelingJobInstructionsArgs']):
        pulumi.set(self, "job_instructions", new_value)

    @property
    @pulumi.getter(name="labelCategories")
    def label_categories(self) -> pulumi.Input[Mapping[str, pulumi.Input['LabelCategoryArgs']]]:
        """Label categories of the job."""
        return pulumi.get(self, "label_categories")

    @label_categories.setter
    def label_categories(self, new_value: pulumi.Input[Mapping[str, pulumi.Input['LabelCategoryArgs']]]):
        pulumi.set(self, "label_categories", new_value)

    @property
    @pulumi.getter(name="labelingJobMediaProperties")
    def labeling_job_media_properties(self) -> pulumi.Input['LabelingJobImagePropertiesArgs']:
        """Media specific properties in a labeling job."""
        return pulumi.get(self, "labeling_job_media_properties")

    @labeling_job_media_properties.setter
    def labeling_job_media_properties(self, new_value: pulumi.Input['LabelingJobImagePropertiesArgs']):
        pulumi.set(self, "labeling_job_media_properties", new_value)

    @property
    @pulumi.getter(name="mlAssistConfiguration")
    def ml_assist_configuration(self) -> Optional[pulumi.Input['MLAssistConfigurationArgs']]:
        """Machine learning assisted configuration for the job."""
        return pulumi.get(self, "ml_assist_configuration")

    @ml_assist_configuration.setter
    def ml_assist_configuration(self, new_value: Optional[pulumi.Input['MLAssistConfigurationArgs']]):
        pulumi.set(self, "ml_assist_configuration", new_value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """The job property dictionary. Properties can be added, but not removed or altered."""
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", new_value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """The job tag dictionary. Tags can be added, removed, and updated."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", new_value)
@pulumi.input_type
class LinkedServicePropsArgs:
    def __init__(__self__, *,
                 linked_service_resource_id: pulumi.Input[str],
                 created_time: Optional[pulumi.Input[str]] = None,
                 link_type: Optional[pulumi.Input['LinkedServiceLinkType']] = None,
                 modified_time: Optional[pulumi.Input[str]] = None):
        """
        LinkedService specific properties.

        :param pulumi.Input[str] linked_service_resource_id: ResourceId of the link target of the linked service.
        :param pulumi.Input[str] created_time: The creation time of the linked service.
        :param pulumi.Input['LinkedServiceLinkType'] link_type: Type of the link target.
        :param pulumi.Input[str] modified_time: The last modified time of the linked service.
        """
        # Only the target resource id is mandatory.
        pulumi.set(__self__, "linked_service_resource_id", linked_service_resource_id)
        for arg_name, arg_value in (
                ("created_time", created_time),
                ("link_type", link_type),
                ("modified_time", modified_time)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="linkedServiceResourceId")
    def linked_service_resource_id(self) -> pulumi.Input[str]:
        """ResourceId of the link target of the linked service."""
        return pulumi.get(self, "linked_service_resource_id")

    @linked_service_resource_id.setter
    def linked_service_resource_id(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "linked_service_resource_id", new_value)

    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> Optional[pulumi.Input[str]]:
        """The creation time of the linked service."""
        return pulumi.get(self, "created_time")

    @created_time.setter
    def created_time(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_time", new_value)

    @property
    @pulumi.getter(name="linkType")
    def link_type(self) -> Optional[pulumi.Input['LinkedServiceLinkType']]:
        """Type of the link target."""
        return pulumi.get(self, "link_type")

    @link_type.setter
    def link_type(self, new_value: Optional[pulumi.Input['LinkedServiceLinkType']]):
        pulumi.set(self, "link_type", new_value)

    @property
    @pulumi.getter(name="modifiedTime")
    def modified_time(self) -> Optional[pulumi.Input[str]]:
        """The last modified time of the linked service."""
        return pulumi.get(self, "modified_time")

    @modified_time.setter
    def modified_time(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "modified_time", new_value)
@pulumi.input_type
class LinkedWorkspacePropsArgs:
    def __init__(__self__, *,
                 linked_workspace_resource_id: Optional[pulumi.Input[str]] = None,
                 user_assigned_identity_resource_id: Optional[pulumi.Input[str]] = None):
        """
        LinkedWorkspace specific properties.

        :param pulumi.Input[str] linked_workspace_resource_id: ResourceId of the link target of the linked workspace.
        :param pulumi.Input[str] user_assigned_identity_resource_id: ResourceId of the user assigned identity for the linked workspace.
        """
        # Both fields are optional; persist only supplied values.
        for arg_name, arg_value in (
                ("linked_workspace_resource_id", linked_workspace_resource_id),
                ("user_assigned_identity_resource_id", user_assigned_identity_resource_id)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="linkedWorkspaceResourceId")
    def linked_workspace_resource_id(self) -> Optional[pulumi.Input[str]]:
        """ResourceId of the link target of the linked workspace."""
        return pulumi.get(self, "linked_workspace_resource_id")

    @linked_workspace_resource_id.setter
    def linked_workspace_resource_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "linked_workspace_resource_id", new_value)

    @property
    @pulumi.getter(name="userAssignedIdentityResourceId")
    def user_assigned_identity_resource_id(self) -> Optional[pulumi.Input[str]]:
        """ResourceId of the user assigned identity for the linked workspace."""
        return pulumi.get(self, "user_assigned_identity_resource_id")

    @user_assigned_identity_resource_id.setter
    def user_assigned_identity_resource_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_assigned_identity_resource_id", new_value)
@pulumi.input_type
class MLAssistConfigurationArgs:
    def __init__(__self__, *,
                 inferencing_compute_binding: pulumi.Input['ComputeBindingArgs'],
                 model_name_prefix: pulumi.Input[str],
                 training_compute_binding: pulumi.Input['ComputeBindingArgs'],
                 ml_assist_enabled: Optional[pulumi.Input[bool]] = None,
                 prelabel_accuracy_threshold: Optional[pulumi.Input[float]] = None):
        """
        Represents configuration for machine learning assisted features in a labeling job.

        :param pulumi.Input['ComputeBindingArgs'] inferencing_compute_binding: The compute designated for inferencing.
        :param pulumi.Input[str] model_name_prefix: Name prefix to use for machine learning model. For each iteration modelName will be appended with iteration e.g.{modelName}_{i}.
        :param pulumi.Input['ComputeBindingArgs'] training_compute_binding: The compute designated for training.
        :param pulumi.Input[bool] ml_assist_enabled: Indicates whether MLAssist feature is enabled.
        :param pulumi.Input[float] prelabel_accuracy_threshold: Prelabel accuracy threshold used in MLAssist feature.
        """
        # Required bindings and model name prefix.
        for required_name, required_value in (
                ("inferencing_compute_binding", inferencing_compute_binding),
                ("model_name_prefix", model_name_prefix),
                ("training_compute_binding", training_compute_binding)):
            pulumi.set(__self__, required_name, required_value)
        # Optional tuning knobs, stored only when supplied.
        for arg_name, arg_value in (
                ("ml_assist_enabled", ml_assist_enabled),
                ("prelabel_accuracy_threshold", prelabel_accuracy_threshold)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="inferencingComputeBinding")
    def inferencing_compute_binding(self) -> pulumi.Input['ComputeBindingArgs']:
        """The compute designated for inferencing."""
        return pulumi.get(self, "inferencing_compute_binding")

    @inferencing_compute_binding.setter
    def inferencing_compute_binding(self, new_value: pulumi.Input['ComputeBindingArgs']):
        pulumi.set(self, "inferencing_compute_binding", new_value)

    @property
    @pulumi.getter(name="modelNamePrefix")
    def model_name_prefix(self) -> pulumi.Input[str]:
        """Name prefix to use for machine learning model. For each iteration modelName will be appended with iteration e.g.{modelName}_{i}."""
        return pulumi.get(self, "model_name_prefix")

    @model_name_prefix.setter
    def model_name_prefix(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "model_name_prefix", new_value)

    @property
    @pulumi.getter(name="trainingComputeBinding")
    def training_compute_binding(self) -> pulumi.Input['ComputeBindingArgs']:
        """The compute designated for training."""
        return pulumi.get(self, "training_compute_binding")

    @training_compute_binding.setter
    def training_compute_binding(self, new_value: pulumi.Input['ComputeBindingArgs']):
        pulumi.set(self, "training_compute_binding", new_value)

    @property
    @pulumi.getter(name="mlAssistEnabled")
    def ml_assist_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Indicates whether MLAssist feature is enabled."""
        return pulumi.get(self, "ml_assist_enabled")

    @ml_assist_enabled.setter
    def ml_assist_enabled(self, new_value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ml_assist_enabled", new_value)

    @property
    @pulumi.getter(name="prelabelAccuracyThreshold")
    def prelabel_accuracy_threshold(self) -> Optional[pulumi.Input[float]]:
        """Prelabel accuracy threshold used in MLAssist feature."""
        return pulumi.get(self, "prelabel_accuracy_threshold")

    @prelabel_accuracy_threshold.setter
    def prelabel_accuracy_threshold(self, new_value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "prelabel_accuracy_threshold", new_value)
@pulumi.input_type
class ManagedIdentityArgs:
    def __init__(__self__, *,
                 identity_type: pulumi.Input[str],
                 client_id: Optional[pulumi.Input[str]] = None,
                 object_id: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        Managed identity configuration.

        :param pulumi.Input[str] identity_type: Enum to determine identity framework.
               Expected value is 'Managed'.
        :param pulumi.Input[str] client_id: Specifies a user-assigned identity by client ID. For system-assigned, do not set this field.
        :param pulumi.Input[str] object_id: Specifies a user-assigned identity by object ID. For system-assigned, do not set this field.
        :param pulumi.Input[str] resource_id: Specifies a user-assigned identity by ARM resource ID. For system-assigned, do not set this field.
        """
        # NOTE: the discriminator is stored as the literal 'Managed'; the value
        # passed in `identity_type` is ignored (generated-code convention for
        # discriminated union types).
        pulumi.set(__self__, "identity_type", 'Managed')
        # Optional user-assigned identity selectors: stored only when provided.
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if object_id is not None:
            pulumi.set(__self__, "object_id", object_id)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)

    @property
    @pulumi.getter(name="identityType")
    def identity_type(self) -> pulumi.Input[str]:
        """
        Enum to determine identity framework.
        Expected value is 'Managed'.
        """
        return pulumi.get(self, "identity_type")

    @identity_type.setter
    def identity_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "identity_type", value)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a user-assigned identity by client ID. For system-assigned, do not set this field.
        """
        return pulumi.get(self, "client_id")

    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id", value)

    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a user-assigned identity by object ID. For system-assigned, do not set this field.
        """
        return pulumi.get(self, "object_id")

    @object_id.setter
    def object_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "object_id", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a user-assigned identity by ARM resource ID. For system-assigned, do not set this field.
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
@pulumi.input_type
class ManagedOnlineDeploymentArgs:
    def __init__(__self__, *,
                 endpoint_compute_type: pulumi.Input[str],
                 app_insights_enabled: Optional[pulumi.Input[bool]] = None,
                 code_configuration: Optional[pulumi.Input['CodeConfigurationArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 environment_id: Optional[pulumi.Input[str]] = None,
                 environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 instance_type: Optional[pulumi.Input[str]] = None,
                 liveness_probe: Optional[pulumi.Input['ProbeSettingsArgs']] = None,
                 model: Optional[pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 readiness_probe: Optional[pulumi.Input['ProbeSettingsArgs']] = None,
                 request_settings: Optional[pulumi.Input['OnlineRequestSettingsArgs']] = None,
                 scale_settings: Optional[pulumi.Input[Union['AutoScaleSettingsArgs', 'ManualScaleSettingsArgs']]] = None):
        """
        Deployment settings for a managed online endpoint.

        :param pulumi.Input[str] endpoint_compute_type: Enum to determine endpoint compute type.
               Expected value is 'Managed'.
        :param pulumi.Input[bool] app_insights_enabled: If true, enables Application Insights logging.
        :param pulumi.Input['CodeConfigurationArgs'] code_configuration: Code configuration for the endpoint deployment.
        :param pulumi.Input[str] description: Description of the endpoint deployment.
        :param pulumi.Input[str] environment_id: ARM resource ID of the environment specification for the endpoint deployment.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: Environment variables configuration for the deployment.
        :param pulumi.Input[str] instance_type: Compute instance type.
        :param pulumi.Input['ProbeSettingsArgs'] liveness_probe: Deployment container liveness/readiness probe configuration.
        :param pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']] model: Reference to the model asset for the endpoint deployment.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: Property dictionary. Properties can be added, but not removed or altered.
        :param pulumi.Input['ProbeSettingsArgs'] readiness_probe: Deployment container liveness/readiness probe configuration.
        :param pulumi.Input['OnlineRequestSettingsArgs'] request_settings: Online deployment scoring requests configuration.
        :param pulumi.Input[Union['AutoScaleSettingsArgs', 'ManualScaleSettingsArgs']] scale_settings: Online deployment scaling configuration.
        """
        # The discriminator is pinned to the literal 'Managed' regardless of the
        # caller-supplied value (generated-code convention for union types).
        pulumi.set(__self__, "endpoint_compute_type", 'Managed')
        # Every other field is optional and written only when supplied.
        optional_fields = (
            ("app_insights_enabled", app_insights_enabled),
            ("code_configuration", code_configuration),
            ("description", description),
            ("environment_id", environment_id),
            ("environment_variables", environment_variables),
            ("instance_type", instance_type),
            ("liveness_probe", liveness_probe),
            ("model", model),
            ("properties", properties),
            ("readiness_probe", readiness_probe),
            ("request_settings", request_settings),
            ("scale_settings", scale_settings),
        )
        for key, val in optional_fields:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="endpointComputeType")
    def endpoint_compute_type(self) -> pulumi.Input[str]:
        """
        Enum to determine endpoint compute type.
        Expected value is 'Managed'.
        """
        return pulumi.get(self, "endpoint_compute_type")

    @endpoint_compute_type.setter
    def endpoint_compute_type(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "endpoint_compute_type", new_value)

    @property
    @pulumi.getter(name="appInsightsEnabled")
    def app_insights_enabled(self) -> Optional[pulumi.Input[bool]]:
        """If true, enables Application Insights logging."""
        return pulumi.get(self, "app_insights_enabled")

    @app_insights_enabled.setter
    def app_insights_enabled(self, new_value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "app_insights_enabled", new_value)

    @property
    @pulumi.getter(name="codeConfiguration")
    def code_configuration(self) -> Optional[pulumi.Input['CodeConfigurationArgs']]:
        """Code configuration for the endpoint deployment."""
        return pulumi.get(self, "code_configuration")

    @code_configuration.setter
    def code_configuration(self, new_value: Optional[pulumi.Input['CodeConfigurationArgs']]):
        pulumi.set(self, "code_configuration", new_value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Description of the endpoint deployment."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", new_value)

    @property
    @pulumi.getter(name="environmentId")
    def environment_id(self) -> Optional[pulumi.Input[str]]:
        """ARM resource ID of the environment specification for the endpoint deployment."""
        return pulumi.get(self, "environment_id")

    @environment_id.setter
    def environment_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "environment_id", new_value)

    @property
    @pulumi.getter(name="environmentVariables")
    def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Environment variables configuration for the deployment."""
        return pulumi.get(self, "environment_variables")

    @environment_variables.setter
    def environment_variables(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "environment_variables", new_value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> Optional[pulumi.Input[str]]:
        """Compute instance type."""
        return pulumi.get(self, "instance_type")

    @instance_type.setter
    def instance_type(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_type", new_value)

    @property
    @pulumi.getter(name="livenessProbe")
    def liveness_probe(self) -> Optional[pulumi.Input['ProbeSettingsArgs']]:
        """Deployment container liveness/readiness probe configuration."""
        return pulumi.get(self, "liveness_probe")

    @liveness_probe.setter
    def liveness_probe(self, new_value: Optional[pulumi.Input['ProbeSettingsArgs']]):
        pulumi.set(self, "liveness_probe", new_value)

    @property
    @pulumi.getter
    def model(self) -> Optional[pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']]]:
        """Reference to the model asset for the endpoint deployment."""
        return pulumi.get(self, "model")

    @model.setter
    def model(self, new_value: Optional[pulumi.Input[Union['DataPathAssetReferenceArgs', 'IdAssetReferenceArgs', 'OutputPathAssetReferenceArgs']]]):
        pulumi.set(self, "model", new_value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Property dictionary. Properties can be added, but not removed or altered."""
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", new_value)

    @property
    @pulumi.getter(name="readinessProbe")
    def readiness_probe(self) -> Optional[pulumi.Input['ProbeSettingsArgs']]:
        """Deployment container liveness/readiness probe configuration."""
        return pulumi.get(self, "readiness_probe")

    @readiness_probe.setter
    def readiness_probe(self, new_value: Optional[pulumi.Input['ProbeSettingsArgs']]):
        pulumi.set(self, "readiness_probe", new_value)

    @property
    @pulumi.getter(name="requestSettings")
    def request_settings(self) -> Optional[pulumi.Input['OnlineRequestSettingsArgs']]:
        """Online deployment scoring requests configuration."""
        return pulumi.get(self, "request_settings")

    @request_settings.setter
    def request_settings(self, new_value: Optional[pulumi.Input['OnlineRequestSettingsArgs']]):
        pulumi.set(self, "request_settings", new_value)

    @property
    @pulumi.getter(name="scaleSettings")
    def scale_settings(self) -> Optional[pulumi.Input[Union['AutoScaleSettingsArgs', 'ManualScaleSettingsArgs']]]:
        """Online deployment scaling configuration."""
        return pulumi.get(self, "scale_settings")

    @scale_settings.setter
    def scale_settings(self, new_value: Optional[pulumi.Input[Union['AutoScaleSettingsArgs', 'ManualScaleSettingsArgs']]]):
        pulumi.set(self, "scale_settings", new_value)
@pulumi.input_type
class ManualScaleSettingsArgs:
    def __init__(__self__, *,
                 scale_type: pulumi.Input[str],
                 instance_count: Optional[pulumi.Input[int]] = None,
                 max_instances: Optional[pulumi.Input[int]] = None,
                 min_instances: Optional[pulumi.Input[int]] = None):
        """
        Manual scaling configuration for a deployment.

        :param pulumi.Input[str] scale_type:
               Expected value is 'Manual'.
        :param pulumi.Input[int] instance_count: Fixed number of instances for this deployment.
        :param pulumi.Input[int] max_instances: Maximum number of instances for this deployment.
        :param pulumi.Input[int] min_instances: Minimum number of instances for this deployment.
        """
        # The discriminator is pinned to the literal 'Manual' regardless of the
        # caller-supplied value (generated-code convention for union types).
        pulumi.set(__self__, "scale_type", 'Manual')
        # Optional instance bounds are written only when supplied.
        for key, val in (("instance_count", instance_count),
                         ("max_instances", max_instances),
                         ("min_instances", min_instances)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="scaleType")
    def scale_type(self) -> pulumi.Input[str]:
        """
        Expected value is 'Manual'.
        """
        return pulumi.get(self, "scale_type")

    @scale_type.setter
    def scale_type(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "scale_type", new_value)

    @property
    @pulumi.getter(name="instanceCount")
    def instance_count(self) -> Optional[pulumi.Input[int]]:
        """Fixed number of instances for this deployment."""
        return pulumi.get(self, "instance_count")

    @instance_count.setter
    def instance_count(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "instance_count", new_value)

    @property
    @pulumi.getter(name="maxInstances")
    def max_instances(self) -> Optional[pulumi.Input[int]]:
        """Maximum number of instances for this deployment."""
        return pulumi.get(self, "max_instances")

    @max_instances.setter
    def max_instances(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_instances", new_value)

    @property
    @pulumi.getter(name="minInstances")
    def min_instances(self) -> Optional[pulumi.Input[int]]:
        """Minimum number of instances for this deployment."""
        return pulumi.get(self, "min_instances")

    @min_instances.setter
    def min_instances(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_instances", new_value)
@pulumi.input_type
class MedianStoppingPolicyArgs:
    def __init__(__self__, *,
                 policy_type: pulumi.Input[str],
                 delay_evaluation: Optional[pulumi.Input[int]] = None,
                 evaluation_interval: Optional[pulumi.Input[int]] = None):
        """
        Defines an early termination policy based on running averages of the primary metric of all runs.

        :param pulumi.Input[str] policy_type:
               Expected value is 'MedianStopping'.
        :param pulumi.Input[int] delay_evaluation: Number of intervals by which to delay the first evaluation.
        :param pulumi.Input[int] evaluation_interval: Interval (number of runs) between policy evaluations.
        """
        # The discriminator is pinned to the literal 'MedianStopping' regardless of
        # the caller-supplied value (generated-code convention for union types).
        pulumi.set(__self__, "policy_type", 'MedianStopping')
        # Optional evaluation parameters are written only when supplied.
        for key, val in (("delay_evaluation", delay_evaluation),
                         ("evaluation_interval", evaluation_interval)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="policyType")
    def policy_type(self) -> pulumi.Input[str]:
        """
        Expected value is 'MedianStopping'.
        """
        return pulumi.get(self, "policy_type")

    @policy_type.setter
    def policy_type(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "policy_type", new_value)

    @property
    @pulumi.getter(name="delayEvaluation")
    def delay_evaluation(self) -> Optional[pulumi.Input[int]]:
        """Number of intervals by which to delay the first evaluation."""
        return pulumi.get(self, "delay_evaluation")

    @delay_evaluation.setter
    def delay_evaluation(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "delay_evaluation", new_value)

    @property
    @pulumi.getter(name="evaluationInterval")
    def evaluation_interval(self) -> Optional[pulumi.Input[int]]:
        """Interval (number of runs) between policy evaluations."""
        return pulumi.get(self, "evaluation_interval")

    @evaluation_interval.setter
    def evaluation_interval(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_interval", new_value)
@pulumi.input_type
class ModelContainerArgs:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Asset-container metadata for a model.

        :param pulumi.Input[str] description: The asset description text.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The asset property dictionary.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tag dictionary. Tags can be added, removed, and updated.
        """
        # All fields are optional and written only when supplied.
        for key, val in (("description", description),
                         ("properties", properties),
                         ("tags", tags)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """The asset description text."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", new_value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """The asset property dictionary."""
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", new_value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Tag dictionary. Tags can be added, removed, and updated."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", new_value)
@pulumi.input_type
class ModelDockerSectionBaseImageRegistryArgs:
    def __init__(__self__, *,
                 address: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None):
        """
        Image registry that contains the base image.

        All fields are optional and only stored when supplied.
        """
        for key, val in (("address", address),
                         ("password", password),
                         ("username", username)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def address(self) -> Optional[pulumi.Input[str]]:
        """Registry address (undocumented upstream — presumably the registry host/URL)."""
        return pulumi.get(self, "address")

    @address.setter
    def address(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "address", new_value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """Registry credential: password."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", new_value)

    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """Registry credential: username."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", new_value)
@pulumi.input_type
class ModelEnvironmentDefinitionDockerArgs:
    def __init__(__self__, *,
                 base_dockerfile: Optional[pulumi.Input[str]] = None,
                 base_image: Optional[pulumi.Input[str]] = None,
                 base_image_registry: Optional[pulumi.Input['ModelDockerSectionBaseImageRegistryArgs']] = None):
        """
        The definition of a Docker container.

        :param pulumi.Input[str] base_dockerfile: Base Dockerfile used for Docker-based runs. Mutually exclusive with BaseImage.
        :param pulumi.Input[str] base_image: Base image used for Docker-based runs. Mutually exclusive with BaseDockerfile.
        :param pulumi.Input['ModelDockerSectionBaseImageRegistryArgs'] base_image_registry: Image registry that contains the base image.
        """
        # All fields are optional and written only when supplied.
        for key, val in (("base_dockerfile", base_dockerfile),
                         ("base_image", base_image),
                         ("base_image_registry", base_image_registry)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="baseDockerfile")
    def base_dockerfile(self) -> Optional[pulumi.Input[str]]:
        """Base Dockerfile used for Docker-based runs. Mutually exclusive with BaseImage."""
        return pulumi.get(self, "base_dockerfile")

    @base_dockerfile.setter
    def base_dockerfile(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "base_dockerfile", new_value)

    @property
    @pulumi.getter(name="baseImage")
    def base_image(self) -> Optional[pulumi.Input[str]]:
        """Base image used for Docker-based runs. Mutually exclusive with BaseDockerfile."""
        return pulumi.get(self, "base_image")

    @base_image.setter
    def base_image(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "base_image", new_value)

    @property
    @pulumi.getter(name="baseImageRegistry")
    def base_image_registry(self) -> Optional[pulumi.Input['ModelDockerSectionBaseImageRegistryArgs']]:
        """Image registry that contains the base image."""
        return pulumi.get(self, "base_image_registry")

    @base_image_registry.setter
    def base_image_registry(self, new_value: Optional[pulumi.Input['ModelDockerSectionBaseImageRegistryArgs']]):
        pulumi.set(self, "base_image_registry", new_value)
@pulumi.input_type
class ModelEnvironmentDefinitionPythonArgs:
    def __init__(__self__, *,
                 base_conda_environment: Optional[pulumi.Input[str]] = None,
                 conda_dependencies: Optional[Any] = None,
                 interpreter_path: Optional[pulumi.Input[str]] = None,
                 user_managed_dependencies: Optional[pulumi.Input[bool]] = None):
        """
        Settings for a Python environment.

        :param Any conda_dependencies: A JObject containing Conda dependencies.
        :param pulumi.Input[str] interpreter_path: The python interpreter path to use if an environment build is not required. The path specified gets used to call the user script.
        :param pulumi.Input[bool] user_managed_dependencies: True means that AzureML reuses an existing python environment; False means that AzureML will create a python environment based on the Conda dependencies specification.
        """
        # NOTE(review): `base_conda_environment` is undocumented in the upstream
        # schema; it is stored verbatim like every other optional field.
        for key, val in (("base_conda_environment", base_conda_environment),
                         ("conda_dependencies", conda_dependencies),
                         ("interpreter_path", interpreter_path),
                         ("user_managed_dependencies", user_managed_dependencies)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="baseCondaEnvironment")
    def base_conda_environment(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "base_conda_environment")

    @base_conda_environment.setter
    def base_conda_environment(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "base_conda_environment", new_value)

    @property
    @pulumi.getter(name="condaDependencies")
    def conda_dependencies(self) -> Optional[Any]:
        """A JObject containing Conda dependencies."""
        return pulumi.get(self, "conda_dependencies")

    @conda_dependencies.setter
    def conda_dependencies(self, new_value: Optional[Any]):
        pulumi.set(self, "conda_dependencies", new_value)

    @property
    @pulumi.getter(name="interpreterPath")
    def interpreter_path(self) -> Optional[pulumi.Input[str]]:
        """The python interpreter path to use if an environment build is not required. The path specified gets used to call the user script."""
        return pulumi.get(self, "interpreter_path")

    @interpreter_path.setter
    def interpreter_path(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "interpreter_path", new_value)

    @property
    @pulumi.getter(name="userManagedDependencies")
    def user_managed_dependencies(self) -> Optional[pulumi.Input[bool]]:
        """True means that AzureML reuses an existing python environment; False means that AzureML will create a python environment based on the Conda dependencies specification."""
        return pulumi.get(self, "user_managed_dependencies")

    @user_managed_dependencies.setter
    def user_managed_dependencies(self, new_value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "user_managed_dependencies", new_value)
@pulumi.input_type
class ModelEnvironmentDefinitionRArgs:
    def __init__(__self__, *,
                 bio_conductor_packages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 cran_packages: Optional[pulumi.Input[Sequence[pulumi.Input['RCranPackageArgs']]]] = None,
                 custom_url_packages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 git_hub_packages: Optional[pulumi.Input[Sequence[pulumi.Input['RGitHubPackageArgs']]]] = None,
                 r_version: Optional[pulumi.Input[str]] = None,
                 rscript_path: Optional[pulumi.Input[str]] = None,
                 snapshot_date: Optional[pulumi.Input[str]] = None,
                 user_managed: Optional[pulumi.Input[bool]] = None):
        """
        Settings for a R environment.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] bio_conductor_packages: The packages from Bioconductor.
        :param pulumi.Input[Sequence[pulumi.Input['RCranPackageArgs']]] cran_packages: The CRAN packages to use.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] custom_url_packages: The packages from custom urls.
        :param pulumi.Input[Sequence[pulumi.Input['RGitHubPackageArgs']]] git_hub_packages: The packages directly from GitHub.
        :param pulumi.Input[str] r_version: The version of R to be installed
        :param pulumi.Input[str] rscript_path: The Rscript path to use if an environment build is not required.
               The path specified gets used to call the user script.
        :param pulumi.Input[str] snapshot_date: Date of MRAN snapshot to use in YYYY-MM-DD format, e.g. "2019-04-17"
        :param pulumi.Input[bool] user_managed: Indicates whether the environment is managed by user or by AzureML.
        """
        # Every field is optional and stored only when supplied.
        optional_fields = (
            ("bio_conductor_packages", bio_conductor_packages),
            ("cran_packages", cran_packages),
            ("custom_url_packages", custom_url_packages),
            ("git_hub_packages", git_hub_packages),
            ("r_version", r_version),
            ("rscript_path", rscript_path),
            ("snapshot_date", snapshot_date),
            ("user_managed", user_managed),
        )
        for key, val in optional_fields:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="bioConductorPackages")
    def bio_conductor_packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """The packages from Bioconductor."""
        return pulumi.get(self, "bio_conductor_packages")

    @bio_conductor_packages.setter
    def bio_conductor_packages(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "bio_conductor_packages", new_value)

    @property
    @pulumi.getter(name="cranPackages")
    def cran_packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RCranPackageArgs']]]]:
        """The CRAN packages to use."""
        return pulumi.get(self, "cran_packages")

    @cran_packages.setter
    def cran_packages(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input['RCranPackageArgs']]]]):
        pulumi.set(self, "cran_packages", new_value)

    @property
    @pulumi.getter(name="customUrlPackages")
    def custom_url_packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """The packages from custom urls."""
        return pulumi.get(self, "custom_url_packages")

    @custom_url_packages.setter
    def custom_url_packages(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "custom_url_packages", new_value)

    @property
    @pulumi.getter(name="gitHubPackages")
    def git_hub_packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RGitHubPackageArgs']]]]:
        """The packages directly from GitHub."""
        return pulumi.get(self, "git_hub_packages")

    @git_hub_packages.setter
    def git_hub_packages(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input['RGitHubPackageArgs']]]]):
        pulumi.set(self, "git_hub_packages", new_value)

    @property
    @pulumi.getter(name="rVersion")
    def r_version(self) -> Optional[pulumi.Input[str]]:
        """The version of R to be installed"""
        return pulumi.get(self, "r_version")

    @r_version.setter
    def r_version(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "r_version", new_value)

    @property
    @pulumi.getter(name="rscriptPath")
    def rscript_path(self) -> Optional[pulumi.Input[str]]:
        """
        The Rscript path to use if an environment build is not required.
        The path specified gets used to call the user script.
        """
        return pulumi.get(self, "rscript_path")

    @rscript_path.setter
    def rscript_path(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rscript_path", new_value)

    @property
    @pulumi.getter(name="snapshotDate")
    def snapshot_date(self) -> Optional[pulumi.Input[str]]:
        """Date of MRAN snapshot to use in YYYY-MM-DD format, e.g. "2019-04-17" """
        return pulumi.get(self, "snapshot_date")

    @snapshot_date.setter
    def snapshot_date(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "snapshot_date", new_value)

    @property
    @pulumi.getter(name="userManaged")
    def user_managed(self) -> Optional[pulumi.Input[bool]]:
        """Indicates whether the environment is managed by user or by AzureML."""
        return pulumi.get(self, "user_managed")

    @user_managed.setter
    def user_managed(self, new_value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "user_managed", new_value)
@pulumi.input_type
class ModelEnvironmentDefinitionSparkArgs:
    def __init__(__self__, *,
                 packages: Optional[pulumi.Input[Sequence[pulumi.Input['SparkMavenPackageArgs']]]] = None,
                 precache_packages: Optional[pulumi.Input[bool]] = None,
                 repositories: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The configuration for a Spark environment.

        :param pulumi.Input[Sequence[pulumi.Input['SparkMavenPackageArgs']]] packages: The Spark packages to use.
        :param pulumi.Input[bool] precache_packages: Whether to precache the packages.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] repositories: The list of spark repositories.
        """
        # All fields are optional and written only when supplied.
        for key, val in (("packages", packages),
                         ("precache_packages", precache_packages),
                         ("repositories", repositories)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SparkMavenPackageArgs']]]]:
        """The Spark packages to use."""
        return pulumi.get(self, "packages")

    @packages.setter
    def packages(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input['SparkMavenPackageArgs']]]]):
        pulumi.set(self, "packages", new_value)

    @property
    @pulumi.getter(name="precachePackages")
    def precache_packages(self) -> Optional[pulumi.Input[bool]]:
        """Whether to precache the packages."""
        return pulumi.get(self, "precache_packages")

    @precache_packages.setter
    def precache_packages(self, new_value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "precache_packages", new_value)

    @property
    @pulumi.getter
    def repositories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """The list of spark repositories."""
        return pulumi.get(self, "repositories")

    @repositories.setter
    def repositories(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "repositories", new_value)
@pulumi.input_type
class ModelVersionArgs:
    def __init__(__self__, *,
                 path: pulumi.Input[str],
                 datastore_id: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 flavors: Optional[pulumi.Input[Mapping[str, pulumi.Input['FlavorDataArgs']]]] = None,
                 is_anonymous: Optional[pulumi.Input[bool]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Model asset version details.

        :param pulumi.Input[str] path: The path of the file/directory in the datastore.
        :param pulumi.Input[str] datastore_id: ARM resource ID of the datastore where the asset is located.
        :param pulumi.Input[str] description: The asset description text.
        :param pulumi.Input[Mapping[str, pulumi.Input['FlavorDataArgs']]] flavors: Mapping of model flavors to their properties.
        :param pulumi.Input[bool] is_anonymous: If the name version are system generated (anonymous registration).
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The asset property dictionary.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tag dictionary. Tags can be added, removed, and updated.
        """
        # "path" is the only required field; everything else is forwarded
        # only when the caller explicitly supplied it.
        pulumi.set(__self__, "path", path)
        for attr, arg in (("datastore_id", datastore_id),
                          ("description", description),
                          ("flavors", flavors),
                          ("is_anonymous", is_anonymous),
                          ("properties", properties),
                          ("tags", tags)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def path(self) -> pulumi.Input[str]:
        """
        The path of the file/directory in the datastore.
        """
        return pulumi.get(self, "path")

    @path.setter
    def path(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "path", new_value)

    @property
    @pulumi.getter(name="datastoreId")
    def datastore_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource ID of the datastore where the asset is located.
        """
        return pulumi.get(self, "datastore_id")

    @datastore_id.setter
    def datastore_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datastore_id", new_value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The asset description text.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", new_value)

    @property
    @pulumi.getter
    def flavors(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['FlavorDataArgs']]]]:
        """
        Mapping of model flavors to their properties.
        """
        return pulumi.get(self, "flavors")

    @flavors.setter
    def flavors(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input['FlavorDataArgs']]]]):
        pulumi.set(self, "flavors", new_value)

    @property
    @pulumi.getter(name="isAnonymous")
    def is_anonymous(self) -> Optional[pulumi.Input[bool]]:
        """
        If the name version are system generated (anonymous registration).
        """
        return pulumi.get(self, "is_anonymous")

    @is_anonymous.setter
    def is_anonymous(self, new_value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_anonymous", new_value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The asset property dictionary.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", new_value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tag dictionary. Tags can be added, removed, and updated.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", new_value)
@pulumi.input_type
class ModelArgs:
    def __init__(__self__, *,
                 mime_type: pulumi.Input[str],
                 name: pulumi.Input[str],
                 url: pulumi.Input[str],
                 created_time: Optional[pulumi.Input[str]] = None,
                 datasets: Optional[pulumi.Input[Sequence[pulumi.Input['DatasetReferenceArgs']]]] = None,
                 derived_model_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 experiment_name: Optional[pulumi.Input[str]] = None,
                 framework: Optional[pulumi.Input[str]] = None,
                 framework_version: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 kv_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 modified_time: Optional[pulumi.Input[str]] = None,
                 parent_model_id: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 resource_requirements: Optional[pulumi.Input['ContainerResourceRequirementsArgs']] = None,
                 run_id: Optional[pulumi.Input[str]] = None,
                 sample_input_data: Optional[pulumi.Input[str]] = None,
                 sample_output_data: Optional[pulumi.Input[str]] = None,
                 unpack: Optional[pulumi.Input[bool]] = None,
                 version: Optional[pulumi.Input[float]] = None):
        """
        An Azure Machine Learning Model.

        :param pulumi.Input[str] mime_type: The MIME type of Model content. For more details about MIME type, please open https://www.iana.org/assignments/media-types/media-types.xhtml
        :param pulumi.Input[str] name: The Model name.
        :param pulumi.Input[str] url: The URL of the Model. Usually a SAS URL.
        :param pulumi.Input[str] created_time: The Model creation time (UTC).
        :param pulumi.Input[Sequence[pulumi.Input['DatasetReferenceArgs']]] datasets: The list of datasets associated with the model.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] derived_model_ids: Models derived from this model
        :param pulumi.Input[str] description: The Model description text.
        :param pulumi.Input[str] experiment_name: The name of the experiment where this model was created.
        :param pulumi.Input[str] framework: The Model framework.
        :param pulumi.Input[str] framework_version: The Model framework version.
        :param pulumi.Input[str] id: The Model Id.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] kv_tags: The Model tag dictionary. Items are mutable.
        :param pulumi.Input[str] modified_time: The Model last modified time (UTC).
        :param pulumi.Input[str] parent_model_id: The Parent Model Id.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The Model property dictionary. Properties are immutable.
        :param pulumi.Input['ContainerResourceRequirementsArgs'] resource_requirements: Resource requirements for the model
        :param pulumi.Input[str] run_id: The RunId that created this model.
        :param pulumi.Input[str] sample_input_data: Sample Input Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}
        :param pulumi.Input[str] sample_output_data: Sample Output Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}
        :param pulumi.Input[bool] unpack: Indicates whether we need to unpack the Model during docker Image creation.
        :param pulumi.Input[float] version: The Model version assigned by Model Management Service.
        """
        # Required fields are always stored.
        pulumi.set(__self__, "mime_type", mime_type)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "url", url)
        # Optional fields are stored only when explicitly supplied.
        for attr, arg in (("created_time", created_time),
                          ("datasets", datasets),
                          ("derived_model_ids", derived_model_ids),
                          ("description", description),
                          ("experiment_name", experiment_name),
                          ("framework", framework),
                          ("framework_version", framework_version),
                          ("id", id),
                          ("kv_tags", kv_tags),
                          ("modified_time", modified_time),
                          ("parent_model_id", parent_model_id),
                          ("properties", properties),
                          ("resource_requirements", resource_requirements),
                          ("run_id", run_id),
                          ("sample_input_data", sample_input_data),
                          ("sample_output_data", sample_output_data),
                          ("unpack", unpack),
                          ("version", version)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="mimeType")
    def mime_type(self) -> pulumi.Input[str]:
        """
        The MIME type of Model content. For more details about MIME type, please open https://www.iana.org/assignments/media-types/media-types.xhtml
        """
        return pulumi.get(self, "mime_type")

    @mime_type.setter
    def mime_type(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "mime_type", new_value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The Model name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "name", new_value)

    @property
    @pulumi.getter
    def url(self) -> pulumi.Input[str]:
        """
        The URL of the Model. Usually a SAS URL.
        """
        return pulumi.get(self, "url")

    @url.setter
    def url(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "url", new_value)

    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> Optional[pulumi.Input[str]]:
        """
        The Model creation time (UTC).
        """
        return pulumi.get(self, "created_time")

    @created_time.setter
    def created_time(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_time", new_value)

    @property
    @pulumi.getter
    def datasets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DatasetReferenceArgs']]]]:
        """
        The list of datasets associated with the model.
        """
        return pulumi.get(self, "datasets")

    @datasets.setter
    def datasets(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input['DatasetReferenceArgs']]]]):
        pulumi.set(self, "datasets", new_value)

    @property
    @pulumi.getter(name="derivedModelIds")
    def derived_model_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Models derived from this model
        """
        return pulumi.get(self, "derived_model_ids")

    @derived_model_ids.setter
    def derived_model_ids(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "derived_model_ids", new_value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The Model description text.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", new_value)

    @property
    @pulumi.getter(name="experimentName")
    def experiment_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the experiment where this model was created.
        """
        return pulumi.get(self, "experiment_name")

    @experiment_name.setter
    def experiment_name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "experiment_name", new_value)

    @property
    @pulumi.getter
    def framework(self) -> Optional[pulumi.Input[str]]:
        """
        The Model framework.
        """
        return pulumi.get(self, "framework")

    @framework.setter
    def framework(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "framework", new_value)

    @property
    @pulumi.getter(name="frameworkVersion")
    def framework_version(self) -> Optional[pulumi.Input[str]]:
        """
        The Model framework version.
        """
        return pulumi.get(self, "framework_version")

    @framework_version.setter
    def framework_version(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "framework_version", new_value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The Model Id.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", new_value)

    @property
    @pulumi.getter(name="kvTags")
    def kv_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The Model tag dictionary. Items are mutable.
        """
        return pulumi.get(self, "kv_tags")

    @kv_tags.setter
    def kv_tags(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "kv_tags", new_value)

    @property
    @pulumi.getter(name="modifiedTime")
    def modified_time(self) -> Optional[pulumi.Input[str]]:
        """
        The Model last modified time (UTC).
        """
        return pulumi.get(self, "modified_time")

    @modified_time.setter
    def modified_time(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "modified_time", new_value)

    @property
    @pulumi.getter(name="parentModelId")
    def parent_model_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Parent Model Id.
        """
        return pulumi.get(self, "parent_model_id")

    @parent_model_id.setter
    def parent_model_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent_model_id", new_value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The Model property dictionary. Properties are immutable.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", new_value)

    @property
    @pulumi.getter(name="resourceRequirements")
    def resource_requirements(self) -> Optional[pulumi.Input['ContainerResourceRequirementsArgs']]:
        """
        Resource requirements for the model
        """
        return pulumi.get(self, "resource_requirements")

    @resource_requirements.setter
    def resource_requirements(self, new_value: Optional[pulumi.Input['ContainerResourceRequirementsArgs']]):
        pulumi.set(self, "resource_requirements", new_value)

    @property
    @pulumi.getter(name="runId")
    def run_id(self) -> Optional[pulumi.Input[str]]:
        """
        The RunId that created this model.
        """
        return pulumi.get(self, "run_id")

    @run_id.setter
    def run_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "run_id", new_value)

    @property
    @pulumi.getter(name="sampleInputData")
    def sample_input_data(self) -> Optional[pulumi.Input[str]]:
        """
        Sample Input Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}
        """
        return pulumi.get(self, "sample_input_data")

    @sample_input_data.setter
    def sample_input_data(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sample_input_data", new_value)

    @property
    @pulumi.getter(name="sampleOutputData")
    def sample_output_data(self) -> Optional[pulumi.Input[str]]:
        """
        Sample Output Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}
        """
        return pulumi.get(self, "sample_output_data")

    @sample_output_data.setter
    def sample_output_data(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sample_output_data", new_value)

    @property
    @pulumi.getter
    def unpack(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether we need to unpack the Model during docker Image creation.
        """
        return pulumi.get(self, "unpack")

    @unpack.setter
    def unpack(self, new_value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unpack", new_value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[float]]:
        """
        The Model version assigned by Model Management Service.
        """
        return pulumi.get(self, "version")

    @version.setter
    def version(self, new_value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "version", new_value)
@pulumi.input_type
class MpiArgs:
    def __init__(__self__, *,
                 distribution_type: pulumi.Input[str],
                 process_count_per_instance: Optional[pulumi.Input[int]] = None):
        """
        MPI distribution configuration.

        :param pulumi.Input[str] distribution_type: Enum to determine the job distribution type.
               Expected value is 'Mpi'.
        :param pulumi.Input[int] process_count_per_instance: Number of processes per MPI node.
        """
        # The discriminator is pinned to 'Mpi' regardless of the value passed in.
        pulumi.set(__self__, "distribution_type", 'Mpi')
        if process_count_per_instance is not None:
            pulumi.set(__self__, "process_count_per_instance", process_count_per_instance)

    @property
    @pulumi.getter(name="distributionType")
    def distribution_type(self) -> pulumi.Input[str]:
        """
        Enum to determine the job distribution type.
        Expected value is 'Mpi'.
        """
        return pulumi.get(self, "distribution_type")

    @distribution_type.setter
    def distribution_type(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "distribution_type", new_value)

    @property
    @pulumi.getter(name="processCountPerInstance")
    def process_count_per_instance(self) -> Optional[pulumi.Input[int]]:
        """
        Number of processes per MPI node.
        """
        return pulumi.get(self, "process_count_per_instance")

    @process_count_per_instance.setter
    def process_count_per_instance(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "process_count_per_instance", new_value)
@pulumi.input_type
class ObjectiveArgs:
    def __init__(__self__, *,
                 goal: pulumi.Input[Union[str, 'Goal']],
                 primary_metric: pulumi.Input[str]):
        """
        Optimization objective.

        :param pulumi.Input[Union[str, 'Goal']] goal: Defines supported metric goals for hyperparameter tuning
        :param pulumi.Input[str] primary_metric: Name of the metric to optimize.
        """
        # Both fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "goal", goal)
        pulumi.set(__self__, "primary_metric", primary_metric)

    @property
    @pulumi.getter
    def goal(self) -> pulumi.Input[Union[str, 'Goal']]:
        """
        Defines supported metric goals for hyperparameter tuning
        """
        return pulumi.get(self, "goal")

    @goal.setter
    def goal(self, new_value: pulumi.Input[Union[str, 'Goal']]):
        pulumi.set(self, "goal", new_value)

    @property
    @pulumi.getter(name="primaryMetric")
    def primary_metric(self) -> pulumi.Input[str]:
        """
        Name of the metric to optimize.
        """
        return pulumi.get(self, "primary_metric")

    @primary_metric.setter
    def primary_metric(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "primary_metric", new_value)
@pulumi.input_type
class OnlineEndpointArgs:
    def __init__(__self__, *,
                 auth_mode: pulumi.Input[Union[str, 'EndpointAuthMode']],
                 description: Optional[pulumi.Input[str]] = None,
                 keys: Optional[pulumi.Input['EndpointAuthKeysArgs']] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 traffic: Optional[pulumi.Input[Mapping[str, pulumi.Input[int]]]] = None):
        """
        Online endpoint configuration

        :param pulumi.Input[Union[str, 'EndpointAuthMode']] auth_mode: Inference endpoint authentication mode type
        :param pulumi.Input[str] description: Description of the inference endpoint.
        :param pulumi.Input['EndpointAuthKeysArgs'] keys: EndpointAuthKeys to set initially on an Endpoint.
               This property will always be returned as null. AuthKey values must be retrieved using the ListKeys API.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: Property dictionary. Properties can be added, but not removed or altered.
        :param pulumi.Input[str] target: ARM resource ID of the compute if it exists.
               optional
        :param pulumi.Input[Mapping[str, pulumi.Input[int]]] traffic: Traffic rules on how the traffic will be routed across deployments.
        """
        # "auth_mode" is required; the remaining fields are forwarded only
        # when explicitly supplied.
        pulumi.set(__self__, "auth_mode", auth_mode)
        for attr, arg in (("description", description),
                          ("keys", keys),
                          ("properties", properties),
                          ("target", target),
                          ("traffic", traffic)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="authMode")
    def auth_mode(self) -> pulumi.Input[Union[str, 'EndpointAuthMode']]:
        """
        Inference endpoint authentication mode type
        """
        return pulumi.get(self, "auth_mode")

    @auth_mode.setter
    def auth_mode(self, new_value: pulumi.Input[Union[str, 'EndpointAuthMode']]):
        pulumi.set(self, "auth_mode", new_value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of the inference endpoint.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", new_value)

    @property
    @pulumi.getter
    def keys(self) -> Optional[pulumi.Input['EndpointAuthKeysArgs']]:
        """
        EndpointAuthKeys to set initially on an Endpoint.
        This property will always be returned as null. AuthKey values must be retrieved using the ListKeys API.
        """
        return pulumi.get(self, "keys")

    @keys.setter
    def keys(self, new_value: Optional[pulumi.Input['EndpointAuthKeysArgs']]):
        pulumi.set(self, "keys", new_value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Property dictionary. Properties can be added, but not removed or altered.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", new_value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource ID of the compute if it exists.
        optional
        """
        return pulumi.get(self, "target")

    @target.setter
    def target(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", new_value)

    @property
    @pulumi.getter
    def traffic(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[int]]]]:
        """
        Traffic rules on how the traffic will be routed across deployments.
        """
        return pulumi.get(self, "traffic")

    @traffic.setter
    def traffic(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[int]]]]):
        pulumi.set(self, "traffic", new_value)
@pulumi.input_type
class OnlineRequestSettingsArgs:
    def __init__(__self__, *,
                 max_concurrent_requests_per_instance: Optional[pulumi.Input[int]] = None,
                 max_queue_wait: Optional[pulumi.Input[str]] = None,
                 request_timeout: Optional[pulumi.Input[str]] = None):
        """
        Online deployment scoring requests configuration.

        :param pulumi.Input[int] max_concurrent_requests_per_instance: The number of requests allowed to queue at once for this deployment.
        :param pulumi.Input[str] max_queue_wait: The maximum queue wait time in ISO 8601 format. Supports millisecond precision.
        :param pulumi.Input[str] request_timeout: The request timeout in ISO 8601 format. Supports millisecond precision.
        """
        # Forward only the arguments that were explicitly supplied.
        for attr, arg in (("max_concurrent_requests_per_instance", max_concurrent_requests_per_instance),
                          ("max_queue_wait", max_queue_wait),
                          ("request_timeout", request_timeout)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="maxConcurrentRequestsPerInstance")
    def max_concurrent_requests_per_instance(self) -> Optional[pulumi.Input[int]]:
        """
        The number of requests allowed to queue at once for this deployment.
        """
        return pulumi.get(self, "max_concurrent_requests_per_instance")

    @max_concurrent_requests_per_instance.setter
    def max_concurrent_requests_per_instance(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_concurrent_requests_per_instance", new_value)

    @property
    @pulumi.getter(name="maxQueueWait")
    def max_queue_wait(self) -> Optional[pulumi.Input[str]]:
        """
        The maximum queue wait time in ISO 8601 format. Supports millisecond precision.
        """
        return pulumi.get(self, "max_queue_wait")

    @max_queue_wait.setter
    def max_queue_wait(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_queue_wait", new_value)

    @property
    @pulumi.getter(name="requestTimeout")
    def request_timeout(self) -> Optional[pulumi.Input[str]]:
        """
        The request timeout in ISO 8601 format. Supports millisecond precision.
        """
        return pulumi.get(self, "request_timeout")

    @request_timeout.setter
    def request_timeout(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request_timeout", new_value)
@pulumi.input_type
class OutputDataBindingArgs:
    def __init__(__self__, *,
                 datastore_id: Optional[pulumi.Input[str]] = None,
                 mode: Optional[pulumi.Input[Union[str, 'DataBindingMode']]] = None,
                 path_on_compute: Optional[pulumi.Input[str]] = None,
                 path_on_datastore: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] datastore_id: ARM resource ID of the datastore where the data output will be stored.
        :param pulumi.Input[Union[str, 'DataBindingMode']] mode: Mechanism for data movement to datastore.
        :param pulumi.Input[str] path_on_compute: Location of data inside the container process.
        :param pulumi.Input[str] path_on_datastore: Path within the datastore to the data.
        """
        # Forward only the arguments that were explicitly supplied.
        for attr, arg in (("datastore_id", datastore_id),
                          ("mode", mode),
                          ("path_on_compute", path_on_compute),
                          ("path_on_datastore", path_on_datastore)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="datastoreId")
    def datastore_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource ID of the datastore where the data output will be stored.
        """
        return pulumi.get(self, "datastore_id")

    @datastore_id.setter
    def datastore_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datastore_id", new_value)

    @property
    @pulumi.getter
    def mode(self) -> Optional[pulumi.Input[Union[str, 'DataBindingMode']]]:
        """
        Mechanism for data movement to datastore.
        """
        return pulumi.get(self, "mode")

    @mode.setter
    def mode(self, new_value: Optional[pulumi.Input[Union[str, 'DataBindingMode']]]):
        pulumi.set(self, "mode", new_value)

    @property
    @pulumi.getter(name="pathOnCompute")
    def path_on_compute(self) -> Optional[pulumi.Input[str]]:
        """
        Location of data inside the container process.
        """
        return pulumi.get(self, "path_on_compute")

    @path_on_compute.setter
    def path_on_compute(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path_on_compute", new_value)

    @property
    @pulumi.getter(name="pathOnDatastore")
    def path_on_datastore(self) -> Optional[pulumi.Input[str]]:
        """
        Path within the datastore to the data.
        """
        return pulumi.get(self, "path_on_datastore")

    @path_on_datastore.setter
    def path_on_datastore(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path_on_datastore", new_value)
@pulumi.input_type
class OutputPathAssetReferenceArgs:
    def __init__(__self__, *,
                 reference_type: pulumi.Input[str],
                 job_id: Optional[pulumi.Input[str]] = None,
                 path: Optional[pulumi.Input[str]] = None):
        """
        Reference to an asset via its path in a job output.

        :param pulumi.Input[str] reference_type: Enum to determine which reference method to use for an asset.
               Expected value is 'OutputPath'.
        :param pulumi.Input[str] job_id: ARM resource ID of the job.
        :param pulumi.Input[str] path: The path of the file/directory in the job output.
        """
        # The discriminator is pinned to 'OutputPath' regardless of the value passed in.
        pulumi.set(__self__, "reference_type", 'OutputPath')
        for attr, arg in (("job_id", job_id), ("path", path)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="referenceType")
    def reference_type(self) -> pulumi.Input[str]:
        """
        Enum to determine which reference method to use for an asset.
        Expected value is 'OutputPath'.
        """
        return pulumi.get(self, "reference_type")

    @reference_type.setter
    def reference_type(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "reference_type", new_value)

    @property
    @pulumi.getter(name="jobId")
    def job_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource ID of the job.
        """
        return pulumi.get(self, "job_id")

    @job_id.setter
    def job_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "job_id", new_value)

    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        """
        The path of the file/directory in the job output.
        """
        return pulumi.get(self, "path")

    @path.setter
    def path(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", new_value)
@pulumi.input_type
class PersonalComputeInstanceSettingsArgs:
    def __init__(__self__, *,
                 assigned_user: Optional[pulumi.Input['AssignedUserArgs']] = None):
        """
        Settings for a personal compute instance.

        :param pulumi.Input['AssignedUserArgs'] assigned_user: A user explicitly assigned to a personal compute instance.
        """
        # Store the assignment only when one was actually provided.
        if assigned_user is not None:
            pulumi.set(__self__, "assigned_user", assigned_user)

    @property
    @pulumi.getter(name="assignedUser")
    def assigned_user(self) -> Optional[pulumi.Input['AssignedUserArgs']]:
        """
        A user explicitly assigned to a personal compute instance.
        """
        return pulumi.get(self, "assigned_user")

    @assigned_user.setter
    def assigned_user(self, new_value: Optional[pulumi.Input['AssignedUserArgs']]):
        pulumi.set(self, "assigned_user", new_value)
@pulumi.input_type
class PrivateLinkServiceConnectionStateArgs:
    def __init__(__self__, *,
                 actions_required: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]] = None):
        """
        A collection of information about the state of the connection between service consumer and provider.

        :param pulumi.Input[str] actions_required: A message indicating if changes on the service provider require any updates on the consumer.
        :param pulumi.Input[str] description: The reason for approval/rejection of the connection.
        :param pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']] status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
        """
        # Forward only the arguments that were explicitly supplied.
        for attr, arg in (("actions_required", actions_required),
                          ("description", description),
                          ("status", status)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="actionsRequired")
    def actions_required(self) -> Optional[pulumi.Input[str]]:
        """
        A message indicating if changes on the service provider require any updates on the consumer.
        """
        return pulumi.get(self, "actions_required")

    @actions_required.setter
    def actions_required(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "actions_required", new_value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The reason for approval/rejection of the connection.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", new_value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:
        """
        Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, new_value: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]):
        pulumi.set(self, "status", new_value)
@pulumi.input_type
class ProbeSettingsArgs:
    def __init__(__self__, *,
                 failure_threshold: Optional[pulumi.Input[int]] = None,
                 initial_delay: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[str]] = None,
                 success_threshold: Optional[pulumi.Input[int]] = None,
                 timeout: Optional[pulumi.Input[str]] = None):
        """
        Deployment container liveness/readiness probe configuration.

        :param pulumi.Input[int] failure_threshold: The number of failures to allow before returning an unhealthy status.
        :param pulumi.Input[str] initial_delay: The delay before the first probe in ISO 8601 format.
        :param pulumi.Input[str] period: The length of time between probes in ISO 8601 format.
        :param pulumi.Input[int] success_threshold: The number of successful probes before returning a healthy status.
        :param pulumi.Input[str] timeout: The probe timeout in ISO 8601 format.
        """
        # Forward only the arguments that were explicitly supplied.
        for attr, arg in (("failure_threshold", failure_threshold),
                          ("initial_delay", initial_delay),
                          ("period", period),
                          ("success_threshold", success_threshold),
                          ("timeout", timeout)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="failureThreshold")
    def failure_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The number of failures to allow before returning an unhealthy status.
        """
        return pulumi.get(self, "failure_threshold")

    @failure_threshold.setter
    def failure_threshold(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "failure_threshold", new_value)

    @property
    @pulumi.getter(name="initialDelay")
    def initial_delay(self) -> Optional[pulumi.Input[str]]:
        """
        The delay before the first probe in ISO 8601 format.
        """
        return pulumi.get(self, "initial_delay")

    @initial_delay.setter
    def initial_delay(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "initial_delay", new_value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[str]]:
        """
        The length of time between probes in ISO 8601 format.
        """
        return pulumi.get(self, "period")

    @period.setter
    def period(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "period", new_value)

    @property
    @pulumi.getter(name="successThreshold")
    def success_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The number of successful probes before returning a healthy status.
        """
        return pulumi.get(self, "success_threshold")

    @success_threshold.setter
    def success_threshold(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "success_threshold", new_value)

    @property
    @pulumi.getter
    def timeout(self) -> Optional[pulumi.Input[str]]:
        """
        The probe timeout in ISO 8601 format.
        """
        return pulumi.get(self, "timeout")

    @timeout.setter
    def timeout(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "timeout", new_value)
@pulumi.input_type
class PyTorchArgs:
    def __init__(__self__, *,
                 distribution_type: pulumi.Input[str],
                 process_count: Optional[pulumi.Input[int]] = None):
        """
        PyTorch distribution configuration.

        :param pulumi.Input[str] distribution_type: Enum to determine the job distribution type.
               Expected value is 'PyTorch'.
        :param pulumi.Input[int] process_count: Total process count for the distributed job.
        """
        # The discriminator is pinned to the literal 'PyTorch' regardless of
        # the argument value — this class always represents that variant.
        pulumi.set(__self__, "distribution_type", 'PyTorch')
        if process_count is not None:
            pulumi.set(__self__, "process_count", process_count)

    @property
    @pulumi.getter(name="distributionType")
    def distribution_type(self) -> pulumi.Input[str]:
        """
        Enum to determine the job distribution type.
        Expected value is 'PyTorch'.
        """
        return pulumi.get(self, "distribution_type")

    @distribution_type.setter
    def distribution_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "distribution_type", value)

    @property
    @pulumi.getter(name="processCount")
    def process_count(self) -> Optional[pulumi.Input[int]]:
        """Total process count for the distributed job."""
        return pulumi.get(self, "process_count")

    @process_count.setter
    def process_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "process_count", value)
@pulumi.input_type
class RCranPackageArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 repository: Optional[pulumi.Input[str]] = None):
        """
        An R package sourced from CRAN.

        :param pulumi.Input[str] name: The package name.
        :param pulumi.Input[str] repository: The repository name.
        """
        # Only record values that were explicitly provided.
        for key, val in (("name", name), ("repository", repository)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """The package name."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def repository(self) -> Optional[pulumi.Input[str]]:
        """The repository name."""
        return pulumi.get(self, "repository")

    @repository.setter
    def repository(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repository", value)
@pulumi.input_type
class RGitHubPackageArgs:
    def __init__(__self__, *,
                 auth_token: Optional[pulumi.Input[str]] = None,
                 repository: Optional[pulumi.Input[str]] = None):
        """
        An R package sourced from GitHub.

        :param pulumi.Input[str] auth_token: Personal access token to install from a private repo
        :param pulumi.Input[str] repository: Repository address in the format username/repo[/subdir][@ref|#pull].
        """
        # auth_token is a credential; it is stored as supplied, nothing else
        # is done with it here.
        for key, val in (("auth_token", auth_token), ("repository", repository)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="authToken")
    def auth_token(self) -> Optional[pulumi.Input[str]]:
        """Personal access token to install from a private repo"""
        return pulumi.get(self, "auth_token")

    @auth_token.setter
    def auth_token(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "auth_token", value)

    @property
    @pulumi.getter
    def repository(self) -> Optional[pulumi.Input[str]]:
        """Repository address in the format username/repo[/subdir][@ref|#pull]."""
        return pulumi.get(self, "repository")

    @repository.setter
    def repository(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repository", value)
@pulumi.input_type
class ResourceIdentityArgs:
    def __init__(__self__, *,
                 type: Optional[pulumi.Input[Union[str, 'ResourceIdentityAssignment']]] = None,
                 user_assigned_identities: Optional[pulumi.Input[Mapping[str, pulumi.Input['UserAssignedIdentityMetaArgs']]]] = None):
        """
        Service identity associated with a resource.

        :param pulumi.Input[Union[str, 'ResourceIdentityAssignment']] type: Defines values for a ResourceIdentity's type.
        :param pulumi.Input[Mapping[str, pulumi.Input['UserAssignedIdentityMetaArgs']]] user_assigned_identities: Dictionary of the user assigned identities, key is ARM resource ID of the UAI.
        """
        # Fields left as None are simply not stored.
        if type is not None:
            pulumi.set(__self__, "type", type)
        if user_assigned_identities is not None:
            pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[Union[str, 'ResourceIdentityAssignment']]]:
        """Defines values for a ResourceIdentity's type."""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[Union[str, 'ResourceIdentityAssignment']]]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="userAssignedIdentities")
    def user_assigned_identities(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['UserAssignedIdentityMetaArgs']]]]:
        """Dictionary of the user assigned identities, key is ARM resource ID of the UAI."""
        return pulumi.get(self, "user_assigned_identities")

    @user_assigned_identities.setter
    def user_assigned_identities(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['UserAssignedIdentityMetaArgs']]]]):
        pulumi.set(self, "user_assigned_identities", value)
@pulumi.input_type
class ResourceIdArgs:
    def __init__(__self__, *,
                 id: pulumi.Input[str]):
        """
        Represents a resource ID. For example, for a subnet, it is the resource URL for the subnet.

        :param pulumi.Input[str] id: The ID of the resource
        """
        # NOTE: the parameter name `id` shadows the builtin; it is part of the
        # generated public interface and must not be renamed.
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[str]:
        """The ID of the resource"""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: pulumi.Input[str]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class RouteArgs:
    def __init__(__self__, *,
                 path: pulumi.Input[str],
                 port: pulumi.Input[int]):
        """
        A scoring route: the HTTP path and the container port it maps to.

        :param pulumi.Input[str] path: The path for the route.
        :param pulumi.Input[int] port: The port for the route.
        """
        # Both fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "path", path)
        pulumi.set(__self__, "port", port)

    @property
    @pulumi.getter
    def path(self) -> pulumi.Input[str]:
        """The path for the route."""
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: pulumi.Input[str]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter
    def port(self) -> pulumi.Input[int]:
        """The port for the route."""
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: pulumi.Input[int]):
        pulumi.set(self, "port", value)
@pulumi.input_type
class ScaleSettingsArgs:
    def __init__(__self__, *,
                 max_node_count: pulumi.Input[int],
                 min_node_count: Optional[pulumi.Input[int]] = None,
                 node_idle_time_before_scale_down: Optional[pulumi.Input[str]] = None):
        """
        scale settings for AML Compute

        :param pulumi.Input[int] max_node_count: Max number of nodes to use
        :param pulumi.Input[int] min_node_count: Min number of nodes to use. Defaults to 0 when not provided.
        :param pulumi.Input[str] node_idle_time_before_scale_down: Node Idle Time before scaling down amlCompute. This string needs to be in the RFC Format.
        """
        pulumi.set(__self__, "max_node_count", max_node_count)
        # min_node_count defaults to 0 and is always stored; the original code
        # re-checked `is not None` after coalescing, which was dead logic.
        if min_node_count is None:
            min_node_count = 0
        pulumi.set(__self__, "min_node_count", min_node_count)
        if node_idle_time_before_scale_down is not None:
            pulumi.set(__self__, "node_idle_time_before_scale_down", node_idle_time_before_scale_down)

    @property
    @pulumi.getter(name="maxNodeCount")
    def max_node_count(self) -> pulumi.Input[int]:
        """Max number of nodes to use"""
        return pulumi.get(self, "max_node_count")

    @max_node_count.setter
    def max_node_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_node_count", value)

    @property
    @pulumi.getter(name="minNodeCount")
    def min_node_count(self) -> Optional[pulumi.Input[int]]:
        """Min number of nodes to use"""
        return pulumi.get(self, "min_node_count")

    @min_node_count.setter
    def min_node_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_node_count", value)

    @property
    @pulumi.getter(name="nodeIdleTimeBeforeScaleDown")
    def node_idle_time_before_scale_down(self) -> Optional[pulumi.Input[str]]:
        """Node Idle Time before scaling down amlCompute. This string needs to be in the RFC Format."""
        return pulumi.get(self, "node_idle_time_before_scale_down")

    @node_idle_time_before_scale_down.setter
    def node_idle_time_before_scale_down(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "node_idle_time_before_scale_down", value)
@pulumi.input_type
class ScriptReferenceArgs:
    def __init__(__self__, *,
                 script_arguments: Optional[pulumi.Input[str]] = None,
                 script_data: Optional[pulumi.Input[str]] = None,
                 script_source: Optional[pulumi.Input[str]] = None,
                 timeout: Optional[pulumi.Input[str]] = None):
        """
        Script reference

        :param pulumi.Input[str] script_arguments: Optional command line arguments passed to the script to run.
        :param pulumi.Input[str] script_data: The location of scripts in the mounted volume.
        :param pulumi.Input[str] script_source: The storage source of the script: inline, workspace.
        :param pulumi.Input[str] timeout: Optional time period passed to timeout command.
        """
        # Persist only the arguments the caller supplied.
        for key, val in (
            ("script_arguments", script_arguments),
            ("script_data", script_data),
            ("script_source", script_source),
            ("timeout", timeout),
        ):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="scriptArguments")
    def script_arguments(self) -> Optional[pulumi.Input[str]]:
        """Optional command line arguments passed to the script to run."""
        return pulumi.get(self, "script_arguments")

    @script_arguments.setter
    def script_arguments(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "script_arguments", value)

    @property
    @pulumi.getter(name="scriptData")
    def script_data(self) -> Optional[pulumi.Input[str]]:
        """The location of scripts in the mounted volume."""
        return pulumi.get(self, "script_data")

    @script_data.setter
    def script_data(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "script_data", value)

    @property
    @pulumi.getter(name="scriptSource")
    def script_source(self) -> Optional[pulumi.Input[str]]:
        """The storage source of the script: inline, workspace."""
        return pulumi.get(self, "script_source")

    @script_source.setter
    def script_source(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "script_source", value)

    @property
    @pulumi.getter
    def timeout(self) -> Optional[pulumi.Input[str]]:
        """Optional time period passed to timeout command."""
        return pulumi.get(self, "timeout")

    @timeout.setter
    def timeout(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "timeout", value)
@pulumi.input_type
class ScriptsToExecuteArgs:
    def __init__(__self__, *,
                 creation_script: Optional[pulumi.Input['ScriptReferenceArgs']] = None,
                 startup_script: Optional[pulumi.Input['ScriptReferenceArgs']] = None):
        """
        Customized setup scripts

        :param pulumi.Input['ScriptReferenceArgs'] creation_script: Script that's run only once during provision of the compute.
        :param pulumi.Input['ScriptReferenceArgs'] startup_script: Script that's run every time the machine starts.
        """
        # Record each script reference only when it is provided.
        if creation_script is not None:
            pulumi.set(__self__, "creation_script", creation_script)
        if startup_script is not None:
            pulumi.set(__self__, "startup_script", startup_script)

    @property
    @pulumi.getter(name="creationScript")
    def creation_script(self) -> Optional[pulumi.Input['ScriptReferenceArgs']]:
        """Script that's run only once during provision of the compute."""
        return pulumi.get(self, "creation_script")

    @creation_script.setter
    def creation_script(self, value: Optional[pulumi.Input['ScriptReferenceArgs']]):
        pulumi.set(self, "creation_script", value)

    @property
    @pulumi.getter(name="startupScript")
    def startup_script(self) -> Optional[pulumi.Input['ScriptReferenceArgs']]:
        """Script that's run every time the machine starts."""
        return pulumi.get(self, "startup_script")

    @startup_script.setter
    def startup_script(self, value: Optional[pulumi.Input['ScriptReferenceArgs']]):
        pulumi.set(self, "startup_script", value)
@pulumi.input_type
class ServiceManagedResourcesSettingsArgs:
    def __init__(__self__, *,
                 cosmos_db: Optional[pulumi.Input['CosmosDbSettingsArgs']] = None):
        """
        Settings for resources the service manages on the user's behalf.

        :param pulumi.Input['CosmosDbSettingsArgs'] cosmos_db: The settings for the service managed cosmosdb account.
        """
        if cosmos_db is not None:
            pulumi.set(__self__, "cosmos_db", cosmos_db)

    @property
    @pulumi.getter(name="cosmosDb")
    def cosmos_db(self) -> Optional[pulumi.Input['CosmosDbSettingsArgs']]:
        """The settings for the service managed cosmosdb account."""
        return pulumi.get(self, "cosmos_db")

    @cosmos_db.setter
    def cosmos_db(self, value: Optional[pulumi.Input['CosmosDbSettingsArgs']]):
        pulumi.set(self, "cosmos_db", value)
@pulumi.input_type
class SetupScriptsArgs:
    def __init__(__self__, *,
                 scripts: Optional[pulumi.Input['ScriptsToExecuteArgs']] = None):
        """
        Details of customized scripts to execute for setting up the cluster.

        :param pulumi.Input['ScriptsToExecuteArgs'] scripts: Customized setup scripts
        """
        if scripts is not None:
            pulumi.set(__self__, "scripts", scripts)

    @property
    @pulumi.getter
    def scripts(self) -> Optional[pulumi.Input['ScriptsToExecuteArgs']]:
        """Customized setup scripts"""
        return pulumi.get(self, "scripts")

    @scripts.setter
    def scripts(self, value: Optional[pulumi.Input['ScriptsToExecuteArgs']]):
        pulumi.set(self, "scripts", value)
@pulumi.input_type
class SharedPrivateLinkResourceArgs:
    def __init__(__self__, *,
                 group_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 private_link_resource_id: Optional[pulumi.Input[str]] = None,
                 request_message: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]] = None):
        """
        A shared private link resource.

        :param pulumi.Input[str] group_id: The private link resource group id.
        :param pulumi.Input[str] name: Unique name of the private link.
        :param pulumi.Input[str] private_link_resource_id: The resource id that private link links to.
        :param pulumi.Input[str] request_message: Request message.
        :param pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']] status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
        """
        # Every field is optional; skip the ones left as None.
        for key, val in (
            ("group_id", group_id),
            ("name", name),
            ("private_link_resource_id", private_link_resource_id),
            ("request_message", request_message),
            ("status", status),
        ):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="groupId")
    def group_id(self) -> Optional[pulumi.Input[str]]:
        """The private link resource group id."""
        return pulumi.get(self, "group_id")

    @group_id.setter
    def group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Unique name of the private link."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="privateLinkResourceId")
    def private_link_resource_id(self) -> Optional[pulumi.Input[str]]:
        """The resource id that private link links to."""
        return pulumi.get(self, "private_link_resource_id")

    @private_link_resource_id.setter
    def private_link_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_link_resource_id", value)

    @property
    @pulumi.getter(name="requestMessage")
    def request_message(self) -> Optional[pulumi.Input[str]]:
        """Request message."""
        return pulumi.get(self, "request_message")

    @request_message.setter
    def request_message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request_message", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:
        """Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service."""
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]):
        pulumi.set(self, "status", value)
@pulumi.input_type
class SkuArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 tier: Optional[pulumi.Input[str]] = None):
        """
        Sku of the resource

        :param pulumi.Input[str] name: Name of the sku
        :param pulumi.Input[str] tier: Tier of the sku like Basic or Enterprise
        """
        # Store only the fields the caller set.
        for key, val in (("name", name), ("tier", tier)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Name of the sku"""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def tier(self) -> Optional[pulumi.Input[str]]:
        """Tier of the sku like Basic or Enterprise"""
        return pulumi.get(self, "tier")

    @tier.setter
    def tier(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tier", value)
@pulumi.input_type
class SparkMavenPackageArgs:
    def __init__(__self__, *,
                 artifact: Optional[pulumi.Input[str]] = None,
                 group: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        A Spark package identified by Maven coordinates (group:artifact:version).

        :param pulumi.Input[str] artifact: The Maven artifact id of the package.
        :param pulumi.Input[str] group: The Maven group id of the package.
        :param pulumi.Input[str] version: The version of the package.
        """
        if artifact is not None:
            pulumi.set(__self__, "artifact", artifact)
        if group is not None:
            pulumi.set(__self__, "group", group)
        if version is not None:
            pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter
    def artifact(self) -> Optional[pulumi.Input[str]]:
        """The Maven artifact id of the package."""
        return pulumi.get(self, "artifact")
    @artifact.setter
    def artifact(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "artifact", value)
    @property
    @pulumi.getter
    def group(self) -> Optional[pulumi.Input[str]]:
        """The Maven group id of the package."""
        return pulumi.get(self, "group")
    @group.setter
    def group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group", value)
    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """The version of the package."""
        return pulumi.get(self, "version")
    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class SslConfigurationArgs:
    def __init__(__self__, *,
                 cert: Optional[pulumi.Input[str]] = None,
                 cname: Optional[pulumi.Input[str]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None):
        """
        The ssl configuration for scoring

        :param pulumi.Input[str] cert: Cert data
        :param pulumi.Input[str] cname: CNAME of the cert
        :param pulumi.Input[str] key: Key data
        :param pulumi.Input[str] status: Enable or disable ssl for scoring
        """
        # All fields are optional; only set what was given. cert/key carry
        # certificate material and are stored as-is.
        for field, val in (
            ("cert", cert),
            ("cname", cname),
            ("key", key),
            ("status", status),
        ):
            if val is not None:
                pulumi.set(__self__, field, val)

    @property
    @pulumi.getter
    def cert(self) -> Optional[pulumi.Input[str]]:
        """Cert data"""
        return pulumi.get(self, "cert")

    @cert.setter
    def cert(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cert", value)

    @property
    @pulumi.getter
    def cname(self) -> Optional[pulumi.Input[str]]:
        """CNAME of the cert"""
        return pulumi.get(self, "cname")

    @cname.setter
    def cname(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cname", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """Key data"""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """Enable or disable ssl for scoring"""
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
@pulumi.input_type
class SweepJobArgs:
    def __init__(__self__, *,
                 algorithm: pulumi.Input[Union[str, 'SamplingAlgorithm']],
                 compute: pulumi.Input['ComputeConfigurationArgs'],
                 job_type: pulumi.Input[str],
                 objective: pulumi.Input['ObjectiveArgs'],
                 search_space: pulumi.Input[Mapping[str, Any]],
                 description: Optional[pulumi.Input[str]] = None,
                 early_termination: Optional[pulumi.Input[Union['BanditPolicyArgs', 'MedianStoppingPolicyArgs', 'TruncationSelectionPolicyArgs']]] = None,
                 experiment_name: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input[Union['AmlTokenArgs', 'ManagedIdentityArgs']]] = None,
                 max_concurrent_trials: Optional[pulumi.Input[int]] = None,
                 max_total_trials: Optional[pulumi.Input[int]] = None,
                 priority: Optional[pulumi.Input[int]] = None,
                 properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 timeout: Optional[pulumi.Input[str]] = None,
                 trial: Optional[pulumi.Input['TrialComponentArgs']] = None):
        """
        Sweep job definition.

        :param pulumi.Input[Union[str, 'SamplingAlgorithm']] algorithm: Type of the hyperparameter sampling algorithms
        :param pulumi.Input['ComputeConfigurationArgs'] compute: Compute binding for the job.
        :param pulumi.Input[str] job_type: Enum to determine the type of job.
               Expected value is 'Sweep'.
        :param pulumi.Input['ObjectiveArgs'] objective: Optimization objective.
        :param pulumi.Input[Mapping[str, Any]] search_space: A dictionary containing each parameter and its distribution. The dictionary key is the name of the parameter
        :param pulumi.Input[str] description: The asset description text.
        :param pulumi.Input[Union['BanditPolicyArgs', 'MedianStoppingPolicyArgs', 'TruncationSelectionPolicyArgs']] early_termination: Early termination policies enable canceling poor-performing runs before they complete.
        :param pulumi.Input[str] experiment_name: The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
        :param pulumi.Input[Union['AmlTokenArgs', 'ManagedIdentityArgs']] identity: Identity configuration. If set, this should be one of AmlToken, ManagedIdentity or null.
               Defaults to AmlToken if null.
        :param pulumi.Input[int] max_concurrent_trials: An upper bound on the number of trials performed in parallel.
        :param pulumi.Input[int] max_total_trials: An upper bound on the number of trials to perform.
        :param pulumi.Input[int] priority: Job priority for scheduling policy. Only applies to AMLCompute.
               Private preview feature and only available to users on the allow list.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The asset property dictionary.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tag dictionary. Tags can be added, removed, and updated.
        :param pulumi.Input[str] timeout: The total timeout in ISO 8601 format. Only supports duration with precision as low as Minutes.
        :param pulumi.Input['TrialComponentArgs'] trial: Trial component definition.
        """
        # Required fields are stored unconditionally; job_type is pinned to
        # the literal 'Sweep' discriminator regardless of the argument value.
        pulumi.set(__self__, "algorithm", algorithm)
        pulumi.set(__self__, "compute", compute)
        pulumi.set(__self__, "job_type", 'Sweep')
        pulumi.set(__self__, "objective", objective)
        pulumi.set(__self__, "search_space", search_space)
        # Optional fields are stored only when supplied.
        for key, val in (
            ("description", description),
            ("early_termination", early_termination),
            ("experiment_name", experiment_name),
            ("identity", identity),
            ("max_concurrent_trials", max_concurrent_trials),
            ("max_total_trials", max_total_trials),
            ("priority", priority),
            ("properties", properties),
            ("tags", tags),
            ("timeout", timeout),
            ("trial", trial),
        ):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def algorithm(self) -> pulumi.Input[Union[str, 'SamplingAlgorithm']]:
        """Type of the hyperparameter sampling algorithms"""
        return pulumi.get(self, "algorithm")

    @algorithm.setter
    def algorithm(self, value: pulumi.Input[Union[str, 'SamplingAlgorithm']]):
        pulumi.set(self, "algorithm", value)

    @property
    @pulumi.getter
    def compute(self) -> pulumi.Input['ComputeConfigurationArgs']:
        """Compute binding for the job."""
        return pulumi.get(self, "compute")

    @compute.setter
    def compute(self, value: pulumi.Input['ComputeConfigurationArgs']):
        pulumi.set(self, "compute", value)

    @property
    @pulumi.getter(name="jobType")
    def job_type(self) -> pulumi.Input[str]:
        """
        Enum to determine the type of job.
        Expected value is 'Sweep'.
        """
        return pulumi.get(self, "job_type")

    @job_type.setter
    def job_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "job_type", value)

    @property
    @pulumi.getter
    def objective(self) -> pulumi.Input['ObjectiveArgs']:
        """Optimization objective."""
        return pulumi.get(self, "objective")

    @objective.setter
    def objective(self, value: pulumi.Input['ObjectiveArgs']):
        pulumi.set(self, "objective", value)

    @property
    @pulumi.getter(name="searchSpace")
    def search_space(self) -> pulumi.Input[Mapping[str, Any]]:
        """A dictionary containing each parameter and its distribution. The dictionary key is the name of the parameter"""
        return pulumi.get(self, "search_space")

    @search_space.setter
    def search_space(self, value: pulumi.Input[Mapping[str, Any]]):
        pulumi.set(self, "search_space", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """The asset description text."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="earlyTermination")
    def early_termination(self) -> Optional[pulumi.Input[Union['BanditPolicyArgs', 'MedianStoppingPolicyArgs', 'TruncationSelectionPolicyArgs']]]:
        """Early termination policies enable canceling poor-performing runs before they complete."""
        return pulumi.get(self, "early_termination")

    @early_termination.setter
    def early_termination(self, value: Optional[pulumi.Input[Union['BanditPolicyArgs', 'MedianStoppingPolicyArgs', 'TruncationSelectionPolicyArgs']]]):
        pulumi.set(self, "early_termination", value)

    @property
    @pulumi.getter(name="experimentName")
    def experiment_name(self) -> Optional[pulumi.Input[str]]:
        """The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment."""
        return pulumi.get(self, "experiment_name")

    @experiment_name.setter
    def experiment_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "experiment_name", value)

    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input[Union['AmlTokenArgs', 'ManagedIdentityArgs']]]:
        """
        Identity configuration. If set, this should be one of AmlToken, ManagedIdentity or null.
        Defaults to AmlToken if null.
        """
        return pulumi.get(self, "identity")

    @identity.setter
    def identity(self, value: Optional[pulumi.Input[Union['AmlTokenArgs', 'ManagedIdentityArgs']]]):
        pulumi.set(self, "identity", value)

    @property
    @pulumi.getter(name="maxConcurrentTrials")
    def max_concurrent_trials(self) -> Optional[pulumi.Input[int]]:
        """An upper bound on the number of trials performed in parallel."""
        return pulumi.get(self, "max_concurrent_trials")

    @max_concurrent_trials.setter
    def max_concurrent_trials(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_concurrent_trials", value)

    @property
    @pulumi.getter(name="maxTotalTrials")
    def max_total_trials(self) -> Optional[pulumi.Input[int]]:
        """An upper bound on the number of trials to perform."""
        return pulumi.get(self, "max_total_trials")

    @max_total_trials.setter
    def max_total_trials(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_total_trials", value)

    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[int]]:
        """
        Job priority for scheduling policy. Only applies to AMLCompute.
        Private preview feature and only available to users on the allow list.
        """
        return pulumi.get(self, "priority")

    @priority.setter
    def priority(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "priority", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """The asset property dictionary."""
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Tag dictionary. Tags can be added, removed, and updated."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter
    def timeout(self) -> Optional[pulumi.Input[str]]:
        """The total timeout in ISO 8601 format. Only supports duration with precision as low as Minutes."""
        return pulumi.get(self, "timeout")

    @timeout.setter
    def timeout(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "timeout", value)

    @property
    @pulumi.getter
    def trial(self) -> Optional[pulumi.Input['TrialComponentArgs']]:
        """Trial component definition."""
        return pulumi.get(self, "trial")

    @trial.setter
    def trial(self, value: Optional[pulumi.Input['TrialComponentArgs']]):
        pulumi.set(self, "trial", value)
@pulumi.input_type
class TensorFlowArgs:
    def __init__(__self__, *,
                 distribution_type: pulumi.Input[str],
                 parameter_server_count: Optional[pulumi.Input[int]] = None,
                 worker_count: Optional[pulumi.Input[int]] = None):
        """
        TensorFlow distribution configuration.

        :param pulumi.Input[str] distribution_type: Enum to determine the job distribution type.
               Expected value is 'TensorFlow'.
        :param pulumi.Input[int] parameter_server_count: Number of parameter server tasks.
        :param pulumi.Input[int] worker_count: Number of workers. Overwrites the node count in compute binding.
        """
        # The discriminator is pinned to the literal 'TensorFlow' regardless
        # of the argument value — this class always represents that variant.
        pulumi.set(__self__, "distribution_type", 'TensorFlow')
        for key, val in (
            ("parameter_server_count", parameter_server_count),
            ("worker_count", worker_count),
        ):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="distributionType")
    def distribution_type(self) -> pulumi.Input[str]:
        """
        Enum to determine the job distribution type.
        Expected value is 'TensorFlow'.
        """
        return pulumi.get(self, "distribution_type")

    @distribution_type.setter
    def distribution_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "distribution_type", value)

    @property
    @pulumi.getter(name="parameterServerCount")
    def parameter_server_count(self) -> Optional[pulumi.Input[int]]:
        """Number of parameter server tasks."""
        return pulumi.get(self, "parameter_server_count")

    @parameter_server_count.setter
    def parameter_server_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "parameter_server_count", value)

    @property
    @pulumi.getter(name="workerCount")
    def worker_count(self) -> Optional[pulumi.Input[int]]:
        """Number of workers. Overwrites the node count in compute binding."""
        return pulumi.get(self, "worker_count")

    @worker_count.setter
    def worker_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "worker_count", value)
@pulumi.input_type
class TrialComponentArgs:
    def __init__(__self__, *,
                 command: pulumi.Input[str],
                 code_id: Optional[pulumi.Input[str]] = None,
                 distribution: Optional[pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']]] = None,
                 environment_id: Optional[pulumi.Input[str]] = None,
                 environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 input_data_bindings: Optional[pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]]] = None,
                 output_data_bindings: Optional[pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]]] = None,
                 timeout: Optional[pulumi.Input[str]] = None):
        """
        Definition of a trial component.

        :param pulumi.Input[str] command: The command to execute on startup of the job. eg. "python train.py"
        :param pulumi.Input[str] code_id: ARM resource ID of the code asset.
        :param pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']] distribution: Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
        :param pulumi.Input[str] environment_id: The ARM resource ID of the Environment specification for the job.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: Environment variables included in the job.
        :param pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]] input_data_bindings: Mapping of input data bindings used in the job.
        :param pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]] output_data_bindings: Mapping of output data bindings used in the job.
        :param pulumi.Input[str] timeout: The max run duration in ISO 8601 format, after which the trial component will be cancelled.
               Only supports duration with precision as low as Seconds.
        """
        # `command` is the only required field; everything else is optional.
        pulumi.set(__self__, "command", command)
        _optional_fields = (
            ("code_id", code_id),
            ("distribution", distribution),
            ("environment_id", environment_id),
            ("environment_variables", environment_variables),
            ("input_data_bindings", input_data_bindings),
            ("output_data_bindings", output_data_bindings),
            ("timeout", timeout),
        )
        # Store only the fields the caller actually provided.
        for _name, _value in _optional_fields:
            if _value is not None:
                pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter
    def command(self) -> pulumi.Input[str]:
        """
        Command executed on job startup, e.g. "python train.py".
        """
        return pulumi.get(self, "command")

    @command.setter
    def command(self, value: pulumi.Input[str]):
        pulumi.set(self, "command", value)

    @property
    @pulumi.getter(name="codeId")
    def code_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource ID of the code asset.
        """
        return pulumi.get(self, "code_id")

    @code_id.setter
    def code_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "code_id", value)

    @property
    @pulumi.getter
    def distribution(self) -> Optional[pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']]]:
        """
        Distribution configuration of the job; one of Mpi, Tensorflow, PyTorch, or null.
        """
        return pulumi.get(self, "distribution")

    @distribution.setter
    def distribution(self, value: Optional[pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']]]):
        pulumi.set(self, "distribution", value)

    @property
    @pulumi.getter(name="environmentId")
    def environment_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource ID of the Environment specification used by the job.
        """
        return pulumi.get(self, "environment_id")

    @environment_id.setter
    def environment_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "environment_id", value)

    @property
    @pulumi.getter(name="environmentVariables")
    def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Environment variables made available to the job.
        """
        return pulumi.get(self, "environment_variables")

    @environment_variables.setter
    def environment_variables(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "environment_variables", value)

    @property
    @pulumi.getter(name="inputDataBindings")
    def input_data_bindings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]]]:
        """
        Mapping of input data bindings used by the job.
        """
        return pulumi.get(self, "input_data_bindings")

    @input_data_bindings.setter
    def input_data_bindings(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]]]):
        pulumi.set(self, "input_data_bindings", value)

    @property
    @pulumi.getter(name="outputDataBindings")
    def output_data_bindings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]]]:
        """
        Mapping of output data bindings used by the job.
        """
        return pulumi.get(self, "output_data_bindings")

    @output_data_bindings.setter
    def output_data_bindings(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]]]):
        pulumi.set(self, "output_data_bindings", value)

    @property
    @pulumi.getter
    def timeout(self) -> Optional[pulumi.Input[str]]:
        """
        Maximum run duration (ISO 8601); the trial component is cancelled once it elapses.
        Duration precision as low as seconds is supported.
        """
        return pulumi.get(self, "timeout")

    @timeout.setter
    def timeout(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "timeout", value)
@pulumi.input_type
class TruncationSelectionPolicyArgs:
    def __init__(__self__, *,
                 policy_type: pulumi.Input[str],
                 delay_evaluation: Optional[pulumi.Input[int]] = None,
                 evaluation_interval: Optional[pulumi.Input[int]] = None,
                 truncation_percentage: Optional[pulumi.Input[int]] = None):
        """
        Early-termination policy that cancels a given percentage of runs at each evaluation interval.

        :param pulumi.Input[str] policy_type:
               Expected value is 'TruncationSelection'.
        :param pulumi.Input[int] delay_evaluation: Number of intervals by which to delay the first evaluation.
        :param pulumi.Input[int] evaluation_interval: Interval (number of runs) between policy evaluations.
        :param pulumi.Input[int] truncation_percentage: The percentage of runs to cancel at each evaluation interval.
        """
        # The discriminator is always stored as 'TruncationSelection' for this variant.
        pulumi.set(__self__, "policy_type", 'TruncationSelection')
        # Store only the optional settings the caller actually supplied.
        for _key, _value in (("delay_evaluation", delay_evaluation),
                             ("evaluation_interval", evaluation_interval),
                             ("truncation_percentage", truncation_percentage)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter(name="policyType")
    def policy_type(self) -> pulumi.Input[str]:
        """
        Expected value is 'TruncationSelection'.
        """
        return pulumi.get(self, "policy_type")

    @policy_type.setter
    def policy_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "policy_type", value)

    @property
    @pulumi.getter(name="delayEvaluation")
    def delay_evaluation(self) -> Optional[pulumi.Input[int]]:
        """
        How many intervals to wait before the first evaluation.
        """
        return pulumi.get(self, "delay_evaluation")

    @delay_evaluation.setter
    def delay_evaluation(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "delay_evaluation", value)

    @property
    @pulumi.getter(name="evaluationInterval")
    def evaluation_interval(self) -> Optional[pulumi.Input[int]]:
        """
        Number of runs between consecutive policy evaluations.
        """
        return pulumi.get(self, "evaluation_interval")

    @evaluation_interval.setter
    def evaluation_interval(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_interval", value)

    @property
    @pulumi.getter(name="truncationPercentage")
    def truncation_percentage(self) -> Optional[pulumi.Input[int]]:
        """
        Percentage of runs cancelled at every evaluation interval.
        """
        return pulumi.get(self, "truncation_percentage")

    @truncation_percentage.setter
    def truncation_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "truncation_percentage", value)
@pulumi.input_type
class UserAccountCredentialsArgs:
    def __init__(__self__, *,
                 admin_user_name: pulumi.Input[str],
                 admin_user_password: Optional[pulumi.Input[str]] = None,
                 admin_user_ssh_public_key: Optional[pulumi.Input[str]] = None):
        """
        Settings for the user account created on each node of a compute.

        :param pulumi.Input[str] admin_user_name: Name of the administrator user account which can be used to SSH to nodes.
        :param pulumi.Input[str] admin_user_password: Password of the administrator user account.
        :param pulumi.Input[str] admin_user_ssh_public_key: SSH public key of the administrator user account.
        """
        # The account name is required; credentials are optional.
        pulumi.set(__self__, "admin_user_name", admin_user_name)
        for _key, _value in (("admin_user_password", admin_user_password),
                             ("admin_user_ssh_public_key", admin_user_ssh_public_key)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter(name="adminUserName")
    def admin_user_name(self) -> pulumi.Input[str]:
        """
        Administrator account name usable for SSH access to nodes.
        """
        return pulumi.get(self, "admin_user_name")

    @admin_user_name.setter
    def admin_user_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "admin_user_name", value)

    @property
    @pulumi.getter(name="adminUserPassword")
    def admin_user_password(self) -> Optional[pulumi.Input[str]]:
        """
        Administrator account password.
        """
        return pulumi.get(self, "admin_user_password")

    @admin_user_password.setter
    def admin_user_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "admin_user_password", value)

    @property
    @pulumi.getter(name="adminUserSshPublicKey")
    def admin_user_ssh_public_key(self) -> Optional[pulumi.Input[str]]:
        """
        Administrator account SSH public key.
        """
        return pulumi.get(self, "admin_user_ssh_public_key")

    @admin_user_ssh_public_key.setter
    def admin_user_ssh_public_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "admin_user_ssh_public_key", value)
@pulumi.input_type
class UserAssignedIdentityMetaArgs:
    def __init__(__self__, *,
                 client_id: Optional[pulumi.Input[str]] = None,
                 principal_id: Optional[pulumi.Input[str]] = None):
        """
        User-assigned identities associated with a resource.

        :param pulumi.Input[str] client_id: Aka application ID, a unique identifier generated by Azure AD that is tied to an application and service principal during its initial provisioning.
        :param pulumi.Input[str] principal_id: The object ID of the service principal object for your managed identity that is used to grant role-based access to an Azure resource.
        """
        # Both identifiers are optional; store only what was supplied.
        for _key, _value in (("client_id", client_id),
                             ("principal_id", principal_id)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        """
        Application (client) ID generated by Azure AD, tied to the application
        and service principal during initial provisioning.
        """
        return pulumi.get(self, "client_id")

    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id", value)

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> Optional[pulumi.Input[str]]:
        """
        Object ID of the managed identity's service principal, used to grant
        role-based access to an Azure resource.
        """
        return pulumi.get(self, "principal_id")

    @principal_id.setter
    def principal_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "principal_id", value)
@pulumi.input_type
class VirtualMachineImageArgs:
    def __init__(__self__, *,
                 id: pulumi.Input[str]):
        """
        Virtual Machine image for Windows AML Compute.

        :param pulumi.Input[str] id: Virtual Machine image path
        """
        # `id` shadows the builtin here by necessity: the wire name is "id".
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[str]:
        """
        Path of the virtual machine image.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: pulumi.Input[str]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class VirtualMachinePropertiesArgs:
    def __init__(__self__, *,
                 address: Optional[pulumi.Input[str]] = None,
                 administrator_account: Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']] = None,
                 ssh_port: Optional[pulumi.Input[int]] = None,
                 virtual_machine_size: Optional[pulumi.Input[str]] = None):
        """
        Properties of a virtual machine compute target.

        :param pulumi.Input[str] address: Public IP address of the virtual machine.
        :param pulumi.Input['VirtualMachineSshCredentialsArgs'] administrator_account: Admin credentials for virtual machine
        :param pulumi.Input[int] ssh_port: Port open for ssh connections.
        :param pulumi.Input[str] virtual_machine_size: Virtual Machine size
        """
        # All fields are optional; store only the ones actually supplied.
        for _key, _value in (("address", address),
                             ("administrator_account", administrator_account),
                             ("ssh_port", ssh_port),
                             ("virtual_machine_size", virtual_machine_size)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter
    def address(self) -> Optional[pulumi.Input[str]]:
        """
        Public IP address of the virtual machine.
        """
        return pulumi.get(self, "address")

    @address.setter
    def address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "address", value)

    @property
    @pulumi.getter(name="administratorAccount")
    def administrator_account(self) -> Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']]:
        """
        Administrator credentials for the virtual machine.
        """
        return pulumi.get(self, "administrator_account")

    @administrator_account.setter
    def administrator_account(self, value: Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']]):
        pulumi.set(self, "administrator_account", value)

    @property
    @pulumi.getter(name="sshPort")
    def ssh_port(self) -> Optional[pulumi.Input[int]]:
        """
        Port left open for SSH connections.
        """
        return pulumi.get(self, "ssh_port")

    @ssh_port.setter
    def ssh_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ssh_port", value)

    @property
    @pulumi.getter(name="virtualMachineSize")
    def virtual_machine_size(self) -> Optional[pulumi.Input[str]]:
        """
        Size (SKU) of the virtual machine.
        """
        return pulumi.get(self, "virtual_machine_size")

    @virtual_machine_size.setter
    def virtual_machine_size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_machine_size", value)
@pulumi.input_type
class VirtualMachineSshCredentialsArgs:
    def __init__(__self__, *,
                 password: Optional[pulumi.Input[str]] = None,
                 private_key_data: Optional[pulumi.Input[str]] = None,
                 public_key_data: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None):
        """
        Administrator credentials for a virtual machine.

        :param pulumi.Input[str] password: Password of admin account
        :param pulumi.Input[str] private_key_data: Private key data
        :param pulumi.Input[str] public_key_data: Public key data
        :param pulumi.Input[str] username: Username of admin account
        """
        # Every field is optional; store only the ones actually supplied.
        for _key, _value in (("password", password),
                             ("private_key_data", private_key_data),
                             ("public_key_data", public_key_data),
                             ("username", username)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Admin account password.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="privateKeyData")
    def private_key_data(self) -> Optional[pulumi.Input[str]]:
        """
        SSH private key data.
        """
        return pulumi.get(self, "private_key_data")

    @private_key_data.setter
    def private_key_data(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_key_data", value)

    @property
    @pulumi.getter(name="publicKeyData")
    def public_key_data(self) -> Optional[pulumi.Input[str]]:
        """
        SSH public key data.
        """
        return pulumi.get(self, "public_key_data")

    @public_key_data.setter
    def public_key_data(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "public_key_data", value)

    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """
        Admin account username.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class VirtualMachineArgs:
    def __init__(__self__, *,
                 compute_type: pulumi.Input[str],
                 compute_location: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input['VirtualMachinePropertiesArgs']] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        A Machine Learning compute based on Azure Virtual Machines.

        :param pulumi.Input[str] compute_type: The type of compute
               Expected value is 'VirtualMachine'.
        :param pulumi.Input[str] compute_location: Location for the underlying compute
        :param pulumi.Input[str] description: The description of the Machine Learning compute.
        :param pulumi.Input['VirtualMachinePropertiesArgs'] properties: Properties of the virtual machine.
        :param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
        """
        # The discriminator is always stored as 'VirtualMachine' for this variant.
        pulumi.set(__self__, "compute_type", 'VirtualMachine')
        # Store only the optional settings the caller actually supplied.
        for _key, _value in (("compute_location", compute_location),
                             ("description", description),
                             ("properties", properties),
                             ("resource_id", resource_id)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter(name="computeType")
    def compute_type(self) -> pulumi.Input[str]:
        """
        The type of compute.
        Expected value is 'VirtualMachine'.
        """
        return pulumi.get(self, "compute_type")

    @compute_type.setter
    def compute_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "compute_type", value)

    @property
    @pulumi.getter(name="computeLocation")
    def compute_location(self) -> Optional[pulumi.Input[str]]:
        """
        Location of the underlying compute.
        """
        return pulumi.get(self, "compute_location")

    @compute_location.setter
    def compute_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "compute_location", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of the Machine Learning compute.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input['VirtualMachinePropertiesArgs']]:
        """
        Properties of the virtual machine.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input['VirtualMachinePropertiesArgs']]):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource id of the underlying compute.
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
| 40.287734
| 805
| 0.657583
| 40,871
| 359,004
| 5.598248
| 0.029287
| 0.110959
| 0.103135
| 0.053941
| 0.86878
| 0.785124
| 0.719601
| 0.651137
| 0.619
| 0.58571
| 0
| 0.000558
| 0.231206
| 359,004
| 8,910
| 806
| 40.292256
| 0.828449
| 0.233282
| 0
| 0.582881
| 1
| 0
| 0.134692
| 0.048971
| 0
| 0
| 0
| 0
| 0
| 1
| 0.206406
| false
| 0.004683
| 0.001124
| 0.002622
| 0.322158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6f4c79352b012425ebfce81eac1af519e62161ad
| 72
|
py
|
Python
|
src/constants.py
|
Rebeljah/PyChess
|
3c29a9c473c7fd6fa1f8d53ad22dd84b6c7621af
|
[
"MIT"
] | 2
|
2021-07-10T06:31:56.000Z
|
2021-07-10T06:31:58.000Z
|
src/constants.py
|
Rebeljah/PyChess
|
3c29a9c473c7fd6fa1f8d53ad22dd84b6c7621af
|
[
"MIT"
] | null | null | null |
src/constants.py
|
Rebeljah/PyChess
|
3c29a9c473c7fd6fa1f8d53ad22dd84b6c7621af
|
[
"MIT"
] | null | null | null |
class Color:
DARK = (92, 88, 76,255)
LIGHT = (255, 245, 214,255)
| 24
| 31
| 0.555556
| 12
| 72
| 3.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.396226
| 0.263889
| 72
| 3
| 31
| 24
| 0.358491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
6f4e0e53fc5482b2fbd023a076fcd7118489c475
| 22
|
py
|
Python
|
tccli/services/hcm/v20181106/__init__.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/hcm/v20181106/__init__.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/hcm/v20181106/__init__.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
version = "2018-11-06"
| 22
| 22
| 0.681818
| 4
| 22
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 0.090909
| 22
| 1
| 22
| 22
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6f551be491ce4adf41318ee09e77e1f90d1a9662
| 142
|
py
|
Python
|
models/__init__.py
|
easthorse/brain-tumor-segmentation-based-on-group-convolution
|
98547a4c89cd96c85045e70b46f89cfdb74edfca
|
[
"OLDAP-2.3"
] | 6
|
2020-08-30T15:48:54.000Z
|
2021-04-20T02:46:43.000Z
|
models/__init__.py
|
easthorse/brain-tumor-segmentation-based-on-group-convolution
|
98547a4c89cd96c85045e70b46f89cfdb74edfca
|
[
"OLDAP-2.3"
] | null | null | null |
models/__init__.py
|
easthorse/brain-tumor-segmentation-based-on-group-convolution
|
98547a4c89cd96c85045e70b46f89cfdb74edfca
|
[
"OLDAP-2.3"
] | null | null | null |
'''If you want to use either of the following networks, comment out the other'''
from .MFSNet_MixLoss import MFSNet
#from .unet import Unet
| 35.5
| 80
| 0.760563
| 23
| 142
| 4.652174
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169014
| 142
| 3
| 81
| 47.333333
| 0.90678
| 0.697183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4896182264486240033cac615b239e13ecc94199
| 895
|
py
|
Python
|
applicants/admin.py
|
ForumDev/djangocms-applicants
|
2feb5756dabd535c56ef4ea2f9e86af107fc36bd
|
[
"MIT"
] | null | null | null |
applicants/admin.py
|
ForumDev/djangocms-applicants
|
2feb5756dabd535c56ef4ea2f9e86af107fc36bd
|
[
"MIT"
] | null | null | null |
applicants/admin.py
|
ForumDev/djangocms-applicants
|
2feb5756dabd535c56ef4ea2f9e86af107fc36bd
|
[
"MIT"
] | 1
|
2020-10-12T06:33:40.000Z
|
2020-10-12T06:33:40.000Z
|
from django.contrib import admin
from .models import Applicant, Reference, Attachment, Event, Score, Note
from cms.admin.placeholderadmin import PlaceholderAdminMixin
# Register your models here.
class ApplicantsAdmin(PlaceholderAdminMixin, admin.ModelAdmin):
pass
admin.site.register(Applicant, ApplicantsAdmin)
class ReferencesAdmin(PlaceholderAdminMixin, admin.ModelAdmin):
pass
admin.site.register(Reference, ReferencesAdmin)
class AttachmentAdmin(PlaceholderAdminMixin, admin.ModelAdmin):
pass
admin.site.register(Attachment, AttachmentAdmin)
class EventAdmin(PlaceholderAdminMixin, admin.ModelAdmin):
pass
admin.site.register(Event, EventAdmin)
class ScoreAdmin(PlaceholderAdminMixin, admin.ModelAdmin):
pass
admin.site.register(Score, ScoreAdmin)
class NoteAdmin(PlaceholderAdminMixin, admin.ModelAdmin):
pass
admin.site.register(Note, NoteAdmin)
| 26.323529
| 72
| 0.810056
| 90
| 895
| 8.055556
| 0.311111
| 0.215172
| 0.297931
| 0.331034
| 0.471724
| 0.471724
| 0.471724
| 0
| 0
| 0
| 0
| 0
| 0.110615
| 895
| 34
| 73
| 26.323529
| 0.910804
| 0.02905
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.285714
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
48995f96af70f44a0749d434b25434d5064f69db
| 3,716
|
py
|
Python
|
tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/interpolate/tests/test_ndgriddata.py
|
globusgenomics/galaxy
|
7caf74d9700057587b3e3434c64e82c5b16540f1
|
[
"CC-BY-3.0"
] | 1
|
2021-02-05T13:19:58.000Z
|
2021-02-05T13:19:58.000Z
|
tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/interpolate/tests/test_ndgriddata.py
|
globusgenomics/galaxy
|
7caf74d9700057587b3e3434c64e82c5b16540f1
|
[
"CC-BY-3.0"
] | 1
|
2018-04-15T22:59:15.000Z
|
2018-04-15T22:59:15.000Z
|
tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/interpolate/tests/test_ndgriddata.py
|
globusgenomics/galaxy
|
7caf74d9700057587b3e3434c64e82c5b16540f1
|
[
"CC-BY-3.0"
] | null | null | null |
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal, assert_array_equal, assert_allclose, \
run_module_suite
from scipy.interpolate import griddata
class TestGriddata(object):
def test_fill_value(self):
x = [(0,0), (0,1), (1,0)]
y = [1, 2, 3]
yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)
assert_array_equal(yi, [-1., -1, 1])
yi = griddata(x, y, [(1,1), (1,2), (0,0)])
assert_array_equal(yi, [np.nan, np.nan, 1])
def test_alternative_call(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = (np.arange(x.shape[0], dtype=np.double)[:,None]
+ np.array([0,1])[None,:])
for method in ('nearest', 'linear', 'cubic'):
yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method)
assert_allclose(y, yi, atol=1e-14, err_msg=method)
def test_multivalue_2d(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = (np.arange(x.shape[0], dtype=np.double)[:,None]
+ np.array([0,1])[None,:])
for method in ('nearest', 'linear', 'cubic'):
yi = griddata(x, y, x, method=method)
assert_allclose(y, yi, atol=1e-14, err_msg=method)
def test_multipoint_2d(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
for method in ('nearest', 'linear', 'cubic'):
yi = griddata(x, y, xi, method=method)
assert_equal(yi.shape, (5, 3), err_msg=method)
assert_allclose(yi, np.tile(y[:,None], (1, 3)),
atol=1e-14, err_msg=method)
def test_complex_2d(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 2j*y[::-1]
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
for method in ('nearest', 'linear', 'cubic'):
yi = griddata(x, y, xi, method=method)
assert_equal(yi.shape, (5, 3), err_msg=method)
assert_allclose(yi, np.tile(y[:,None], (1, 3)),
atol=1e-14, err_msg=method)
def test_1d(self):
x = np.array([1, 2.5, 3, 4.5, 5, 6])
y = np.array([1, 2, 0, 3.9, 2, 1])
for method in ('nearest', 'linear', 'cubic'):
assert_allclose(griddata(x, y, x, method=method), y,
err_msg=method, atol=1e-14)
assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
err_msg=method, atol=1e-14)
assert_allclose(griddata((x,), y, (x,), method=method), y,
err_msg=method, atol=1e-14)
def test_1d_unsorted(self):
x = np.array([2.5, 1, 4.5, 5, 6, 3])
y = np.array([1, 2, 0, 3.9, 2, 1])
for method in ('nearest', 'linear', 'cubic'):
assert_allclose(griddata(x, y, x, method=method), y,
err_msg=method, atol=1e-10)
assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
err_msg=method, atol=1e-10)
assert_allclose(griddata((x,), y, (x,), method=method), y,
err_msg=method, atol=1e-10)
if __name__ == "__main__":
run_module_suite()
| 38.309278
| 79
| 0.498654
| 569
| 3,716
| 3.144112
| 0.130053
| 0.026831
| 0.040246
| 0.044718
| 0.755729
| 0.755729
| 0.75517
| 0.75517
| 0.75517
| 0.75517
| 0
| 0.076864
| 0.303283
| 3,716
| 96
| 80
| 38.708333
| 0.614137
| 0
| 0
| 0.638889
| 0
| 0
| 0.031216
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 1
| 0.097222
| false
| 0
| 0.055556
| 0
| 0.166667
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
48d62846a2635a6120ad36ede124e60e891ba967
| 171
|
py
|
Python
|
nn/__init__.py
|
zsquaredz/transformer-summarization
|
abab57e7cafa6d11a49804e1bf2e33863ea940b2
|
[
"MIT"
] | 17
|
2019-01-24T10:17:57.000Z
|
2022-02-16T20:00:50.000Z
|
nn/__init__.py
|
zsquaredz/transformer-summarization
|
abab57e7cafa6d11a49804e1bf2e33863ea940b2
|
[
"MIT"
] | 2
|
2019-11-07T05:03:25.000Z
|
2020-02-05T11:07:57.000Z
|
nn/__init__.py
|
zsquaredz/transformer-summarization
|
abab57e7cafa6d11a49804e1bf2e33863ea940b2
|
[
"MIT"
] | 6
|
2019-04-29T06:46:22.000Z
|
2021-08-16T02:28:08.000Z
|
from .modules import Transformer, TransformerEncoderLayer, TransformerDecoderLayer, ScaledDotProductAttention, \
MultiHeadAttention, PositionalEmbedding, PositionWise
| 57
| 112
| 0.865497
| 10
| 171
| 14.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087719
| 171
| 2
| 113
| 85.5
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
48db71e6be24ca344ad555d374ba78db48e7dcd0
| 87
|
py
|
Python
|
verce-hpc-pe/src/__init__.py
|
KNMI/VERCE
|
c2f9eaa70ecf1621a218afc5d73ca1304ca8ed36
|
[
"MIT"
] | 2
|
2017-09-07T04:33:18.000Z
|
2019-01-07T13:32:15.000Z
|
verce-hpc-pe/src/__init__.py
|
KNMI/VERCE
|
c2f9eaa70ecf1621a218afc5d73ca1304ca8ed36
|
[
"MIT"
] | 2
|
2016-10-06T13:07:05.000Z
|
2017-12-20T09:47:08.000Z
|
verce-hpc-pe/src/__init__.py
|
KNMI/VERCE
|
c2f9eaa70ecf1621a218afc5d73ca1304ca8ed36
|
[
"MIT"
] | 4
|
2016-04-25T22:15:40.000Z
|
2017-12-18T14:40:58.000Z
|
from utm.conversion import to_latlon, from_latlon
from utm.error import OutOfRangeError
| 43.5
| 49
| 0.873563
| 13
| 87
| 5.692308
| 0.615385
| 0.189189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091954
| 87
| 2
| 50
| 43.5
| 0.936709
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
48ed0bb5670886177888e6224b20d72bbb486595
| 113
|
py
|
Python
|
celloutline/__init__.py
|
cdw/celloutline
|
35d4690233dcc48dd0adc9d032e3f04bb4073bc8
|
[
"MIT"
] | null | null | null |
celloutline/__init__.py
|
cdw/celloutline
|
35d4690233dcc48dd0adc9d032e3f04bb4073bc8
|
[
"MIT"
] | null | null | null |
celloutline/__init__.py
|
cdw/celloutline
|
35d4690233dcc48dd0adc9d032e3f04bb4073bc8
|
[
"MIT"
] | null | null | null |
from .version import __version__
from . import conversion
from . import representation
__all__ = ('UnitSpiral')
| 18.833333
| 32
| 0.787611
| 12
| 113
| 6.75
| 0.583333
| 0.246914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141593
| 113
| 5
| 33
| 22.6
| 0.835052
| 0
| 0
| 0
| 0
| 0
| 0.088496
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d2a0d10d77494577ec73c21ea229e19b3c11bb6e
| 218
|
py
|
Python
|
twisted/test/myrebuilder2.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | 267
|
2015-03-22T15:23:48.000Z
|
2022-03-05T21:57:34.000Z
|
twisted/test/myrebuilder2.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | 133
|
2015-03-21T15:13:43.000Z
|
2021-12-11T23:37:58.000Z
|
twisted/test/myrebuilder2.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | 119
|
2015-04-28T16:07:10.000Z
|
2022-03-18T03:49:48.000Z
|
# Presumably a fixture module for reload/rebuild tests (path suggests
# twisted/test/myrebuilder2.py): method return values are compared before and
# after the module is rebuilt — TODO confirm against the test that imports it.
class A:
    # Old-style base class; the sibling fixture module likely returns a
    # different letter here so a rebuild can be detected.
    def a(self):
        return 'b'
# Probe for the `object` builtin: on ancient interpreters without new-style
# classes, referencing `object` raises NameError and class B is skipped.
try:
    object
except NameError:
    pass
else:
    # New-style class mixing the fixture base with `object`; only defined
    # when the runtime supports it.
    class B(A, object):
        def b(self):
            return 'c'
class Inherit(A):
    # Overrides A.a with a distinct return value so inheritance resolution
    # can be checked after a rebuild.
    def a(self):
        return 'd'
| 12.823529
| 23
| 0.5
| 30
| 218
| 3.633333
| 0.5
| 0.275229
| 0.091743
| 0.165138
| 0.275229
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.380734
| 218
| 16
| 24
| 13.625
| 0.807407
| 0
| 0
| 0.142857
| 0
| 0
| 0.013825
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0.071429
| 0
| 0.214286
| 0.642857
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
|
0
| 5
|
d2ac1877a2059733771cfd2db7cebca0a7ef5001
| 58
|
py
|
Python
|
kivymd/uix/selectioncontrol/__init__.py
|
AnEx07/KivyMD
|
e4004a570ad3f1874b3540cc1b0c243b3037bba8
|
[
"MIT"
] | null | null | null |
kivymd/uix/selectioncontrol/__init__.py
|
AnEx07/KivyMD
|
e4004a570ad3f1874b3540cc1b0c243b3037bba8
|
[
"MIT"
] | null | null | null |
kivymd/uix/selectioncontrol/__init__.py
|
AnEx07/KivyMD
|
e4004a570ad3f1874b3540cc1b0c243b3037bba8
|
[
"MIT"
] | null | null | null |
from .selectioncontrol import MDCheckbox, MDSwitch, Thumb
| 29
| 57
| 0.844828
| 6
| 58
| 8.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 58
| 1
| 58
| 58
| 0.942308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d2e1c772d81558e84af41a4034815be5e86cf819
| 98
|
py
|
Python
|
python/tuples.py
|
matheuskiser/pdx_code_guild
|
49a5c62fb468253eb4d9a1fb11166df79bb10873
|
[
"MIT"
] | null | null | null |
python/tuples.py
|
matheuskiser/pdx_code_guild
|
49a5c62fb468253eb4d9a1fb11166df79bb10873
|
[
"MIT"
] | null | null | null |
python/tuples.py
|
matheuskiser/pdx_code_guild
|
49a5c62fb468253eb4d9a1fb11166df79bb10873
|
[
"MIT"
] | null | null | null |
# Demo script: a tuple is an immutable, ordered sequence indexed from 0.
tupel = ("thing 1", "thing 2", "thing 3")
# BUG FIX: the original used Python 2 `print` statements, which are a
# SyntaxError on Python 3. Calling print() with a single argument behaves
# identically on both Python 2 and Python 3.
print(tupel)
print(tupel[0])
print(tupel[1])
print(tupel[2])
| 19.6
| 41
| 0.673469
| 18
| 98
| 3.666667
| 0.388889
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072289
| 0.153061
| 98
| 5
| 42
| 19.6
| 0.722892
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.8
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
96014cdf6cce00da23e52273cd887d28ef93a247
| 19
|
py
|
Python
|
quantstats/version.py
|
lmifflen/quantstats
|
b1e00b16ae8fea42b46ad06f61b8fab72c32c5fe
|
[
"Apache-2.0"
] | null | null | null |
quantstats/version.py
|
lmifflen/quantstats
|
b1e00b16ae8fea42b46ad06f61b8fab72c32c5fe
|
[
"Apache-2.0"
] | null | null | null |
quantstats/version.py
|
lmifflen/quantstats
|
b1e00b16ae8fea42b46ad06f61b8fab72c32c5fe
|
[
"Apache-2.0"
] | 1
|
2022-01-23T19:21:19.000Z
|
2022-01-23T19:21:19.000Z
|
# Single source of the package version string (presumably read by setup.py /
# the package __init__ — verify against the importing module).
version = "0.0.48"
| 9.5
| 18
| 0.578947
| 4
| 19
| 2.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.157895
| 19
| 1
| 19
| 19
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
960df71cc14ae8e691395b3d7790e2c356e61545
| 4,244
|
py
|
Python
|
data/typing/pandas.core.dtypes.dtypes.py
|
pydata-apis/python-api-record
|
684cffbbb6dc6e81f9de4e02619c8b0ebc557b2b
|
[
"MIT"
] | 67
|
2020-08-17T11:53:26.000Z
|
2021-11-08T20:16:06.000Z
|
data/typing/pandas.core.dtypes.dtypes.py
|
data-apis/python-record-api
|
684cffbbb6dc6e81f9de4e02619c8b0ebc557b2b
|
[
"MIT"
] | 36
|
2020-08-17T11:09:51.000Z
|
2021-12-15T18:09:47.000Z
|
data/typing/pandas.core.dtypes.dtypes.py
|
pydata-apis/python-api-record
|
684cffbbb6dc6e81f9de4e02619c8b0ebc557b2b
|
[
"MIT"
] | 7
|
2020-08-19T05:06:47.000Z
|
2020-11-04T05:10:38.000Z
|
from typing import *
# Auto-generated typing stub recording observed API usage of
# pandas.core.dtypes.dtypes.CategoricalDtype; each "usage.<project>: N"
# docstring/comment is a call count harvested from that downstream project.
# The `...` bodies are intentional — this is a record, not an implementation.
class CategoricalDtype:
    # usage.dask: 1
    __module__: ClassVar[object]
    # usage.dask: 10
    categories: object
    # usage.dask: 1
    # usage.sklearn: 6
    kind: object
    # usage.sklearn: 1
    name: object
    # usage.dask: 4
    ordered: object
    @overload
    def __eq__(self, _0: Literal["O"], /):
        """
        usage.xarray: 1
        """
        ...
    @overload
    def __eq__(self, _0: Union[numpy.dtype, type], /):
        """
        usage.pandas: 19
        """
        ...
    @overload
    def __eq__(self, _0: Literal["category"], /):
        """
        usage.dask: 10
        """
        ...
    @overload
    def __eq__(self, _0: Type[object], /):
        """
        usage.dask: 3
        """
        ...
    @overload
    def __eq__(self, _0: pandas.core.dtypes.dtypes.CategoricalDtype, /):
        """
        usage.dask: 4
        usage.sklearn: 6
        """
        ...
    # Non-overload signature: the union of every argument type observed above.
    def __eq__(
        self,
        _0: Union[
            pandas.core.dtypes.dtypes.CategoricalDtype,
            numpy.dtype,
            Literal["category", "O"],
            type,
        ],
        /,
    ):
        """
        usage.dask: 17
        usage.pandas: 19
        usage.sklearn: 6
        usage.xarray: 1
        """
        ...
    @overload
    def __ne__(self, _0: numpy.dtype, /):
        """
        usage.pandas: 1
        """
        ...
    @overload
    def __ne__(self, _0: pandas.core.dtypes.dtypes.CategoricalDtype, /):
        """
        usage.dask: 4
        usage.modin: 2
        """
        ...
    @overload
    def __ne__(self, _0: Literal["category"], /):
        """
        usage.dask: 1
        """
        ...
    # Non-overload signature: union of the __ne__ argument types observed above.
    def __ne__(
        self,
        _0: Union[
            Literal["category"], pandas.core.dtypes.dtypes.CategoricalDtype, numpy.dtype
        ],
        /,
    ):
        """
        usage.dask: 5
        usage.modin: 2
        usage.pandas: 1
        """
        ...
# Auto-generated usage stub for pandas.core.dtypes.dtypes.DatetimeTZDtype;
# docstrings record per-project observed call counts, bodies are `...` stubs.
class DatetimeTZDtype:
    # usage.dask: 1
    __module__: ClassVar[object]
    # usage.dask: 1
    kind: object
    # usage.dask: 1
    name: object
    # usage.dask: 1
    tz: object
    @overload
    def __eq__(self, _0: Literal["O"], /):
        """
        usage.xarray: 1
        """
        ...
    @overload
    def __eq__(self, _0: Union[numpy.dtype, type], /):
        """
        usage.pandas: 24
        """
        ...
    @overload
    def __eq__(self, _0: Type[object], /):
        """
        usage.dask: 1
        """
        ...
    @overload
    def __eq__(self, _0: pandas.core.dtypes.dtypes.DatetimeTZDtype, /):
        """
        usage.dask: 2
        """
        ...
    # Non-overload signature: union of every __eq__ argument type above.
    def __eq__(
        self,
        _0: Union[
            type, pandas.core.dtypes.dtypes.DatetimeTZDtype, numpy.dtype, Literal["O"]
        ],
        /,
    ):
        """
        usage.dask: 3
        usage.pandas: 24
        usage.xarray: 1
        """
        ...
    def __ne__(self, _0: numpy.dtype, /):
        """
        usage.pandas: 1
        """
        ...
# Auto-generated usage stub for pandas.core.dtypes.dtypes.IntervalDtype;
# docstrings record observed call counts, bodies are `...` stubs.
class IntervalDtype:
    @overload
    def __eq__(self, _0: Literal["O"], /):
        """
        usage.xarray: 1
        """
        ...
    @overload
    def __eq__(self, _0: Union[numpy.dtype, type], /):
        """
        usage.pandas: 13
        """
        ...
    # Non-overload signature: union of the __eq__ argument types above.
    def __eq__(self, _0: Union[type, numpy.dtype, Literal["O"]], /):
        """
        usage.pandas: 13
        usage.xarray: 1
        """
        ...
    def __ne__(self, _0: numpy.dtype, /):
        """
        usage.pandas: 1
        """
        ...
# Auto-generated usage stub for pandas.core.dtypes.dtypes.PeriodDtype;
# docstrings record observed call counts, bodies are `...` stubs.
class PeriodDtype:
    # usage.dask: 1
    __module__: ClassVar[object]
    # usage.dask: 1
    kind: object
    # usage.dask: 1
    name: object
    @overload
    def __eq__(self, _0: Literal["O"], /):
        """
        usage.xarray: 1
        """
        ...
    @overload
    def __eq__(self, _0: Union[numpy.dtype, type], /):
        """
        usage.pandas: 22
        """
        ...
    # Non-overload signature: union of the __eq__ argument types above.
    def __eq__(self, _0: Union[type, numpy.dtype, Literal["O"]], /):
        """
        usage.pandas: 22
        usage.xarray: 1
        """
        ...
    def __ne__(self, _0: numpy.dtype, /):
        """
        usage.pandas: 1
        """
        ...
| 17.53719
| 88
| 0.439444
| 394
| 4,244
| 4.398477
| 0.124365
| 0.069244
| 0.088286
| 0.098096
| 0.814195
| 0.725332
| 0.660127
| 0.604732
| 0.582227
| 0.490479
| 0
| 0.032897
| 0.405514
| 4,244
| 241
| 89
| 17.609959
| 0.653983
| 0.174128
| 0
| 0.803922
| 0
| 0
| 0.014025
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.235294
| true
| 0
| 0.009804
| 0
| 0.401961
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
825284285ba67a31722162aa73e4afe59d40a96c
| 662
|
py
|
Python
|
cloudmersive_validate_api_client/api/__init__.py
|
doc22940/cloudmersive.apiclient.python
|
8646291f45ebd7c6572a296e30f693693a6782c4
|
[
"Apache-2.0"
] | null | null | null |
cloudmersive_validate_api_client/api/__init__.py
|
doc22940/cloudmersive.apiclient.python
|
8646291f45ebd7c6572a296e30f693693a6782c4
|
[
"Apache-2.0"
] | null | null | null |
cloudmersive_validate_api_client/api/__init__.py
|
doc22940/cloudmersive.apiclient.python
|
8646291f45ebd7c6572a296e30f693693a6782c4
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from cloudmersive_validate_api_client.api.address_api import AddressApi
from cloudmersive_validate_api_client.api.domain_api import DomainApi
from cloudmersive_validate_api_client.api.email_api import EmailApi
from cloudmersive_validate_api_client.api.ip_address_api import IPAddressApi
from cloudmersive_validate_api_client.api.name_api import NameApi
from cloudmersive_validate_api_client.api.phone_number_api import PhoneNumberApi
from cloudmersive_validate_api_client.api.user_agent_api import UserAgentApi
from cloudmersive_validate_api_client.api.vat_api import VatApi
| 47.285714
| 80
| 0.900302
| 95
| 662
| 5.852632
| 0.326316
| 0.230216
| 0.345324
| 0.388489
| 0.517986
| 0.517986
| 0
| 0
| 0
| 0
| 0
| 0.001626
| 0.070997
| 662
| 13
| 81
| 50.923077
| 0.902439
| 0.061934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8269b425fe1917c25356c227b806edb690a0a3d1
| 50
|
py
|
Python
|
app/__init__.py
|
powersemmi/notes
|
79346aa3e660c33016ecb30ee2cdcf25ff0e129f
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
powersemmi/notes
|
79346aa3e660c33016ecb30ee2cdcf25ff0e129f
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
powersemmi/notes
|
79346aa3e660c33016ecb30ee2cdcf25ff0e129f
|
[
"MIT"
] | null | null | null |
from .shema.models import db
from .api import api
| 16.666667
| 28
| 0.78
| 9
| 50
| 4.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 50
| 2
| 29
| 25
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8295f6ff99f4c8318545dc2eb8dadd2a73397c6c
| 412
|
py
|
Python
|
great_expectations/rule_based_profiler/domain_builder/__init__.py
|
romalee/great_expectations
|
c2c5df42f878612d25aa76ee3e6d4e3852de797e
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/rule_based_profiler/domain_builder/__init__.py
|
romalee/great_expectations
|
c2c5df42f878612d25aa76ee3e6d4e3852de797e
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/rule_based_profiler/domain_builder/__init__.py
|
romalee/great_expectations
|
c2c5df42f878612d25aa76ee3e6d4e3852de797e
|
[
"Apache-2.0"
] | null | null | null |
from .active_batch_table_domain_builder import ActiveBatchTableDomainBuilder
from .column_domain_builder import ColumnDomainBuilder
from .domain_builder import DomainBuilder
from .inferred_semantic_domain_type import InferredSemanticDomainType
from .simple_column_suffix_domain_builder import SimpleColumnSuffixDomainBuilder
from .simple_semantic_type_domain_builder import SimpleSemanticTypeColumnDomainBuilder
| 58.857143
| 86
| 0.927184
| 42
| 412
| 8.666667
| 0.452381
| 0.178571
| 0.260989
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058252
| 412
| 6
| 87
| 68.666667
| 0.938144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
829d3b977acbd49b1d283eed8daa805095eda30d
| 66
|
py
|
Python
|
kivymd/uix/refreshlayout/__init__.py
|
marvelous-benji/KivyMD
|
4ab8dd339902597eaa9f8a4f9a80d8a6eb7d6053
|
[
"MIT"
] | 1,111
|
2015-07-15T02:31:09.000Z
|
2022-03-29T17:22:02.000Z
|
kivymd/uix/refreshlayout/__init__.py
|
marvelous-benji/KivyMD
|
4ab8dd339902597eaa9f8a4f9a80d8a6eb7d6053
|
[
"MIT"
] | 706
|
2015-06-10T22:24:13.000Z
|
2022-03-31T16:22:39.000Z
|
kivymd/uix/refreshlayout/__init__.py
|
marvelous-benji/KivyMD
|
4ab8dd339902597eaa9f8a4f9a80d8a6eb7d6053
|
[
"MIT"
] | 561
|
2015-07-15T04:57:23.000Z
|
2022-03-31T17:14:31.000Z
|
from .refreshlayout import MDScrollViewRefreshLayout # NOQA F401
| 33
| 65
| 0.848485
| 6
| 66
| 9.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051724
| 0.121212
| 66
| 1
| 66
| 66
| 0.913793
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
82abb265582d316740596d4f98725db51caca2be
| 136
|
py
|
Python
|
dace/libraries/standard/nodes/__init__.py
|
xiacijie/dace
|
2d942440b1d7b139ba112434bfa78f754e10bfe5
|
[
"BSD-3-Clause"
] | 1
|
2021-07-26T07:58:06.000Z
|
2021-07-26T07:58:06.000Z
|
dace/libraries/standard/nodes/__init__.py
|
xiacijie/dace
|
2d942440b1d7b139ba112434bfa78f754e10bfe5
|
[
"BSD-3-Clause"
] | null | null | null |
dace/libraries/standard/nodes/__init__.py
|
xiacijie/dace
|
2d942440b1d7b139ba112434bfa78f754e10bfe5
|
[
"BSD-3-Clause"
] | 1
|
2021-03-04T13:01:48.000Z
|
2021-03-04T13:01:48.000Z
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from .code import CodeLibraryNode
from .reduce import Reduce
| 45.333333
| 75
| 0.816176
| 20
| 136
| 5.55
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068376
| 0.139706
| 136
| 3
| 76
| 45.333333
| 0.880342
| 0.536765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
82b43050cc9b418d7cb8096e36267bc183eb3fc0
| 218
|
py
|
Python
|
master/admin.py
|
oteejay/lms
|
be351c8ec7aee1f81dede6fcf4292c1ecad31c60
|
[
"MIT"
] | null | null | null |
master/admin.py
|
oteejay/lms
|
be351c8ec7aee1f81dede6fcf4292c1ecad31c60
|
[
"MIT"
] | 11
|
2020-06-05T22:33:23.000Z
|
2022-03-11T23:56:46.000Z
|
master/admin.py
|
oteejay/lms
|
be351c8ec7aee1f81dede6fcf4292c1ecad31c60
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Master, MasterPlant, MasterSupplier
# Register the master-data models with the Django admin site, in the same
# order the imports declare them.
for _model in (Master, MasterPlant, MasterSupplier):
    admin.site.register(_model)
| 21.8
| 55
| 0.821101
| 27
| 218
| 6.62963
| 0.481481
| 0.150838
| 0.284916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09633
| 218
| 9
| 56
| 24.222222
| 0.908629
| 0.119266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
82e36cdfc55b3d6b9a33db54ed38fe926758c2e1
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/clikit/api/args/format/args_format_builder.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/clikit/api/args/format/args_format_builder.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/clikit/api/args/format/args_format_builder.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/55/a2/65/efd9c7a5d9eacfa7e6e9e266b8200936d0648c328df1b581abcf65290c
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.395833
| 0
| 96
| 1
| 96
| 96
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7daafb59e6c87c3569d31efe164fbbc16543aaa2
| 118
|
py
|
Python
|
tests/test_resources/hello_world.py
|
yonromai/snooze
|
6183c759a59d59b2fe6f6ba6f54cfa422a3ecb6e
|
[
"MIT"
] | null | null | null |
tests/test_resources/hello_world.py
|
yonromai/snooze
|
6183c759a59d59b2fe6f6ba6f54cfa422a3ecb6e
|
[
"MIT"
] | 8
|
2020-04-14T00:08:30.000Z
|
2021-06-02T01:28:26.000Z
|
tests/test_resources/hello_world.py
|
yonromai/snooze
|
6183c759a59d59b2fe6f6ba6f54cfa422a3ecb6e
|
[
"MIT"
] | null | null | null |
# src: https://www.programiz.com/python-programming/examples/hello-world
greeting = "Hello, world!"
print(greeting)  # snooze: 1900-01-01
| 39.333333
| 72
| 0.737288
| 17
| 118
| 5.117647
| 0.823529
| 0.229885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073395
| 0.076271
| 118
| 2
| 73
| 59
| 0.724771
| 0.754237
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
7dbaeebf7ef30c76b44dc49deb6f141b7527d7ea
| 76
|
py
|
Python
|
graphene_sqlalchemy_auto/__init__.py
|
HJewkes/graphene-sqlalchemy-auto
|
89b136586d7b0c5e46d38b5b098104e3979eca6d
|
[
"MIT"
] | 9
|
2020-06-09T14:54:25.000Z
|
2022-03-10T18:12:13.000Z
|
graphene_sqlalchemy_auto/__init__.py
|
HJewkes/graphene-sqlalchemy-auto
|
89b136586d7b0c5e46d38b5b098104e3979eca6d
|
[
"MIT"
] | 5
|
2020-06-15T22:42:43.000Z
|
2021-11-11T02:04:35.000Z
|
graphene_sqlalchemy_auto/__init__.py
|
HJewkes/graphene-sqlalchemy-auto
|
89b136586d7b0c5e46d38b5b098104e3979eca6d
|
[
"MIT"
] | 7
|
2020-06-09T15:16:50.000Z
|
2022-03-01T15:46:43.000Z
|
from .mutation import MutationObjectType
from .query import QueryObjectType
| 25.333333
| 40
| 0.868421
| 8
| 76
| 8.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 76
| 2
| 41
| 38
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7dc720fd8aa931080074ea5bde8b54dd13abab73
| 868
|
py
|
Python
|
sync_ends/end_point.py
|
nirav1997/sync_ends
|
04e39ec26ac43ad4e6d4e1bdf685eb73c03b1dbb
|
[
"MIT"
] | null | null | null |
sync_ends/end_point.py
|
nirav1997/sync_ends
|
04e39ec26ac43ad4e6d4e1bdf685eb73c03b1dbb
|
[
"MIT"
] | null | null | null |
sync_ends/end_point.py
|
nirav1997/sync_ends
|
04e39ec26ac43ad4e6d4e1bdf685eb73c03b1dbb
|
[
"MIT"
] | null | null | null |
class EndPoint:
    """A single endpoint parsed from a Postman collection item dict."""

    def __init__(self, end_point_json):
        """Pull the endpoint fields out of *end_point_json*.

        Required keys: "_postman_id", "name", and "request" (with "method"
        and "url"/"raw"); "auth", "header" and "url"/"query" are optional
        and default to None.
        """
        request = end_point_json["request"]
        url = request["url"]
        self.id = end_point_json["_postman_id"]
        self.name = end_point_json["name"]
        self.authentication = request.get("auth")
        self.method = request["method"]
        self.header = request.get("header")
        self.url = url["raw"]
        self.query_parameters = url.get("query")

    def get_id(self):
        """Return the Postman id of this endpoint."""
        return self.id

    def get_name(self):
        """Return the endpoint name."""
        return self.name

    def get_authentication(self):
        """Return the auth section, or None when the request has none."""
        return self.authentication

    def get_method(self):
        """Return the HTTP method string."""
        return self.method

    def get_header(self):
        """Return the header list, or None when the request has none."""
        return self.header

    def get_url(self):
        """Return the raw URL string."""
        return self.url

    def get_query_parameters(self):
        """Return the query-parameter list, or None when absent."""
        return self.query_parameters
| 28
| 77
| 0.641705
| 113
| 868
| 4.646018
| 0.19469
| 0.121905
| 0.182857
| 0.180952
| 0.167619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236175
| 868
| 30
| 78
| 28.933333
| 0.791855
| 0
| 0
| 0
| 0
| 0
| 0.092166
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.347826
| false
| 0
| 0
| 0.304348
| 0.695652
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
7de9275421236fc66ee829f3ef4804422068baa4
| 168
|
py
|
Python
|
py_pdf_term/analysis/_analysis/concats/__init__.py
|
kumachan-mis/py-pdf-term
|
282505826ce8c626003e753068d15738d772ce46
|
[
"MIT"
] | null | null | null |
py_pdf_term/analysis/_analysis/concats/__init__.py
|
kumachan-mis/py-pdf-term
|
282505826ce8c626003e753068d15738d772ce46
|
[
"MIT"
] | 1
|
2021-08-02T13:02:12.000Z
|
2021-08-02T13:02:12.000Z
|
py_pdf_term/analysis/_analysis/concats/__init__.py
|
kumachan-mis/py-pdf-term
|
282505826ce8c626003e753068d15738d772ce46
|
[
"MIT"
] | null | null | null |
from .lr import DomainLeftRightFrequency, TermLeftRightFrequencyAnalyzer
# isort: unique-list
# Public API of this subpackage, re-exported from .lr above.
__all__ = ["DomainLeftRightFrequency", "TermLeftRightFrequencyAnalyzer"]
| 33.6
| 72
| 0.845238
| 11
| 168
| 12.545455
| 0.818182
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077381
| 168
| 4
| 73
| 42
| 0.890323
| 0.107143
| 0
| 0
| 0
| 0
| 0.364865
| 0.364865
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7decd9865ba0df893b021511086b6fb31591b17c
| 184
|
py
|
Python
|
CuConnect/pims/uims_api_test.py
|
Shreyans13/Xenial-Xerus
|
6a42886eb6f882f9772689ea40c8e7dab75e678a
|
[
"MIT"
] | null | null | null |
CuConnect/pims/uims_api_test.py
|
Shreyans13/Xenial-Xerus
|
6a42886eb6f882f9772689ea40c8e7dab75e678a
|
[
"MIT"
] | null | null | null |
CuConnect/pims/uims_api_test.py
|
Shreyans13/Xenial-Xerus
|
6a42886eb6f882f9772689ea40c8e7dab75e678a
|
[
"MIT"
] | null | null | null |
from uims_api import SessionUIMS
import os
# Credentials come from the environment; None if the variables are unset.
uims_uid = os.getenv("UIMS_UID")
uims_pass = os.getenv("UIMS_PASS")
new_acc = SessionUIMS(uims_uid, uims_pass)
# with open("marks.json", "w") as file:
#    file.write(new_acc.marks)
| 30.666667
| 68
| 0.722826
| 30
| 184
| 4.266667
| 0.633333
| 0.09375
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119565
| 184
| 6
| 69
| 30.666667
| 0.790123
| 0.36413
| 0
| 0
| 0
| 0
| 0.147826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
7df0a8b05fcabfe4bf248e74f936760b6ceab256
| 19,978
|
py
|
Python
|
tests/test_hvactemplatesystemunitaryheatpumpairtoair.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 19
|
2015-12-08T23:33:51.000Z
|
2022-01-31T04:41:10.000Z
|
tests/test_hvactemplatesystemunitaryheatpumpairtoair.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 2
|
2019-10-04T10:57:00.000Z
|
2021-10-01T06:46:17.000Z
|
tests/test_hvactemplatesystemunitaryheatpumpairtoair.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 7
|
2015-11-04T02:25:01.000Z
|
2021-12-08T03:14:28.000Z
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.hvac_templates import HvactemplateSystemUnitaryHeatPumpAirToAir
log = logging.getLogger(__name__)
class TestHvactemplateSystemUnitaryHeatPumpAirToAir(unittest.TestCase):
    def setUp(self):
        # Create a scratch file for the IDF written during each test.
        # mkstemp returns an already-open OS-level file descriptor plus the
        # path; only the path is used later.
        # NOTE(review): self.fd is never closed in tearDown — possible fd leak.
        self.fd, self.path = tempfile.mkstemp()
    def tearDown(self):
        # Delete the scratch IDF file created in setUp.
        os.remove(self.path)
def test_create_hvactemplatesystemunitaryheatpumpairtoair(self):
pyidf.validation_level = ValidationLevel.error
obj = HvactemplateSystemUnitaryHeatPumpAirToAir()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_system_availability_schedule_name = "object-list|System Availability Schedule Name"
obj.system_availability_schedule_name = var_system_availability_schedule_name
# object-list
var_control_zone_or_thermostat_location_name = "object-list|Control Zone or Thermostat Location Name"
obj.control_zone_or_thermostat_location_name = var_control_zone_or_thermostat_location_name
# real
var_cooling_supply_air_flow_rate = 0.0001
obj.cooling_supply_air_flow_rate = var_cooling_supply_air_flow_rate
# real
var_heating_supply_air_flow_rate = 0.0001
obj.heating_supply_air_flow_rate = var_heating_supply_air_flow_rate
# real
var_no_load_supply_air_flow_rate = 0.0
obj.no_load_supply_air_flow_rate = var_no_load_supply_air_flow_rate
# object-list
var_supply_fan_operating_mode_schedule_name = "object-list|Supply Fan Operating Mode Schedule Name"
obj.supply_fan_operating_mode_schedule_name = var_supply_fan_operating_mode_schedule_name
# alpha
var_supply_fan_placement = "BlowThrough"
obj.supply_fan_placement = var_supply_fan_placement
# real
var_supply_fan_total_efficiency = 0.50005
obj.supply_fan_total_efficiency = var_supply_fan_total_efficiency
# real
var_supply_fan_delta_pressure = 0.0
obj.supply_fan_delta_pressure = var_supply_fan_delta_pressure
# real
var_supply_fan_motor_efficiency = 0.50005
obj.supply_fan_motor_efficiency = var_supply_fan_motor_efficiency
# real
var_supply_fan_motor_in_air_stream_fraction = 0.5
obj.supply_fan_motor_in_air_stream_fraction = var_supply_fan_motor_in_air_stream_fraction
# alpha
var_cooling_coil_type = "SingleSpeedDX"
obj.cooling_coil_type = var_cooling_coil_type
# object-list
var_cooling_coil_availability_schedule_name = "object-list|Cooling Coil Availability Schedule Name"
obj.cooling_coil_availability_schedule_name = var_cooling_coil_availability_schedule_name
# real
var_cooling_design_supply_air_temperature = 15.15
obj.cooling_design_supply_air_temperature = var_cooling_design_supply_air_temperature
# real
var_cooling_coil_gross_rated_total_capacity = 16.16
obj.cooling_coil_gross_rated_total_capacity = var_cooling_coil_gross_rated_total_capacity
# real
var_cooling_coil_gross_rated_sensible_heat_ratio = 0.75
obj.cooling_coil_gross_rated_sensible_heat_ratio = var_cooling_coil_gross_rated_sensible_heat_ratio
# real
var_cooling_coil_gross_rated_cop = 0.0001
obj.cooling_coil_gross_rated_cop = var_cooling_coil_gross_rated_cop
# alpha
var_heat_pump_heating_coil_type = "SingleSpeedDXHeatPump"
obj.heat_pump_heating_coil_type = var_heat_pump_heating_coil_type
# object-list
var_heat_pump_heating_coil_availability_schedule_name = "object-list|Heat Pump Heating Coil Availability Schedule Name"
obj.heat_pump_heating_coil_availability_schedule_name = var_heat_pump_heating_coil_availability_schedule_name
# real
var_heating_design_supply_air_temperature = 21.21
obj.heating_design_supply_air_temperature = var_heating_design_supply_air_temperature
# real
var_heat_pump_heating_coil_gross_rated_capacity = 0.0001
obj.heat_pump_heating_coil_gross_rated_capacity = var_heat_pump_heating_coil_gross_rated_capacity
# real
var_heat_pump_heating_coil_rated_cop = 0.0001
obj.heat_pump_heating_coil_rated_cop = var_heat_pump_heating_coil_rated_cop
# real
var_heat_pump_heating_minimum_outdoor_drybulb_temperature = -20.0
obj.heat_pump_heating_minimum_outdoor_drybulb_temperature = var_heat_pump_heating_minimum_outdoor_drybulb_temperature
# real
var_heat_pump_defrost_maximum_outdoor_drybulb_temperature = 3.61
obj.heat_pump_defrost_maximum_outdoor_drybulb_temperature = var_heat_pump_defrost_maximum_outdoor_drybulb_temperature
# alpha
var_heat_pump_defrost_strategy = "ReverseCycle"
obj.heat_pump_defrost_strategy = var_heat_pump_defrost_strategy
# alpha
var_heat_pump_defrost_control = "Timed"
obj.heat_pump_defrost_control = var_heat_pump_defrost_control
# real
var_heat_pump_defrost_time_period_fraction = 0.0
obj.heat_pump_defrost_time_period_fraction = var_heat_pump_defrost_time_period_fraction
# alpha
var_supplemental_heating_coil_type = "Electric"
obj.supplemental_heating_coil_type = var_supplemental_heating_coil_type
# object-list
var_supplemental_heating_coil_availability_schedule_name = "object-list|Supplemental Heating Coil Availability Schedule Name"
obj.supplemental_heating_coil_availability_schedule_name = var_supplemental_heating_coil_availability_schedule_name
# real
var_supplemental_heating_coil_capacity = 31.31
obj.supplemental_heating_coil_capacity = var_supplemental_heating_coil_capacity
# real
var_supplemental_heating_coil_maximum_outdoor_drybulb_temperature = 21.0
obj.supplemental_heating_coil_maximum_outdoor_drybulb_temperature = var_supplemental_heating_coil_maximum_outdoor_drybulb_temperature
# real
var_supplemental_gas_heating_coil_efficiency = 0.5
obj.supplemental_gas_heating_coil_efficiency = var_supplemental_gas_heating_coil_efficiency
# real
var_supplemental_gas_heating_coil_parasitic_electric_load = 0.0
obj.supplemental_gas_heating_coil_parasitic_electric_load = var_supplemental_gas_heating_coil_parasitic_electric_load
# real
var_maximum_outdoor_air_flow_rate = 0.0
obj.maximum_outdoor_air_flow_rate = var_maximum_outdoor_air_flow_rate
# real
var_minimum_outdoor_air_flow_rate = 0.0
obj.minimum_outdoor_air_flow_rate = var_minimum_outdoor_air_flow_rate
# object-list
var_minimum_outdoor_air_schedule_name = "object-list|Minimum Outdoor Air Schedule Name"
obj.minimum_outdoor_air_schedule_name = var_minimum_outdoor_air_schedule_name
# alpha
var_economizer_type = "FixedDryBulb"
obj.economizer_type = var_economizer_type
# alpha
var_economizer_lockout = "NoLockout"
obj.economizer_lockout = var_economizer_lockout
# real
var_economizer_maximum_limit_drybulb_temperature = 40.4
obj.economizer_maximum_limit_drybulb_temperature = var_economizer_maximum_limit_drybulb_temperature
# real
var_economizer_maximum_limit_enthalpy = 41.41
obj.economizer_maximum_limit_enthalpy = var_economizer_maximum_limit_enthalpy
# real
var_economizer_maximum_limit_dewpoint_temperature = 42.42
obj.economizer_maximum_limit_dewpoint_temperature = var_economizer_maximum_limit_dewpoint_temperature
# real
var_economizer_minimum_limit_drybulb_temperature = 43.43
obj.economizer_minimum_limit_drybulb_temperature = var_economizer_minimum_limit_drybulb_temperature
# object-list
var_supply_plenum_name = "object-list|Supply Plenum Name"
obj.supply_plenum_name = var_supply_plenum_name
# object-list
var_return_plenum_name = "object-list|Return Plenum Name"
obj.return_plenum_name = var_return_plenum_name
# alpha
var_night_cycle_control = "StayOff"
obj.night_cycle_control = var_night_cycle_control
# object-list
var_night_cycle_control_zone_name = "object-list|Night Cycle Control Zone Name"
obj.night_cycle_control_zone_name = var_night_cycle_control_zone_name
# alpha
var_heat_recovery_type = "None"
obj.heat_recovery_type = var_heat_recovery_type
# real
var_sensible_heat_recovery_effectiveness = 0.5
obj.sensible_heat_recovery_effectiveness = var_sensible_heat_recovery_effectiveness
# real
var_latent_heat_recovery_effectiveness = 0.5
obj.latent_heat_recovery_effectiveness = var_latent_heat_recovery_effectiveness
# alpha
var_humidifier_type = "None"
obj.humidifier_type = var_humidifier_type
# object-list
var_humidifier_availability_schedule_name = "object-list|Humidifier Availability Schedule Name"
obj.humidifier_availability_schedule_name = var_humidifier_availability_schedule_name
# real
var_humidifier_rated_capacity = 0.0
obj.humidifier_rated_capacity = var_humidifier_rated_capacity
# real
var_humidifier_rated_electric_power = 0.0
obj.humidifier_rated_electric_power = var_humidifier_rated_electric_power
# object-list
var_humidifier_control_zone_name = "object-list|Humidifier Control Zone Name"
obj.humidifier_control_zone_name = var_humidifier_control_zone_name
# real
var_humidifier_setpoint = 50.0
obj.humidifier_setpoint = var_humidifier_setpoint
# alpha
var_return_fan = "Yes"
obj.return_fan = var_return_fan
# real
var_return_fan_total_efficiency = 0.50005
obj.return_fan_total_efficiency = var_return_fan_total_efficiency
# real
var_return_fan_delta_pressure = 0.0
obj.return_fan_delta_pressure = var_return_fan_delta_pressure
# real
var_return_fan_motor_efficiency = 0.50005
obj.return_fan_motor_efficiency = var_return_fan_motor_efficiency
# real
var_return_fan_motor_in_air_stream_fraction = 0.5
obj.return_fan_motor_in_air_stream_fraction = var_return_fan_motor_in_air_stream_fraction
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].name, var_name)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].system_availability_schedule_name, var_system_availability_schedule_name)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].control_zone_or_thermostat_location_name, var_control_zone_or_thermostat_location_name)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].cooling_supply_air_flow_rate, var_cooling_supply_air_flow_rate)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heating_supply_air_flow_rate, var_heating_supply_air_flow_rate)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].no_load_supply_air_flow_rate, var_no_load_supply_air_flow_rate)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supply_fan_operating_mode_schedule_name, var_supply_fan_operating_mode_schedule_name)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supply_fan_placement, var_supply_fan_placement)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supply_fan_total_efficiency, var_supply_fan_total_efficiency)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supply_fan_delta_pressure, var_supply_fan_delta_pressure)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supply_fan_motor_efficiency, var_supply_fan_motor_efficiency)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supply_fan_motor_in_air_stream_fraction, var_supply_fan_motor_in_air_stream_fraction)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].cooling_coil_type, var_cooling_coil_type)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].cooling_coil_availability_schedule_name, var_cooling_coil_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].cooling_design_supply_air_temperature, var_cooling_design_supply_air_temperature)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].cooling_coil_gross_rated_total_capacity, var_cooling_coil_gross_rated_total_capacity)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].cooling_coil_gross_rated_sensible_heat_ratio, var_cooling_coil_gross_rated_sensible_heat_ratio)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].cooling_coil_gross_rated_cop, var_cooling_coil_gross_rated_cop)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heat_pump_heating_coil_type, var_heat_pump_heating_coil_type)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heat_pump_heating_coil_availability_schedule_name, var_heat_pump_heating_coil_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heating_design_supply_air_temperature, var_heating_design_supply_air_temperature)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heat_pump_heating_coil_gross_rated_capacity, var_heat_pump_heating_coil_gross_rated_capacity)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heat_pump_heating_coil_rated_cop, var_heat_pump_heating_coil_rated_cop)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heat_pump_heating_minimum_outdoor_drybulb_temperature, var_heat_pump_heating_minimum_outdoor_drybulb_temperature)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heat_pump_defrost_maximum_outdoor_drybulb_temperature, var_heat_pump_defrost_maximum_outdoor_drybulb_temperature)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heat_pump_defrost_strategy, var_heat_pump_defrost_strategy)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heat_pump_defrost_control, var_heat_pump_defrost_control)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heat_pump_defrost_time_period_fraction, var_heat_pump_defrost_time_period_fraction)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supplemental_heating_coil_type, var_supplemental_heating_coil_type)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supplemental_heating_coil_availability_schedule_name, var_supplemental_heating_coil_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supplemental_heating_coil_capacity, var_supplemental_heating_coil_capacity)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supplemental_heating_coil_maximum_outdoor_drybulb_temperature, var_supplemental_heating_coil_maximum_outdoor_drybulb_temperature)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supplemental_gas_heating_coil_efficiency, var_supplemental_gas_heating_coil_efficiency)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supplemental_gas_heating_coil_parasitic_electric_load, var_supplemental_gas_heating_coil_parasitic_electric_load)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].maximum_outdoor_air_flow_rate, var_maximum_outdoor_air_flow_rate)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].minimum_outdoor_air_flow_rate, var_minimum_outdoor_air_flow_rate)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].minimum_outdoor_air_schedule_name, var_minimum_outdoor_air_schedule_name)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].economizer_type, var_economizer_type)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].economizer_lockout, var_economizer_lockout)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].economizer_maximum_limit_drybulb_temperature, var_economizer_maximum_limit_drybulb_temperature)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].economizer_maximum_limit_enthalpy, var_economizer_maximum_limit_enthalpy)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].economizer_maximum_limit_dewpoint_temperature, var_economizer_maximum_limit_dewpoint_temperature)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].economizer_minimum_limit_drybulb_temperature, var_economizer_minimum_limit_drybulb_temperature)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].supply_plenum_name, var_supply_plenum_name)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].return_plenum_name, var_return_plenum_name)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].night_cycle_control, var_night_cycle_control)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].night_cycle_control_zone_name, var_night_cycle_control_zone_name)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].heat_recovery_type, var_heat_recovery_type)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].sensible_heat_recovery_effectiveness, var_sensible_heat_recovery_effectiveness)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].latent_heat_recovery_effectiveness, var_latent_heat_recovery_effectiveness)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].humidifier_type, var_humidifier_type)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].humidifier_availability_schedule_name, var_humidifier_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].humidifier_rated_capacity, var_humidifier_rated_capacity)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].humidifier_rated_electric_power, var_humidifier_rated_electric_power)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].humidifier_control_zone_name, var_humidifier_control_zone_name)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].humidifier_setpoint, var_humidifier_setpoint)
self.assertEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].return_fan, var_return_fan)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].return_fan_total_efficiency, var_return_fan_total_efficiency)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].return_fan_delta_pressure, var_return_fan_delta_pressure)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].return_fan_motor_efficiency, var_return_fan_motor_efficiency)
self.assertAlmostEqual(idf2.hvactemplatesystemunitaryheatpumpairtoairs[0].return_fan_motor_in_air_stream_fraction, var_return_fan_motor_in_air_stream_fraction)
| 71.863309
| 211
| 0.814096
| 2,281
| 19,978
| 6.566418
| 0.065322
| 0.187341
| 0.191414
| 0.161036
| 0.907398
| 0.835091
| 0.743157
| 0.619175
| 0.493257
| 0.307251
| 0
| 0.014314
| 0.139754
| 19,978
| 278
| 212
| 71.863309
| 0.857209
| 0.020072
| 0
| 0
| 0
| 0
| 0.034485
| 0.00456
| 0
| 0
| 0
| 0
| 0.294686
| 1
| 0.014493
| false
| 0
| 0.038647
| 0
| 0.057971
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
814cb767128e870b49e151586210fd210a8fdb6d
| 54
|
py
|
Python
|
cropper/__init__.py
|
9bstudios/mecco_cropper
|
53ea5298a6fae14ce1e18d256581d04c1e0a6426
|
[
"MIT"
] | 2
|
2018-06-15T14:29:04.000Z
|
2021-04-19T23:44:40.000Z
|
cropper/__init__.py
|
9bstudios/mecco_cropper
|
53ea5298a6fae14ce1e18d256581d04c1e0a6426
|
[
"MIT"
] | null | null | null |
cropper/__init__.py
|
9bstudios/mecco_cropper
|
53ea5298a6fae14ce1e18d256581d04c1e0a6426
|
[
"MIT"
] | null | null | null |
# python
from notifier import *
from camera import *
| 10.8
| 22
| 0.740741
| 7
| 54
| 5.714286
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.203704
| 54
| 4
| 23
| 13.5
| 0.930233
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
816fc7b1f0847871f2ddeda9ab201c449d3a9b6a
| 2,529
|
py
|
Python
|
tests/test_dataset.py
|
nobu-g/cohesion-analysis
|
bf2e22c1aff51f96fd2aaef6359839646548c3be
|
[
"MIT"
] | 12
|
2020-12-25T11:13:17.000Z
|
2021-12-28T05:19:46.000Z
|
tests/test_dataset.py
|
nobu-g/cohesion-analysis
|
bf2e22c1aff51f96fd2aaef6359839646548c3be
|
[
"MIT"
] | 1
|
2020-12-25T09:26:26.000Z
|
2020-12-25T09:26:34.000Z
|
tests/test_dataset.py
|
nobu-g/cohesion-analysis
|
bf2e22c1aff51f96fd2aaef6359839646548c3be
|
[
"MIT"
] | 1
|
2022-02-25T13:22:47.000Z
|
2022-02-25T13:22:47.000Z
|
from data_loader.dataset import PASDataset
def test_train_dataset(fixture_train_dataset: PASDataset, fixture_example1: dict):
    """Check that the second training example matches the reference fixture.

    Verifies the subword tokenization, the surface/token alignment per word,
    and the gold argument / candidate sets for every relation.
    """
    example = fixture_train_dataset.examples[1]
    mrphs = fixture_example1['mrphs']
    # [CLS] + all subword tokens of every morpheme + [SEP]
    assert example.tokens == ['[CLS]'] + [t for mrph in mrphs for t in mrph['tokens']] + ['[SEP]']
    assert len(example.words) == len(mrphs)
    # Walk the parallel per-word annotation lists in lockstep instead of
    # indexing each one with range(len(...)).
    for word, tok_index, arguments, arg_candidates, ment_candidates, mrph in zip(
            example.words,
            example.orig_to_tok_index,
            example.arguments_set,
            example.arg_candidates_set,
            example.ment_candidates_set,
            mrphs):
        assert mrph['surf'] == word
        # head token is the representative token of a mrph
        assert mrph['tokens'][0] == example.tokens[tok_index]
        for rel in fixture_train_dataset.relations:
            # strip the two-character case-marking suffixes before comparing
            arg_strings = [arg[:-2] if arg[-2:] in ('%C', '%N', '%O') else arg
                           for arg in arguments[rel]]
            assert set(arg_strings) == set(mrph['arguments'][rel])
        assert set(arg_candidates) == set(mrph['arg_candidates'])
        assert set(ment_candidates) == set(mrph['ment_candidates'])
def test_eval_dataset(fixture_eval_dataset: PASDataset, fixture_example1: dict):
    """Check that the second evaluation example matches the reference fixture.

    Unlike the training dataset, evaluation examples expose no gold arguments:
    every annotated relation is reduced to the placeholder 'NULL' (or 'NA' for
    the coreference relation '='), and unannotated relations are empty.
    """
    example = fixture_eval_dataset.examples[1]
    mrphs = fixture_example1['mrphs']
    # [CLS] + all subword tokens of every morpheme + [SEP]
    assert example.tokens == ['[CLS]'] + [t for mrph in mrphs for t in mrph['tokens']] + ['[SEP]']
    assert len(example.words) == len(mrphs)
    # Walk the parallel per-word annotation lists in lockstep instead of
    # indexing each one with range(len(...)).
    for word, tok_index, arguments, arg_candidates, ment_candidates, mrph in zip(
            example.words,
            example.orig_to_tok_index,
            example.arguments_set,
            example.arg_candidates_set,
            example.ment_candidates_set,
            mrphs):
        assert mrph['surf'] == word
        # head token is the representative token of a mrph
        assert mrph['tokens'][0] == example.tokens[tok_index]
        for rel in fixture_eval_dataset.relations:
            if mrph['arguments'][rel]:
                if rel != '=':
                    assert arguments[rel] == ['NULL']
                else:
                    assert arguments[rel] == ['NA']
            else:
                assert arguments[rel] == []
        assert set(arg_candidates) == set(mrph['arg_candidates'])
        assert set(ment_candidates) == set(mrph['ment_candidates'])
| 45.160714
| 113
| 0.633847
| 321
| 2,529
| 4.809969
| 0.196262
| 0.07772
| 0.07772
| 0.040803
| 0.79728
| 0.781736
| 0.781736
| 0.716969
| 0.716969
| 0.716969
| 0
| 0.007243
| 0.235666
| 2,529
| 55
| 114
| 45.981818
| 0.791516
| 0.057335
| 0
| 0.681818
| 0
| 0
| 0.071909
| 0
| 0
| 0
| 0
| 0
| 0.363636
| 1
| 0.045455
| false
| 0
| 0.022727
| 0
| 0.068182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
818ef43acb6b3244ef68f76a2b348b3f667098c5
| 1,648
|
py
|
Python
|
models/fcn.py
|
xinyandai/structural-nn
|
373cec9ca2ee766ddb1d2a09eac4dd551d57e648
|
[
"MIT"
] | 2
|
2020-01-01T05:21:58.000Z
|
2020-01-02T02:06:19.000Z
|
models/fcn.py
|
xinyandai/structural-nn
|
373cec9ca2ee766ddb1d2a09eac4dd551d57e648
|
[
"MIT"
] | null | null | null |
models/fcn.py
|
xinyandai/structural-nn
|
373cec9ca2ee766ddb1d2a09eac4dd551d57e648
|
[
"MIT"
] | null | null | null |
# https://pytorch.org/tutorials/beginner/pytorch_with_examples.html
import torch
import torch.nn as nn
from pyops.vqlayer import AQLinear
class FCN(torch.nn.Module):
    """Two-layer fully connected network for flattened 28x28 inputs.

    Flattens each input image, applies a hidden linear layer with a ReLU
    nonlinearity, then a linear output layer producing ``num_classes`` logits.
    """

    def __init__(self, D_in=784, H=128, num_classes=10):
        """Instantiate the two nn.Linear modules and keep them as members."""
        super(FCN, self).__init__()
        self.linear1 = nn.Linear(D_in, H)
        self.linear2 = nn.Linear(H, num_classes)

    def forward(self, x):
        """Map a batch of images to class logits."""
        flat = x.view(-1, 28 * 28)
        hidden = self.linear1(flat).clamp(min=0)  # clamp(min=0) acts as ReLU
        return self.linear2(hidden)
class VQFCN(torch.nn.Module):
    """FCN variant whose hidden layer is a vector-quantized AQLinear (ks=32).

    Same interface and forward pass as FCN: flatten, quantized hidden layer
    with ReLU, then a plain linear output layer of ``num_classes`` logits.
    """

    def __init__(self, D_in=784, H=1024, num_classes=10):
        """Build the quantized hidden layer and the plain linear output layer."""
        super(VQFCN, self).__init__()
        self.linear1 = AQLinear(D_in, H, ks=32)
        self.linear2 = nn.Linear(H, num_classes)

    def forward(self, x):
        """Map a batch of 28x28 images to class logits."""
        flat = x.view(-1, 28 * 28)
        hidden = self.linear1(flat).clamp(min=0)  # clamp(min=0) acts as ReLU
        return self.linear2(hidden)
| 34.333333
| 83
| 0.625
| 244
| 1,648
| 4.081967
| 0.319672
| 0.03012
| 0.064257
| 0.032129
| 0.787149
| 0.787149
| 0.787149
| 0.787149
| 0.787149
| 0.787149
| 0
| 0.032939
| 0.281553
| 1,648
| 48
| 84
| 34.333333
| 0.808277
| 0.383495
| 0
| 0.521739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173913
| false
| 0
| 0.130435
| 0
| 0.478261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
81a98702d1d46e54276b1e81375bbf26d903f142
| 2,646
|
py
|
Python
|
models.py
|
sheffali/Django-project
|
ad3132a5c67aed5492766288e50a6789c885f13b
|
[
"MIT"
] | null | null | null |
models.py
|
sheffali/Django-project
|
ad3132a5c67aed5492766288e50a6789c885f13b
|
[
"MIT"
] | null | null | null |
models.py
|
sheffali/Django-project
|
ad3132a5c67aed5492766288e50a6789c885f13b
|
[
"MIT"
] | null | null | null |
from django.db import models
class Regist(models.Model):
    """Registration record for a user (name, age, credentials, contact details).

    All fields are stored as free-form text, mirroring the original schema.
    """
    name = models.CharField(max_length=128)
    age = models.CharField(max_length=128)
    password1 = models.CharField(max_length=100)
    paremail = models.CharField(max_length=128)
    phone = models.CharField(max_length=128)
    gender = models.CharField(max_length=128)

    def __str__(self):
        # Bug fix: the method was misspelled `__str`, so Django never used it
        # as the string representation; the extra `return` statements after
        # the first were unreachable dead code and have been removed.
        return self.name
class Test_r(models.Model):
    """A submitted test: the taker's name, test type, answers, and total score.

    NOTE(review): there is no `ans3` column in the original schema — confirm
    whether that omission is intentional before adding one.
    """
    name = models.CharField(max_length=128)
    type = models.CharField(max_length=128)
    ans1 = models.CharField(max_length=128)
    ans2 = models.CharField(max_length=128)
    ans4 = models.CharField(max_length=128)
    ans5 = models.CharField(max_length=128)
    ans6 = models.CharField(max_length=128)
    ans7 = models.CharField(max_length=128)
    ans8 = models.CharField(max_length=128)
    ans9 = models.CharField(max_length=128)
    ans10 = models.CharField(max_length=128)
    Tscore = models.CharField(max_length=128, null=True)

    def __str__(self):
        # Bug fix: the method was misspelled `__str`, so Django never used it;
        # the chain of `return` statements after the first was unreachable
        # (including `return self.ans3`, which referenced a nonexistent field).
        return self.name
class OTest(models.Model):
    """An online test submission: fifteen answers and a total score.

    NOTE(review): there is no `ans3` column in the original schema — confirm
    whether that omission is intentional. A commented-out `Result` field
    existed here previously; re-add it if result storage is needed.
    """
    ans1 = models.CharField(max_length=128)
    ans2 = models.CharField(max_length=128)
    ans4 = models.CharField(max_length=128)
    ans5 = models.CharField(max_length=128)
    ans6 = models.CharField(max_length=128)
    ans7 = models.CharField(max_length=128)
    ans8 = models.CharField(max_length=128)
    ans9 = models.CharField(max_length=128)
    ans10 = models.CharField(max_length=128)
    ans11 = models.CharField(max_length=128)
    ans12 = models.CharField(max_length=128)
    ans13 = models.CharField(max_length=128)
    ans14 = models.CharField(max_length=128)
    ans15 = models.CharField(max_length=128)
    Tscore = models.CharField(max_length=128, null=True)

    def __str__(self):
        # Bug fix: the method was misspelled `__str`, so Django never used it;
        # the chain of `return` statements after the first (including
        # `return self.ans3`, a nonexistent field) was unreachable dead code.
        return self.ans1
| 29.730337
| 56
| 0.676115
| 345
| 2,646
| 5.066667
| 0.133333
| 0.20595
| 0.350114
| 0.466819
| 0.807208
| 0.668192
| 0.668192
| 0.668192
| 0.592677
| 0.592677
| 0
| 0.080543
| 0.221088
| 2,646
| 88
| 57
| 30.068182
| 0.767589
| 0.025699
| 0
| 0.626667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0.026667
| 0.013333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
c4d21f72497a30634a81dd10c7b041b8085925d5
| 1,418
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/djangoapps/contentstore/tests/test_request_event.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/djangoapps/contentstore/tests/test_request_event.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/djangoapps/contentstore/tests/test_request_event.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""Tests for CMS's requests to logs"""
from django.test import TestCase
from django.urls import reverse
from cms.djangoapps.contentstore.views.helpers import event as cms_user_track
class CMSLogTest(TestCase):
    """
    Verify that log requests sent to CMS's "/event" endpoint return 204,
    for both POST and GET submissions.
    """

    @staticmethod
    def _payloads():
        """Sample student-answer payloads submitted to cms's "/event" url."""
        return [
            {"event": "my_event", "event_type": "my_event_type", "page": "my_page"},
            {"event": "{'json': 'object'}", "event_type": chr(512), "page": "my_page"}
        ]

    def test_post_answers_to_log(self):
        """
        Checks that student answer requests submitted via POST are
        correctly returned as 204s.
        """
        for request_params in self._payloads():
            response = self.client.post(reverse(cms_user_track), request_params)
            self.assertEqual(response.status_code, 204)

    def test_get_answers_to_log(self):
        """
        Checks that student answer requests submitted via GET are
        correctly returned as 204s.
        """
        for request_params in self._payloads():
            response = self.client.get(reverse(cms_user_track), request_params)
            self.assertEqual(response.status_code, 204)
| 35.45
| 86
| 0.620592
| 177
| 1,418
| 4.779661
| 0.327684
| 0.06383
| 0.047281
| 0.037825
| 0.711584
| 0.711584
| 0.711584
| 0.711584
| 0.711584
| 0.711584
| 0
| 0.019924
| 0.2567
| 1,418
| 39
| 87
| 36.358974
| 0.782732
| 0.208039
| 0
| 0.5
| 0
| 0
| 0.175
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.1
| false
| 0
| 0.15
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c4e604711e9956a055e2c0087608baac2532db78
| 4,137
|
py
|
Python
|
AIs/Dekun Yang/simple_mind.py
|
YSabarad/monopyly
|
0460f2452c83846b6b9e3b234be411e12a86d69c
|
[
"MIT"
] | 4
|
2015-11-04T21:18:40.000Z
|
2020-12-26T21:15:23.000Z
|
AIs/Dekun Yang/simple_mind.py
|
YSabarad/monopyly
|
0460f2452c83846b6b9e3b234be411e12a86d69c
|
[
"MIT"
] | 2
|
2021-08-09T18:19:58.000Z
|
2021-08-10T14:44:54.000Z
|
AIs/Dekun Yang/simple_mind.py
|
YSabarad/monopyly
|
0460f2452c83846b6b9e3b234be411e12a86d69c
|
[
"MIT"
] | 6
|
2015-08-01T17:54:17.000Z
|
2022-02-28T00:00:21.000Z
|
from .strategy import *
class SimpleMindAI(PlayerAIBase):
    """Monopoly AI that delegates every decision to a Strategy object.

    The Strategy is created lazily (`self.strategy` starts as None) because
    some game callbacks can arrive before this AI's first start_of_turn;
    several handlers therefore repeat the same initialisation workaround.
    """
    def __init__(self):
        # round counts this AI's turns; strategy is built lazily on first use.
        self.round = 0
        self.strategy = None
    def get_name(self):
        """Return the display name of this AI."""
        return "SimpleMind"
    def start_of_game(self):
        """Reset the turn counter for a fresh game."""
        self.round = 0
    def start_of_turn(self, game_state, player):
        """Refresh the strategy's view of the game at the start of each turn.

        The refresh block only runs on this AI's own turns; the final
        statement runs on every turn once the strategy exists.
        """
        if player.ai is self:
            if self.round == 0:
                self._initialise(game_state, player)
            self.round += 1
            self.strategy.update(game_state)
            self.strategy.decide_mode()
            self.strategy.deal_init(game_state)
            self.strategy.display_log(False)
            self.strategy.show(game_state)
        if self.round > 0:
            # Record which player is currently acting — presumably used by the
            # Strategy when evaluating deals; confirm in strategy.py.
            self.strategy.ply_in_action = player
    #----- make deals -----
    def propose_deal(self, game_state, player):
        """Delegate deal proposals to the strategy (lazy-init safe)."""
        # workaround for start_of_game initialisation
        if self.strategy is None:
            self._initialise(game_state, player)
        return self.strategy.propose_deal(game_state)
    def deal_result(self, deal_info):
        """Forward the outcome of a proposed deal to the strategy."""
        self.strategy.deal_result(deal_info)
    def deal_proposed(self, game_state, player, deal_proposal):
        """Delegate the response to an incoming deal (lazy-init safe)."""
        # workaround for start_of_game initialisation
        if self.strategy is None:
            self._initialise(game_state, player)
        return self.strategy.deal_proposed(game_state, deal_proposal)
    def deal_completed(self, deal_result):
        """Forward completed-deal notifications; ignored before initialisation."""
        if self.strategy is not None:
            return self.strategy.deal_completed(deal_result)
    #----- unowned property, take it or leave it -----
    def landed_on_unowned_property(self, game_state, player, property):
        """Let the strategy decide whether to buy the unowned property."""
        return self.strategy.landed_on_unowned_property(game_state, property)
    def property_offered_for_auction(self, game_state, player, property):
        """Let the strategy bid in an auction (lazy-init safe, state refreshed)."""
        # workaround for start_of_game initialisation
        if self.strategy is None:
            self._initialise(game_state, player)
        self.strategy.update(game_state)
        return self.strategy.property_offered_for_auction(game_state, property)
    def auction_result(self, status, property, player, amount_paid):
        """Forward the auction outcome to the strategy."""
        self.strategy.auction_result(status, property, player, amount_paid)
    #----- manage cash flow -----
    def money_will_be_taken(self, player, amount):
        """Warn the strategy of an upcoming charge; ignored before init."""
        if self.strategy is not None:
            self.strategy.set_money_will_be_taken(amount)
    def money_taken(self, player, amount):
        """Clear the pending-charge amount once the money has been taken."""
        self.strategy.money_will_be_taken = 0
    #----- build house -----
    def unmortgage_properties(self, game_state, player):
        """Let the strategy pick properties to unmortgage."""
        return self.strategy.unmortgage_properties(game_state)
    def build_houses(self, game_state, player):
        """Let the strategy pick where to build houses."""
        return self.strategy.build_houses(game_state)
    def sell_houses(self, game_state, player):
        """Let the strategy pick houses to sell."""
        return self.strategy.sell_houses(game_state)
    def mortgage_properties(self, game_state, player):
        """Let the strategy pick properties to mortgage (lazy-init safe)."""
        # workaround for start_of_game initialisation
        if self.strategy is None:
            self._initialise(game_state, player)
        self.strategy.update(game_state)
        return self.strategy.mortgage_properties(game_state)
    def eminent_domain(self, game_state, player):
        """Forward the eminent-domain event; ignored before initialisation."""
        if self.strategy is not None:
            self.strategy.set_eminent_domain(game_state)
    #----- other stuff -----
    def get_out_of_jail(self, game_state, player):
        """Let the strategy decide how to leave jail."""
        return self.strategy.get_out_of_jail(game_state)
    def _initialise(self, game_state, player):
        # Build the Strategy for this player; called once, lazily.
        self.strategy = Strategy(game_state, player)
    #----- unused -----
    # The callbacks below are required by the PlayerAIBase interface but this
    # AI takes no action for them.
    def player_landed_on_square(self, game_state, square, player):
        pass
    def money_given(self, player, amount):
        pass
    def got_get_out_of_jail_free_card(self):
        pass
    def players_birthday(self):
        return "Happy Birthday!"
    def pay_ten_pounds_or_take_a_chance(self, game_state, player):
        # Always pay the fine rather than gamble on a Chance card.
        return PlayerAIBase.Action.PAY_TEN_POUND_FINE
    def player_went_bankrupt(self, player):
        pass
    def player_ran_out_of_time(self, player):
        pass
    def game_over(self, winner, maximum_rounds_played):
        pass
    def ai_error(self, message):
        pass
| 4,137
| 4,137
| 0.671985
| 527
| 4,137
| 4.988615
| 0.212524
| 0.119817
| 0.108406
| 0.093952
| 0.424116
| 0.30582
| 0.284519
| 0.256371
| 0.223659
| 0.194751
| 0
| 0.001904
| 0.238095
| 4,137
| 1
| 4,137
| 4,137
| 0.83217
| 0.081943
| 0
| 0.285714
| 0
| 0
| 0.006603
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.345238
| false
| 0.083333
| 0.011905
| 0.095238
| 0.52381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
1ef8922d186eaecbe284d06328d75db23bc4820d
| 171
|
py
|
Python
|
dask/diagnostics/__init__.py
|
srijan-deepsource/dask
|
0673d9084e02f985f3fdf5ba6ede80e8de5ac15c
|
[
"BSD-3-Clause"
] | 20
|
2015-01-19T14:04:10.000Z
|
2020-01-14T03:43:19.000Z
|
dask/diagnostics/__init__.py
|
srijan-deepsource/dask
|
0673d9084e02f985f3fdf5ba6ede80e8de5ac15c
|
[
"BSD-3-Clause"
] | 37
|
2020-10-20T08:30:53.000Z
|
2020-12-22T13:15:45.000Z
|
dask/diagnostics/__init__.py
|
srijan-deepsource/dask
|
0673d9084e02f985f3fdf5ba6ede80e8de5ac15c
|
[
"BSD-3-Clause"
] | 7
|
2015-01-04T18:50:00.000Z
|
2020-07-29T11:00:04.000Z
|
from ..callbacks import Callback
from .profile import Profiler, ResourceProfiler, CacheProfiler
from .progress import ProgressBar
from .profile_visualize import visualize
| 34.2
| 62
| 0.853801
| 19
| 171
| 7.631579
| 0.578947
| 0.151724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 171
| 4
| 63
| 42.75
| 0.947712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4830f3a23b3289e46f83922fa402f2992be71ce7
| 20,067
|
py
|
Python
|
Python/microsoft_bonsai_api/simulator/generated/operations/_session_operations.py
|
ForrestTrepte/microsoft-bonsai-api
|
c8ac5829aa39fe5fd0362bfd68a0698b86e0bd0a
|
[
"MIT"
] | 3
|
2021-06-16T22:05:13.000Z
|
2021-12-18T14:42:57.000Z
|
Python/microsoft_bonsai_api/simulator/generated/operations/_session_operations.py
|
dbbevan/microsoft-bonsai-api
|
ecbab2c575450010339c07241db992ccc1629f66
|
[
"MIT"
] | 10
|
2021-05-27T23:34:53.000Z
|
2022-03-25T20:36:59.000Z
|
Python/microsoft_bonsai_api/simulator/generated/operations/_session_operations.py
|
dbbevan/microsoft-bonsai-api
|
ecbab2c575450010339c07241db992ccc1629f66
|
[
"MIT"
] | 4
|
2021-06-09T13:42:28.000Z
|
2022-02-17T22:07:25.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator})
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import (
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar("T")
ClsType = Optional[
Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]
]
class SessionOperations(object):
"""SessionOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~microsoft_bonsai_api.simulator.generated.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list(
        self,
        workspace_name,  # type: str
        deployment_mode=None,  # type: Optional[str]
        session_status=None,  # type: Optional[str]
        collection=None,  # type: Optional[str]
        package=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["models.SimulatorSessionSummary"]
        """The deployment_mode appears in the query string. It can be one of
        Unspecified, Testing, or Hosted. If it has a 'neq:' prefix, that means "not;"
        e.g., {.../simulatorSessions?deployment_mode=neq:Hosted} means the response should not include
        simulators that are hosted.
        The session_status can be one of Attachable, Attached, Detaching, Rejected,
        and supports the neq: prefix.
        The collection appears in the query string
        The package appears in the query string
        The filter queries can appear together, like
        {.../simulatorSessions?deployment_mode=Hosted&collection=1234-455-33333}.
        Retrieves all of the simulators currently registered with all
        simulator gateways within this workspace.
        :param workspace_name: The workspace identifier.
        :type workspace_name: str
        :param deployment_mode: A specifier to filter on deployment mode.
        :type deployment_mode: str
        :param session_status: A specifier to filter on session status.
        :type session_status: str
        :param collection: If present, only sessions in this collection.
        :type collection: str
        :param package: If present, only sessions in this package.
        :type package: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of SimulatorSessionSummary, or the result of cls(response)
        :rtype: list[~microsoft_bonsai_api.simulator.generated.models.SimulatorSessionSummary]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional response-transform callback, popped so it is not sent on the wire.
        cls = kwargs.pop(
            "cls", None
        )  # type: ClsType[List["models.SimulatorSessionSummary"]]
        # Map HTTP status codes to azure-core exception types; callers may
        # extend or override the defaults via the 'error_map' kwarg.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        # Construct URL from the template stored on this method's metadata.
        url = self.list.metadata["url"]  # type: ignore
        path_format_arguments = {
            "workspaceName": self._serialize.url(
                "workspace_name", workspace_name, "str"
            ),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters — only the filters the caller actually supplied.
        query_parameters = {}  # type: Dict[str, Any]
        if deployment_mode is not None:
            query_parameters["deployment_mode"] = self._serialize.query(
                "deployment_mode", deployment_mode, "str"
            )
        if session_status is not None:
            query_parameters["session_status"] = self._serialize.query(
                "session_status", session_status, "str"
            )
        if collection is not None:
            query_parameters["collection"] = self._serialize.query(
                "collection", collection, "str"
            )
        if package is not None:
            query_parameters["package"] = self._serialize.query(
                "package", package, "str"
            )
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters["Accept"] = "application/json"
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Any non-200 is surfaced as an HttpResponseError carrying the
            # service's ProblemDetails payload (after consulting error_map).
            map_error(
                status_code=response.status_code, response=response, error_map=error_map
            )
            error = self._deserialize(models.ProblemDetails, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize("[SimulatorSessionSummary]", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    # URL template consumed above; '# type: ignore' because functions do not
    # normally carry a 'metadata' attribute.
    list.metadata = {"url": "/v2/workspaces/{workspaceName}/simulatorSessions"}  # type: ignore
def create(
self,
workspace_name, # type: str
body, # type: "models.SimulatorInterface"
**kwargs # type: Any
):
# type: (...) -> "models.SimulatorSessionResponse"
"""Registers a simulator with the Bonsai platform.
Registers a simulator with the Bonsai platform.
:param workspace_name: The workspace identifier.
:type workspace_name: str
:param body: Information and capabilities about the simulator.
:type body: ~microsoft_bonsai_api.simulator.generated.models.SimulatorInterface
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SimulatorSessionResponse, or the result of cls(response)
:rtype: ~microsoft_bonsai_api.simulator.generated.models.SimulatorSessionResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop(
"cls", None
) # type: ClsType["models.SimulatorSessionResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json-patch+json")
# Construct URL
url = self.create.metadata["url"] # type: ignore
path_format_arguments = {
"workspaceName": self._serialize.url(
"workspace_name", workspace_name, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters["Content-Type"] = self._serialize.header(
"content_type", content_type, "str"
)
header_parameters["Accept"] = "application/json"
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, "SimulatorInterface")
body_content_kwargs["content"] = body_content
request = self._client.post(
url, query_parameters, header_parameters, **body_content_kwargs
)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(
status_code=response.status_code, response=response, error_map=error_map
)
error = self._deserialize(models.ProblemDetails, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("SimulatorSessionResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {"url": "/v2/workspaces/{workspaceName}/simulatorSessions"} # type: ignore
def get(
self,
workspace_name, # type: str
session_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.SimulatorSessionResponse"
"""Retrieves a simulator session corresponding to the sessionId.
Retrieves a simulator session corresponding to the sessionId.
:param workspace_name: The workspace identifier.
:type workspace_name: str
:param session_id: The sessionId of the simulator session to fetch.
:type session_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SimulatorSessionResponse, or the result of cls(response)
:rtype: ~microsoft_bonsai_api.simulator.generated.models.SimulatorSessionResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop(
"cls", None
) # type: ClsType["models.SimulatorSessionResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
# Construct URL
url = self.get.metadata["url"] # type: ignore
path_format_arguments = {
"workspaceName": self._serialize.url(
"workspace_name", workspace_name, "str"
),
"sessionId": self._serialize.url("session_id", session_id, "str"),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters["Accept"] = "application/json"
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(
status_code=response.status_code, response=response, error_map=error_map
)
error = self._deserialize(models.ProblemDetails, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("SimulatorSessionResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/v2/workspaces/{workspaceName}/simulatorSessions/{sessionId}"} # type: ignore
def delete(
self,
workspace_name, # type: str
session_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes the Simulator session.
Deletes the Simulator session.
:param workspace_name: The workspace identifier.
:type workspace_name: str
:param session_id: The session ID generated during registration.
:type session_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
# Construct URL
url = self.delete.metadata["url"] # type: ignore
path_format_arguments = {
"workspaceName": self._serialize.url(
"workspace_name", workspace_name, "str"
),
"sessionId": self._serialize.url("session_id", session_id, "str"),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(
status_code=response.status_code, response=response, error_map=error_map
)
error = self._deserialize(models.ProblemDetails, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/v2/workspaces/{workspaceName}/simulatorSessions/{sessionId}"} # type: ignore
    def get_most_recent_action(
        self,
        workspace_name,  # type: str
        session_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.Event"
        """Gets the most recent action sent to the simulator to process.

        :param workspace_name: The workspace identifier.
        :type workspace_name: str
        :param session_id: Unique identification of the simulator.
        :type session_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Event, or the result of cls(response)
        :rtype: ~microsoft_bonsai_api.simulator.generated.models.Event
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["models.Event"]
        # 404/409 are mapped to typed azure-core exceptions; callers may
        # extend/override this via the 'error_map' kwarg.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        # Construct URL: expand the operation's URL template with the
        # URL-encoded path parameters.
        url = self.get_most_recent_action.metadata["url"]  # type: ignore
        path_format_arguments = {
            "workspaceName": self._serialize.url(
                "workspace_name", workspace_name, "str"
            ),
            "sessionId": self._serialize.url("session_id", session_id, "str"),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters (none for this operation)
        query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters["Accept"] = "application/json"
        # Construct and send request through the client pipeline
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # map_error raises a typed error for mapped status codes; any
            # other non-200 falls through to a generic HttpResponseError
            # carrying the deserialized ProblemDetails body.
            map_error(
                status_code=response.status_code, response=response, error_map=error_map
            )
            error = self._deserialize(models.ProblemDetails, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize("Event", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_most_recent_action.metadata = {"url": "/v2/workspaces/{workspaceName}/simulatorSessions/{sessionId}/action"}  # type: ignore
    def advance(
        self,
        workspace_name,  # type: str
        session_id,  # type: str
        body,  # type: "models.SimulatorState"
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.Event"
        """Advance the RL agent with the new state of the simulator, and returns an action computed by our policy.

        The simulator session is supposed to use the returned action for stepping inside the sim and
        then getting the new state.
        You can send the same state again, as long as you didn't get a Non-Idle Action back.

        :param workspace_name: The workspace identifier.
        :type workspace_name: str
        :param session_id: Unique identifier for the simulator.
        :type session_id: str
        :param body: The new state of the simulator.
        :type body: ~microsoft_bonsai_api.simulator.generated.models.SimulatorState
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Event, or the result of cls(response)
        :rtype: ~microsoft_bonsai_api.simulator.generated.models.Event
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["models.Event"]
        # 404/409 are mapped to typed azure-core exceptions; callers may
        # extend/override this via the 'error_map' kwarg.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        content_type = kwargs.pop("content_type", "application/json-patch+json")
        # Construct URL: expand the operation's URL template with the
        # URL-encoded path parameters.
        url = self.advance.metadata["url"]  # type: ignore
        path_format_arguments = {
            "workspaceName": self._serialize.url(
                "workspace_name", workspace_name, "str"
            ),
            "sessionId": self._serialize.url("session_id", session_id, "str"),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters (none for this operation)
        query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters["Content-Type"] = self._serialize.header(
            "content_type", content_type, "str"
        )
        header_parameters["Accept"] = "application/json"
        # Construct and send request: serialize the SimulatorState payload
        # and POST it through the client pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(body, "SimulatorState")
        body_content_kwargs["content"] = body_content
        request = self._client.post(
            url, query_parameters, header_parameters, **body_content_kwargs
        )
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # map_error raises a typed error for mapped status codes; any
            # other non-200 falls through to a generic HttpResponseError
            # carrying the deserialized ProblemDetails body.
            map_error(
                status_code=response.status_code, response=response, error_map=error_map
            )
            error = self._deserialize(models.ProblemDetails, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize("Event", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    advance.metadata = {"url": "/v2/workspaces/{workspaceName}/simulatorSessions/{sessionId}/advance"}  # type: ignore
| 40.953061
| 132
| 0.642697
| 2,139
| 20,067
| 5.875643
| 0.122955
| 0.031031
| 0.011935
| 0.015595
| 0.779679
| 0.759389
| 0.726846
| 0.702817
| 0.694383
| 0.680697
| 0
| 0.005307
| 0.258135
| 20,067
| 489
| 133
| 41.03681
| 0.83892
| 0.363134
| 0
| 0.670412
| 1
| 0
| 0.100754
| 0.040034
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026217
| false
| 0
| 0.026217
| 0
| 0.101124
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
48455668d8506b83de525a4aac32059ad6805848
| 166
|
py
|
Python
|
demonoid/__init__.py
|
syndbg/demonoid-api
|
518aa389ac91b5243b92fc19923103f31041a61e
|
[
"MIT"
] | 3
|
2015-11-17T13:04:25.000Z
|
2019-04-14T20:27:32.000Z
|
demonoid/__init__.py
|
syndbg/demonoid-api
|
518aa389ac91b5243b92fc19923103f31041a61e
|
[
"MIT"
] | null | null | null |
demonoid/__init__.py
|
syndbg/demonoid-api
|
518aa389ac91b5243b92fc19923103f31041a61e
|
[
"MIT"
] | null | null | null |
from .constants import Category, SortBy, Quality, Language, TrackedBy, State
from .structures import Torrent, List, Paginated, Search, Demonoid
from .urls import Url
| 41.5
| 76
| 0.801205
| 21
| 166
| 6.333333
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126506
| 166
| 3
| 77
| 55.333333
| 0.917241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4866c7bc19a0ddf6298bc533d0e633c14a5661a9
| 138
|
py
|
Python
|
python/testData/formatter/blankLineAroundClasses_after.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
python/testData/formatter/blankLineAroundClasses_after.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
python/testData/formatter/blankLineAroundClasses_after.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
class Adjunct:
    def apply(self, right, arg):
        # Intentionally empty placeholder body.
        pass
class A: pass  # empty placeholder class
def foo(): pass  # no-op placeholder function
class B: pass  # empty placeholder class
print('239')  # module-level side effect: prints the marker string on import
| 8.625
| 32
| 0.536232
| 19
| 138
| 3.894737
| 0.684211
| 0.243243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033708
| 0.355072
| 138
| 15
| 33
| 9.2
| 0.797753
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.571429
| 0
| 0
| 0.714286
| 0.142857
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
4875a0f443ec8ef95077f479ff041669983f2a43
| 49
|
py
|
Python
|
code-augmentor-core/src/test/resources/tasks/PreCodeAugmentationGenericTaskTest/task-spec-05-1.py
|
aaronicsubstances/code-augmentor
|
6a192372d1bdeb9c4303fc0cc68cedf5ffda3530
|
[
"MIT"
] | null | null | null |
code-augmentor-core/src/test/resources/tasks/PreCodeAugmentationGenericTaskTest/task-spec-05-1.py
|
aaronicsubstances/code-augmentor
|
6a192372d1bdeb9c4303fc0cc68cedf5ffda3530
|
[
"MIT"
] | 1
|
2021-01-19T10:06:01.000Z
|
2021-01-19T10:06:27.000Z
|
code-augmentor-core/src/test/resources/tasks/PreCodeAugmentationGenericTaskTest/task-spec-05-1.py
|
aaronicsubstances/code-augmentor
|
6a192372d1bdeb9c4303fc0cc68cedf5ffda3530
|
[
"MIT"
] | null | null | null |
#PHP7 generate
#PHP7
#ARG [ { tea=6
#ARG ]
| 9.8
| 16
| 0.530612
| 7
| 49
| 3.714286
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 0.306122
| 49
| 4
| 17
| 12.25
| 0.676471
| 0.714286
| 0
| null | 1
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
487c7c0c776a98822e078080cd9a02f2734693e6
| 68
|
py
|
Python
|
server/opendp_apps/dataset/user_dataset_util.py
|
mikephelan/opendp-ux
|
80c65da0ed17adc01c69b05dbc9cbf3a5973a016
|
[
"MIT"
] | null | null | null |
server/opendp_apps/dataset/user_dataset_util.py
|
mikephelan/opendp-ux
|
80c65da0ed17adc01c69b05dbc9cbf3a5973a016
|
[
"MIT"
] | null | null | null |
server/opendp_apps/dataset/user_dataset_util.py
|
mikephelan/opendp-ux
|
80c65da0ed17adc01c69b05dbc9cbf3a5973a016
|
[
"MIT"
] | null | null | null |
from opendp_apps.model_helpers.basic_err_check import BasicErrCheck
| 34
| 67
| 0.911765
| 10
| 68
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 68
| 1
| 68
| 68
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6fa50af1344d185c9d4a063630c4a7de971607dd
| 96
|
py
|
Python
|
vaccine/vaccine_cert_config.py
|
praekeltfoundation/vaccine-eligibility
|
041010cbb14a12854a9644d97e56b63ba62cd32e
|
[
"BSD-3-Clause"
] | null | null | null |
vaccine/vaccine_cert_config.py
|
praekeltfoundation/vaccine-eligibility
|
041010cbb14a12854a9644d97e56b63ba62cd32e
|
[
"BSD-3-Clause"
] | 6
|
2021-04-09T11:09:23.000Z
|
2022-03-29T08:35:25.000Z
|
vaccine/vaccine_cert_config.py
|
praekeltfoundation/vaccine-eligibility
|
041010cbb14a12854a9644d97e56b63ba62cd32e
|
[
"BSD-3-Clause"
] | null | null | null |
from os import environ
# Service connection settings read from the environment; environ.get returns
# None when a variable is unset.
API_HOST = environ.get("API_HOST")
API_TOKEN = environ.get("API_TOKEN")
| 19.2
| 36
| 0.760417
| 16
| 96
| 4.3125
| 0.5
| 0.202899
| 0.376812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114583
| 96
| 4
| 37
| 24
| 0.811765
| 0
| 0
| 0
| 0
| 0
| 0.177083
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6fdeddc76efb6a5a39de25fb607da12241236e47
| 194
|
py
|
Python
|
dzo/loader/__init__.py
|
moriaki3193/dzo
|
c0a221a8038879107a5fe07d2b9452abf51815b1
|
[
"MIT"
] | 8
|
2019-07-27T11:48:55.000Z
|
2019-09-19T07:27:15.000Z
|
dzo/loader/__init__.py
|
moriaki3193/dzo
|
c0a221a8038879107a5fe07d2b9452abf51815b1
|
[
"MIT"
] | 7
|
2019-08-25T17:08:45.000Z
|
2019-09-14T14:04:49.000Z
|
dzo/loader/__init__.py
|
moriaki3193/dzo
|
c0a221a8038879107a5fe07d2b9452abf51815b1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Loader module
Loaders help preprocessing pipelines to read records from local directories,
remote storages, and databases.
"""
from .directory import DirectoryLoader
| 24.25
| 76
| 0.762887
| 23
| 194
| 6.434783
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005988
| 0.139175
| 194
| 7
| 77
| 27.714286
| 0.88024
| 0.752577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b50eac7001c93bed7c4d9048d3e282a810844edc
| 163
|
py
|
Python
|
src/FLABasicTools/__init__.py
|
Fair-Lines-America/FLA_basic_tools
|
9aedc23ef4b9df2bd530c96fedd94e046eb545c8
|
[
"MIT"
] | null | null | null |
src/FLABasicTools/__init__.py
|
Fair-Lines-America/FLA_basic_tools
|
9aedc23ef4b9df2bd530c96fedd94e046eb545c8
|
[
"MIT"
] | null | null | null |
src/FLABasicTools/__init__.py
|
Fair-Lines-America/FLA_basic_tools
|
9aedc23ef4b9df2bd530c96fedd94e046eb545c8
|
[
"MIT"
] | null | null | null |
from .geo_splits import get_links,community_split
from .data_build import get_census_shp, assign_baf
from .overlap import getData, Overlap_old_new, Overlap_compare
| 54.333333
| 62
| 0.871166
| 26
| 163
| 5.076923
| 0.730769
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08589
| 163
| 3
| 62
| 54.333333
| 0.885906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
82ec997c981b3f2e223128f7cfa81fca8b742cb9
| 180
|
py
|
Python
|
_08_DecoratorsLab/_01_NumberIncrement.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | 1
|
2021-06-30T10:34:38.000Z
|
2021-06-30T10:34:38.000Z
|
_08_DecoratorsLab/_01_NumberIncrement.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | null | null | null |
_08_DecoratorsLab/_01_NumberIncrement.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | null | null | null |
def number_increment(numbers):
    """Return a new list with every element of ``numbers`` increased by one.

    :param numbers: iterable of numbers to increment
    :return: list of the incremented values
    """
    # The original wrapped this in a needless zero-argument inner closure;
    # a single comprehension expresses the same result directly.
    return [number + 1 for number in numbers]
print(number_increment([1, 2, 3]))  # demo call: prints [2, 3, 4]
| 20
| 44
| 0.633333
| 23
| 180
| 4.869565
| 0.608696
| 0.267857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029851
| 0.255556
| 180
| 8
| 45
| 22.5
| 0.80597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0.166667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
d20b998f55aa656c1fd9ebef09df887220ba0022
| 183
|
py
|
Python
|
Python/py-ssl-omniORB/example__POA/__init__.py
|
egustafson/sandbox
|
9804e966347b33558b0497a04edb1a591d2d7773
|
[
"Apache-2.0"
] | 2
|
2019-09-27T21:25:26.000Z
|
2019-12-29T11:26:54.000Z
|
Python/py-ssl-omniORB/example__POA/__init__.py
|
egustafson/sandbox
|
9804e966347b33558b0497a04edb1a591d2d7773
|
[
"Apache-2.0"
] | 7
|
2020-08-11T17:32:14.000Z
|
2020-08-11T17:32:39.000Z
|
Python/py-ssl-omniORB/example__POA/__init__.py
|
egustafson/sandbox
|
9804e966347b33558b0497a04edb1a591d2d7773
|
[
"Apache-2.0"
] | 2
|
2016-07-18T10:55:50.000Z
|
2020-08-19T01:46:08.000Z
|
# DO NOT EDIT THIS FILE!
#
# Python module example__POA generated by omniidl
# ** 1. Stub files contributing to this module
import example_echo_idl
# ** 2. Sub-modules
# ** 3. End
| 16.636364
| 49
| 0.710383
| 28
| 183
| 4.5
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 0.196721
| 183
| 10
| 50
| 18.3
| 0.836735
| 0.781421
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d21b0ccd290d6eb2dca262fb63b2b70c21a17878
| 293
|
py
|
Python
|
firewall-config/firewallconfig/errors.py
|
wtsi-hgi/webhook-router
|
a36987055ec4c1bcb443d391807c6469e3d21ba8
|
[
"MIT"
] | 2
|
2017-11-21T11:16:44.000Z
|
2022-01-05T23:17:50.000Z
|
firewall-config/firewallconfig/errors.py
|
wtsi-hgi/webhook-router
|
a36987055ec4c1bcb443d391807c6469e3d21ba8
|
[
"MIT"
] | 14
|
2017-10-17T16:05:39.000Z
|
2022-02-12T02:42:49.000Z
|
firewall-config/firewallconfig/errors.py
|
wtsi-hgi/webhook-router
|
a36987055ec4c1bcb443d391807c6469e3d21ba8
|
[
"MIT"
] | null | null | null |
class InvalidCredentialsError(Exception):
    """Signals that the supplied credentials were rejected."""
class NotAuthorisedError(Exception):
    """Signals that the caller lacks authorisation for the operation."""
class InvalidRouteUUIDError(Exception):
    """Signals that a route UUID is malformed or unknown."""
class RouteLinkNotFound(Exception):
    """Signals that a requested route link does not exist."""
class InvalidURLError(Exception):
    """Signals that a URL failed validation."""
class InvalidRouteTokenError(Exception):
    """Signals that a route token is invalid."""
| 13.318182
| 41
| 0.764505
| 24
| 293
| 9.333333
| 0.375
| 0.348214
| 0.401786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174061
| 293
| 22
| 42
| 13.318182
| 0.92562
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
d244d7e864957e90d8e113389ac9dfda35e0b535
| 225
|
py
|
Python
|
fitness_functions.py
|
proste/genetals
|
1af191915bdb6171b063ab00b2fc1d03bcf059c1
|
[
"MIT"
] | null | null | null |
fitness_functions.py
|
proste/genetals
|
1af191915bdb6171b063ab00b2fc1d03bcf059c1
|
[
"MIT"
] | null | null | null |
fitness_functions.py
|
proste/genetals
|
1af191915bdb6171b063ab00b2fc1d03bcf059c1
|
[
"MIT"
] | null | null | null |
import numpy as np
from .core import FitnessFncBase
class NormalizingFitness(FitnessFncBase):
    """Fitness function that rescales raw objectives to sum to one."""

    def __call__(self, genes: np.ndarray, objectives: np.ndarray) -> np.ndarray:
        """Return ``objectives`` divided by their total."""
        total = np.sum(objectives)
        return objectives / total
| 25
| 80
| 0.746667
| 27
| 225
| 6.074074
| 0.62963
| 0.164634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164444
| 225
| 8
| 81
| 28.125
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
d25238553282d16c4851c4f4f2c818c9fe0f31d5
| 56
|
py
|
Python
|
SearchForDividingCircle/DividingCircle/__init__.py
|
Alladin9393/Search-For-Dividing-Circle
|
4579be4dadc32cc9d66cca5e303bdde8a24cf99c
|
[
"MIT"
] | null | null | null |
SearchForDividingCircle/DividingCircle/__init__.py
|
Alladin9393/Search-For-Dividing-Circle
|
4579be4dadc32cc9d66cca5e303bdde8a24cf99c
|
[
"MIT"
] | null | null | null |
SearchForDividingCircle/DividingCircle/__init__.py
|
Alladin9393/Search-For-Dividing-Circle
|
4579be4dadc32cc9d66cca5e303bdde8a24cf99c
|
[
"MIT"
] | null | null | null |
"""Model."""
from .DividingCircle import DividingCircle
| 18.666667
| 42
| 0.767857
| 5
| 56
| 8.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 56
| 2
| 43
| 28
| 0.843137
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d268243433e9638dcf1f12e76dd53c54fe54b295
| 11,722
|
py
|
Python
|
rdt/performance/datasets/categorical.py
|
HDI-Project/RDT
|
f1648d10346f4e431957aca65e25a00879a5d419
|
[
"MIT"
] | 8
|
2018-06-20T22:59:07.000Z
|
2019-02-19T08:48:53.000Z
|
rdt/performance/datasets/categorical.py
|
HDI-Project/RDT
|
f1648d10346f4e431957aca65e25a00879a5d419
|
[
"MIT"
] | 63
|
2018-06-20T22:08:37.000Z
|
2019-12-16T18:57:08.000Z
|
rdt/performance/datasets/categorical.py
|
HDI-Project/RDT
|
f1648d10346f4e431957aca65e25a00879a5d419
|
[
"MIT"
] | 5
|
2018-11-06T16:45:48.000Z
|
2020-01-02T13:41:07.000Z
|
"""Dataset Generators for categorical transformers."""
from abc import ABC
import numpy as np
from rdt.performance.datasets.base import BaseDatasetGenerator
from rdt.performance.datasets.datetime import RandomGapDatetimeGenerator
from rdt.performance.datasets.utils import add_nans
class CategoricalGenerator(BaseDatasetGenerator, ABC):
    """Base class for generators that generate categorical data."""

    # Shared sdtype tag for every concrete generator below.
    SDTYPE = 'categorical'
class RandomIntegerGenerator(CategoricalGenerator):
    """Generator that creates an array of random integers."""

    @staticmethod
    def generate(num_rows):
        """Draw ``num_rows`` values uniformly from the integers 1-5."""
        return np.random.choice(a=[1, 2, 3, 4, 5], size=num_rows)

    @staticmethod
    def get_performance_thresholds():
        """Return the expected thresholds."""
        thresholds = {}
        thresholds['fit'] = {'time': 1e-05, 'memory': 400.0}
        thresholds['transform'] = {'time': 5e-06, 'memory': 400.0}
        thresholds['reverse_transform'] = {'time': 1e-05, 'memory': 1000.0}
        return thresholds
class RandomIntegerNaNsGenerator(CategoricalGenerator):
    """Generator that creates an array of random integers with nans."""

    @staticmethod
    def generate(num_rows):
        """Generate a ``num_rows`` number of rows."""
        # Cast to float first so NaN can be stored in the array.
        return add_nans(RandomIntegerGenerator.generate(num_rows).astype(float))

    @staticmethod
    def get_performance_thresholds():
        """Return the expected thresholds."""
        return {
            'fit': {
                'time': 1e-05,
                'memory': 400.0
            },
            'transform': {
                'time': 5e-06,
                'memory': 1000.0
            },
            'reverse_transform': {
                'time': 1e-05,
                'memory': 1000.0,
            }
        }
class RandomStringGenerator(CategoricalGenerator):
    """Generator that creates an array of random strings."""

    @staticmethod
    def generate(num_rows):
        """Generate a ``num_rows`` number of rows."""
        categories = ['Alice', 'Bob', 'Charlie', 'Dave', 'Eve']
        return np.random.choice(a=categories, size=num_rows)

    @staticmethod
    def get_performance_thresholds():
        """Return the expected thresholds."""
        return {
            'fit': {
                'time': 1e-05,
                'memory': 500.0
            },
            'transform': {
                'time': 1e-05,
                'memory': 500.0
            },
            'reverse_transform': {
                'time': 1e-05,
                'memory': 1000.0,
            }
        }
class RandomStringNaNsGenerator(CategoricalGenerator):
    """Generator that creates an array of random strings with nans."""

    @staticmethod
    def generate(num_rows):
        """Generate a ``num_rows`` number of rows."""
        # Cast to object dtype so float NaN can coexist with strings.
        return add_nans(RandomStringGenerator.generate(num_rows).astype('O'))

    @staticmethod
    def get_performance_thresholds():
        """Return the expected thresholds."""
        return {
            'fit': {
                'time': 1e-05,
                'memory': 400.0
            },
            'transform': {
                'time': 1e-05,
                'memory': 1000.0
            },
            'reverse_transform': {
                'time': 1e-05,
                'memory': 1000.0,
            }
        }
class RandomMixedGenerator(CategoricalGenerator):
    """Generator that creates an array of random mixed types.

    Mixed types include: int, float, bool, string, datetime.
    """

    @staticmethod
    def generate(num_rows):
        """Generate a ``num_rows`` number of rows."""
        cat_size = 5
        # Build one object-dtype pool containing cat_size values of each type
        # (plus two booleans), then sample num_rows values from it.
        categories = np.hstack([
            cat.astype('O') for cat in [
                RandomGapDatetimeGenerator.generate(cat_size),
                np.random.randint(0, 100, cat_size),
                np.random.uniform(0, 100, cat_size),
                np.arange(cat_size).astype(str),
                np.array([True, False])
            ]
        ])
        return np.random.choice(a=categories, size=num_rows)

    @staticmethod
    def get_performance_thresholds():
        """Return the expected thresholds."""
        return {
            'fit': {
                'time': 1e-05,
                'memory': 400.0
            },
            'transform': {
                'time': 1e-05,
                'memory': 1000.0
            },
            'reverse_transform': {
                'time': 1e-05,
                'memory': 2000.0,
            }
        }
class RandomMixedNaNsGenerator(CategoricalGenerator):
    """Generator that creates an array of random mixed types with nans.

    Mixed types include: int, float, bool, string, datetime.
    """

    @staticmethod
    def generate(num_rows):
        """Generate a ``num_rows`` number of rows."""
        array = RandomMixedGenerator.generate(num_rows)
        length = len(array)
        # NOTE(review): randint(1, length) raises ValueError when length < 2,
        # so this generator implicitly assumes num_rows >= 2.
        num_nulls = np.random.randint(1, length)
        # np.random.choice samples with replacement by default, so duplicate
        # indices mean the realized null count can be below num_nulls.
        nulls_idx = np.random.choice(range(length), num_nulls)
        # Mix the three distinct "missing" representations deliberately.
        nulls = np.random.choice([np.nan, float('nan'), None], num_nulls)
        array[nulls_idx] = nulls
        return array

    @staticmethod
    def get_performance_thresholds():
        """Return the expected thresholds."""
        return {
            'fit': {
                'time': 1e-05,
                'memory': 400.0
            },
            'transform': {
                'time': 1e-05,
                'memory': 2000.0
            },
            'reverse_transform': {
                'time': 1e-05,
                'memory': 2000.0,
            }
        }
class SingleIntegerGenerator(CategoricalGenerator):
    """Generator that creates an array with a single integer."""

    @staticmethod
    def generate(num_rows):
        """Generate a ``num_rows`` number of rows."""
        # One random constant repeated across the whole column.
        constant = np.random.randint(0, 100)
        return np.full(num_rows, constant)

    @staticmethod
    def get_performance_thresholds():
        """Return the expected thresholds."""
        return {
            'fit': {
                'time': 1e-05,
                'memory': 400.0
            },
            'transform': {
                'time': 3e-06,
                'memory': 200.0
            },
            'reverse_transform': {
                'time': 1e-05,
                'memory': 400.0,
            }
        }
class SingleIntegerNaNsGenerator(CategoricalGenerator):
    """Generator that creates an array with a single integer with some nans."""

    @staticmethod
    def generate(num_rows):
        """Generate a ``num_rows`` number of rows."""
        # Cast to float first so NaN can be stored in the array.
        return add_nans(SingleIntegerGenerator.generate(num_rows).astype(float))

    @staticmethod
    def get_performance_thresholds():
        """Return the expected thresholds."""
        return {
            'fit': {
                'time': 1e-05,
                'memory': 400.0
            },
            'transform': {
                'time': 3e-06,
                'memory': 200.0
            },
            'reverse_transform': {
                'time': 1e-05,
                'memory': 500.0,
            }
        }
class SingleStringGenerator(CategoricalGenerator):
    """Generator that creates an array of a single string."""

    @staticmethod
    def generate(num_rows):
        """Generate a ``num_rows`` number of rows."""
        # Fixed constant repeated across the whole column.
        constant = 'A'
        return np.full(num_rows, constant)

    @staticmethod
    def get_performance_thresholds():
        """Return the expected thresholds."""
        return {
            'fit': {
                'time': 1e-05,
                'memory': 400.0
            },
            'transform': {
                'time': 4e-06,
                'memory': 200.0
            },
            'reverse_transform': {
                'time': 1e-05,
                'memory': 400.0,
            }
        }
class SingleStringNaNsGenerator(CategoricalGenerator):
    """Generator that creates an array of a single string with nans."""

    @staticmethod
    def generate(num_rows):
        """Generate a ``num_rows`` number of rows."""
        # Cast to object dtype so float NaN can coexist with strings.
        return add_nans(SingleStringGenerator.generate(num_rows).astype('O'))

    @staticmethod
    def get_performance_thresholds():
        """Return the expected thresholds."""
        return {
            'fit': {
                'time': 1e-05,
                'memory': 400.0
            },
            'transform': {
                'time': 3e-06,
                'memory': 200.0
            },
            'reverse_transform': {
                'time': 1e-05,
                'memory': 500.0,
            }
        }
class UniqueIntegerGenerator(CategoricalGenerator):
    """Generator producing an array where every integer value is distinct."""

    @staticmethod
    def generate(num_rows):
        """Return the integers ``0 .. num_rows - 1`` as an array."""
        indices = np.arange(num_rows)
        return indices

    @staticmethod
    def get_performance_thresholds():
        """Return the expected performance thresholds."""
        return {
            'fit': {'time': 2e-05, 'memory': 2000.0},
            'transform': {'time': 0.0002, 'memory': 500000.0},
            'reverse_transform': {'time': 0.0003, 'memory': 1000000.0},
        }
class UniqueIntegerNaNsGenerator(CategoricalGenerator):
    """Generator that creates an array of unique integers with nans."""

    @staticmethod
    def generate(num_rows):
        """Generate a ``num_rows`` number of rows.

        Returns unique integers as floats with NaNs injected.
        """
        # Fix: cast to float before injecting NaNs. ``np.arange`` yields an
        # integer array, and integer arrays cannot represent NaN; every other
        # NaN generator in this module casts first (``astype(float)`` /
        # ``astype('O')``) for exactly this reason.
        return add_nans(UniqueIntegerGenerator.generate(num_rows).astype(float))

    @staticmethod
    def get_performance_thresholds():
        """Return the expected performance thresholds."""
        return {
            'fit': {'time': 1e-05, 'memory': 1000.0},
            'transform': {'time': 0.0002, 'memory': 1000000.0},
            'reverse_transform': {'time': 0.0002, 'memory': 1000000.0},
        }
class UniqueStringGenerator(CategoricalGenerator):
    """Generator producing an array where every string value is distinct."""

    @staticmethod
    def generate(num_rows):
        """Return the integers ``0 .. num_rows - 1`` converted to strings."""
        indices = np.arange(num_rows)
        return indices.astype(str)

    @staticmethod
    def get_performance_thresholds():
        """Return the expected performance thresholds."""
        return {
            'fit': {'time': 2e-05, 'memory': 2000.0},
            'transform': {'time': 0.0002, 'memory': 500000.0},
            'reverse_transform': {'time': 0.0003, 'memory': 1000000.0},
        }
class UniqueStringNaNsGenerator(CategoricalGenerator):
    """Generator producing unique strings with some NaN values mixed in."""

    @staticmethod
    def generate(num_rows):
        """Return ``num_rows`` unique strings, as objects, with NaNs injected."""
        # Object dtype lets the array hold both strings and float NaN.
        values = UniqueStringGenerator.generate(num_rows).astype('O')
        return add_nans(values)

    @staticmethod
    def get_performance_thresholds():
        """Return the expected performance thresholds."""
        return {
            'fit': {'time': 2e-05, 'memory': 1000.0},
            'transform': {'time': 0.0005, 'memory': 1000000.0},
            'reverse_transform': {'time': 0.0002, 'memory': 1000000.0},
        }
| 27.711584
| 80
| 0.506995
| 1,060
| 11,722
| 5.511321
| 0.123585
| 0.050325
| 0.034235
| 0.059911
| 0.790825
| 0.780555
| 0.77987
| 0.767545
| 0.767545
| 0.692229
| 0
| 0.050728
| 0.372718
| 11,722
| 422
| 81
| 27.777251
| 0.743778
| 0.175226
| 0
| 0.648208
| 0
| 0
| 0.09195
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091205
| false
| 0
| 0.016287
| 0
| 0.250814
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d26d7c25347ab02b8cd3238f51c44a59fcee6de1
| 143
|
py
|
Python
|
src/gluonnlp/data/__init__.py
|
leezu/gluon-nlp
|
19de74c2b03f22dde8311a0225b4571c2deef0e4
|
[
"Apache-2.0"
] | 2,461
|
2018-04-25T03:47:22.000Z
|
2022-03-31T03:58:48.000Z
|
src/gluonnlp/data/__init__.py
|
leezu/gluon-nlp
|
19de74c2b03f22dde8311a0225b4571c2deef0e4
|
[
"Apache-2.0"
] | 1,450
|
2018-04-25T16:14:25.000Z
|
2022-02-24T21:02:57.000Z
|
src/gluonnlp/data/__init__.py
|
leezu/gluon-nlp
|
19de74c2b03f22dde8311a0225b4571c2deef0e4
|
[
"Apache-2.0"
] | 578
|
2018-04-25T04:55:18.000Z
|
2022-03-16T03:01:45.000Z
|
from . import vocab
from . import tokenizers
from . import batchify
from .vocab import *
# Public API: the submodule names plus everything ``vocab`` re-exports.
__all__ = ['batchify', 'tokenizers'] + vocab.__all__
| 20.428571
| 52
| 0.734266
| 17
| 143
| 5.705882
| 0.352941
| 0.309278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160839
| 143
| 6
| 53
| 23.833333
| 0.808333
| 0
| 0
| 0
| 0
| 0
| 0.125874
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d26fab08837e940dad581f70700c84474bb70e86
| 262
|
py
|
Python
|
sentiment_analysis_auto_ml/__init__.py
|
ArtificiAI/Sentiment-Analysis-AutoML
|
1984e078481f97c3483131113aa39b3e2c767ee6
|
[
"MIT"
] | 9
|
2019-04-08T02:10:46.000Z
|
2020-06-17T15:22:28.000Z
|
testing/__init__.py
|
guillaume-chevalier/Sentiment-Analysis-AutoML
|
d6cd54ca2344ff6f2b10dc229613b1076272a94e
|
[
"MIT"
] | null | null | null |
testing/__init__.py
|
guillaume-chevalier/Sentiment-Analysis-AutoML
|
d6cd54ca2344ff6f2b10dc229613b1076272a94e
|
[
"MIT"
] | 3
|
2019-04-08T02:09:12.000Z
|
2019-04-18T22:26:10.000Z
|
"""
This files originate from the "New-Empty-Python-Project-Base" template:
https://github.com/guillaume-chevalier/New-Empty-Python-Project-Base
Created by Guillaume Chevalier:
https://github.com/guillaume-chevalier
License: CC0-1.0 (Public Domain)
"""
| 29.111111
| 72
| 0.755725
| 36
| 262
| 5.5
| 0.666667
| 0.272727
| 0.141414
| 0.212121
| 0.575758
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012821
| 0.10687
| 262
| 8
| 73
| 32.75
| 0.833333
| 0.961832
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
963b26b363f9fdd45053c17c4f5913ba8d118a15
| 163
|
py
|
Python
|
rso_manage/admin.py
|
maxweis/Web-App
|
f2df73bbc6da88d2d42713de9b72b0d1a799db6e
|
[
"Apache-2.0"
] | 2
|
2019-03-26T17:43:16.000Z
|
2019-04-01T01:18:16.000Z
|
rso_manage/admin.py
|
maxweis/Web-App
|
f2df73bbc6da88d2d42713de9b72b0d1a799db6e
|
[
"Apache-2.0"
] | 1
|
2019-03-29T11:33:56.000Z
|
2019-03-29T11:33:56.000Z
|
rso_manage/admin.py
|
maxweis/Resumania
|
f2df73bbc6da88d2d42713de9b72b0d1a799db6e
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import RSO, Registrations
# Expose the RSO and Registrations models in the Django admin site
# with the default ModelAdmin options.
admin.site.register(RSO)
admin.site.register(Registrations)
| 20.375
| 38
| 0.809816
| 22
| 163
| 6
| 0.545455
| 0.136364
| 0.257576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110429
| 163
| 7
| 39
| 23.285714
| 0.910345
| 0.159509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9645bcd60831b8f2010679036fe1878f103d0551
| 149
|
py
|
Python
|
tests/web_platform/css_flexbox_1/test_flexbox_flex_0_N.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flexbox_flex_0_N.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flexbox_flex_0_N.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | 1
|
2020-01-16T01:56:41.000Z
|
2020-01-16T01:56:41.000Z
|
from tests.utils import W3CTestCase
class TestFlexbox_Flex0N(W3CTestCase):
    # Dynamically attach one test method per matching W3C reference file:
    # find_tests returns a mapping of test names to test callables for the
    # 'flexbox_flex-0-N' group, injected into the class namespace here.
    vars().update(W3CTestCase.find_tests(__file__, 'flexbox_flex-0-N'))
| 24.833333
| 71
| 0.791946
| 19
| 149
| 5.842105
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.09396
| 149
| 5
| 72
| 29.8
| 0.785185
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9646723f353910d68d31d7da4dff55f7277832a8
| 129
|
py
|
Python
|
modules/__init__.py
|
ghost-luck/TernausNetV2
|
4df657ae731ae9b8e8fd43390af647f4cbc99baa
|
[
"BSD-3-Clause"
] | 142
|
2018-09-02T08:59:45.000Z
|
2022-03-30T17:08:24.000Z
|
DEEP LEARNING/segmentation/Kaggle TGS Salt Identification Challenge/v2/modules/__init__.py
|
jerinka/ML-DL-scripts
|
eeb5c3c7c5841eb4cdb272690e14d6718f3685b2
|
[
"Apache-2.0"
] | 4
|
2019-09-08T07:27:11.000Z
|
2021-10-19T05:50:24.000Z
|
DEEP LEARNING/segmentation/Kaggle TGS Salt Identification Challenge/v2/modules/__init__.py
|
jerinka/ML-DL-scripts
|
eeb5c3c7c5841eb4cdb272690e14d6718f3685b2
|
[
"Apache-2.0"
] | 75
|
2018-10-04T17:08:40.000Z
|
2022-03-08T18:50:52.000Z
|
from .bn import ABN, InPlaceABN, InPlaceABNWrapper
from .misc import GlobalAvgPool2d
from .residual import IdentityResidualBlock
| 32.25
| 50
| 0.852713
| 14
| 129
| 7.857143
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008696
| 0.108527
| 129
| 3
| 51
| 43
| 0.947826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
73a31b8e3e3ac34830a993c1562879d9981002da
| 2,955
|
py
|
Python
|
src/Python/tests/test_glycolysis.py
|
PathwayAnalysisPlatform/ProteoformNetworks
|
3d31e5b3cb4abc45e6419fa982c08b3dc5c2624e
|
[
"Apache-2.0"
] | 1
|
2019-08-16T12:40:14.000Z
|
2019-08-16T12:40:14.000Z
|
src/Python/tests/test_glycolysis.py
|
LuisFranciscoHS/ProteoformNetworks
|
a6baa87fe6f76905f6d58a2f7cb66aad5d8d56c5
|
[
"Apache-2.0"
] | 9
|
2019-08-16T07:33:33.000Z
|
2022-03-04T22:20:02.000Z
|
src/Python/tests/test_glycolysis.py
|
LuisFranciscoHS/ProteoformNetworks
|
a6baa87fe6f76905f6d58a2f7cb66aad5d8d56c5
|
[
"Apache-2.0"
] | 1
|
2022-02-21T17:42:48.000Z
|
2022-02-21T17:42:48.000Z
|
import pytest
# Pathway "Regulation of glycolysis by fructose" R-HSA-9634600
#region Genes No small molecules
from config import genes
from lib.graph_database_access import get_participants_by_pathway
@pytest.fixture(scope="session")
def glycolysis_get_participants(tmpdir_factory):
    # Session-scoped fixture: gene-level participants of the
    # "Regulation of glycolysis by fructose" pathway (R-HSA-9634600).
    # NOTE(review): tmpdir_factory is accepted but unused — confirm intent.
    return get_participants_by_pathway(genes, "R-HSA-9634600")
@pytest.fixture(scope="session")
def glycolysis_genes_no_sm(glycolysis_get_participants):
    # NOTE(review): this fixture looks unfinished — 'participants' and
    # 'components' are computed but never used, the bare 'return' yields
    # None, and 'get_components' is not imported in this module, so calling
    # the fixture would raise NameError. TODO confirm intended behavior.
    participants = get_participants_by_pathway(genes, "R-HSA-9634600")
    components = get_components(genes)
    return
# Test get_participants()
def test_genes_no_sm_get_participants():
    # Smoke test: fetching gene-level participants must not raise.
    # The result itself is currently unchecked.
    participants = get_participants_by_pathway(genes, "R-HSA-9634600")
# Test get_components()
# Test add_nodes()
# Test create_interaction_network()
def test_connects_inputs_with_outputs(glycolysis_graph):
    """Each reaction's input nodes must be linked to its output nodes.

    Relies on a ``glycolysis_graph`` fixture not defined in this file —
    presumably built elsewhere in the test suite; verify it exists.
    """
    # Input to output interactions for reaction R-HSA-163773:
    assert ("PFKFB1", "ADP") in glycolysis_graph.edges
    assert ("ATP", "ADP") in glycolysis_graph.edges
    assert ("ATP", "PFKFB1") in glycolysis_graph.edges
    # Input to output interactions for reaction R-HSA-163750
    assert ("PFKFB1", "Pi") in glycolysis_graph.edges
    assert ("H2O", "Pi") in glycolysis_graph.edges
    assert ("H2O", "PFKFB1") in glycolysis_graph.edges
    # Input to output interaction for reaction R-HSA-71802
    assert ("Fru(6)P", "D-Fructose 2,6-bisphosphate") in glycolysis_graph.edges
    assert ("Fru(6)P", "ADP") in glycolysis_graph.edges
    assert ("ATP", "D-Fructose 2,6-bisphosphate") in glycolysis_graph.edges
    assert ("ATP", "ADP") in glycolysis_graph.edges
#endregion
#region Genes With small molecules
# Test get_participants()
# Test get_components()
# Test add_nodes()
# Test create_interaction_network()
#endregion
#region Genes With reaction-specific ids for small molecules
# Test get_participants()
# Test get_components()
# Test add_nodes()
# Test create_interaction_network()
#endregion
#region Proteins No small molecules
# Test get_participants()
# Test get_components()
# Test add_nodes()
# Test create_interaction_network()
#endregion
#region Proteins With small molecules
# Test get_participants()
# Test get_components()
# Test add_nodes()
# Test create_interaction_network()
#endregion
#region Proteins With reaction-specific ids for small molecules
# Test get_participants()
# Test get_components()
# Test add_nodes()
# Test create_interaction_network()
#endregion
#region Proteoforms No small molecules
# Test get_participants()
# Test get_components()
# Test add_nodes()
# Test create_interaction_network()
#endregion
#region Proteoforms With small molecules
# Test get_participants()
# Test get_components()
# Test add_nodes()
# Test create_interaction_network()
#endregion
#region Proteoforms With reaction-specific ids for small molecules
# Test get_participants()
# Test get_components()
# Test add_nodes()
# Test create_interaction_network()
#endregion
| 24.831933
| 79
| 0.76819
| 386
| 2,955
| 5.65285
| 0.183938
| 0.057745
| 0.07791
| 0.100825
| 0.796975
| 0.796975
| 0.762145
| 0.710816
| 0.624198
| 0.624198
| 0
| 0.022292
| 0.134687
| 2,955
| 119
| 80
| 24.831933
| 0.831052
| 0.532657
| 0
| 0.25
| 0
| 0
| 0.135299
| 0
| 0
| 0
| 0
| 0
| 0.416667
| 1
| 0.166667
| false
| 0
| 0.125
| 0.041667
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
73bd9a927d4198d23ac87660e426cc27a817b4ac
| 663
|
py
|
Python
|
tests/test_version.py
|
jgru/dfxml_python
|
7f0a5c74051bf2fd80109e948e3adf3222d195ae
|
[
"CC0-1.0"
] | null | null | null |
tests/test_version.py
|
jgru/dfxml_python
|
7f0a5c74051bf2fd80109e948e3adf3222d195ae
|
[
"CC0-1.0"
] | null | null | null |
tests/test_version.py
|
jgru/dfxml_python
|
7f0a5c74051bf2fd80109e948e3adf3222d195ae
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. NIST assumes no
# responsibility whatsoever for its use by other parties, and makes
# no guarantees, expressed or implied, about its quality,
# reliability, or any other characteristic.
#
# We would appreciate acknowledgement if the software is used.
import dfxml
def test_version():
    """The dfxml package must expose a non-None ``__version__``."""
    # Fixed idiom: ``x is not None`` is the recommended spelling (PEP 8);
    # the original ``not dfxml.__version__ is None`` parses the same but
    # reads ambiguously.
    assert dfxml.__version__ is not None
| 36.833333
| 69
| 0.78733
| 100
| 663
| 5.17
| 0.74
| 0.046422
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010909
| 0.170437
| 663
| 17
| 70
| 39
| 0.929091
| 0.850679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
73f152c469ae402b8517375d90ade1c8e0bb3296
| 5,856
|
py
|
Python
|
harmonica/table.py
|
i-a-morozov/harmonica
|
546e664e59457ad9cc354d108402137e90e0d8c2
|
[
"MIT"
] | null | null | null |
harmonica/table.py
|
i-a-morozov/harmonica
|
546e664e59457ad9cc354d108402137e90e0d8c2
|
[
"MIT"
] | null | null | null |
harmonica/table.py
|
i-a-morozov/harmonica
|
546e664e59457ad9cc354d108402137e90e0d8c2
|
[
"MIT"
] | null | null | null |
"""
Table module.
"""
import torch
from .decomposition import Decomposition
class Table():
    """Container for frequency, amplitude and phase data at a list of locations.

    Parameters
    ----------
    name: list
        location names
    nux, nuy: torch.Tensor
        x/y frequency (fractional part)
    ax, ay: torch.Tensor
        x/y amplitude
    fx, fy: torch.Tensor
        x/y phase
    sigma_nux, sigma_nuy: torch.Tensor
        x/y frequency error (optional, defaults to scalar zero)
    sigma_ax, sigma_ay: torch.Tensor
        x/y amplitude error (optional, defaults to per-location zeros)
    sigma_fx, sigma_fy: torch.Tensor
        x/y phase error (optional, defaults to per-location zeros)
    dtype: torch.dtype
        data type
    device: torch.device
        data device

    Attributes
    ----------
    All constructor parameters are stored as attributes (tensors moved to
    ``dtype``/``device``), plus:
    size: int
        number of locations (``len(name)``)
    phase_x, phase_y: torch.Tensor
        x/y phase advance from each location to the next one
    sigma_x, sigma_y: torch.Tensor
        x/y phase advance error from each location to the next one

    Methods
    ----------
    __init__(...) -> None
        Table instance initialization.
    __repr__(self) -> str
        String representation.
    """
    def __init__(self, name:list,
                 nux:torch.Tensor, nuy:torch.Tensor,
                 ax:torch.Tensor, ay:torch.Tensor, fx:torch.Tensor, fy:torch.Tensor,
                 sigma_nux:torch.Tensor=None, sigma_nuy:torch.Tensor=None,
                 sigma_ax:torch.Tensor=None, sigma_ay:torch.Tensor=None,
                 sigma_fx:torch.Tensor=None, sigma_fy:torch.Tensor=None, *,
                 dtype:torch.dtype=torch.float64, device:torch.device='cpu') -> None:
        """Table instance initialization.

        See the class docstring for parameter descriptions.

        Returns
        -------
        None
        """
        self.name = name
        self.size = len(name)
        self.dtype, self.device = dtype, device
        # Move every measured tensor to the requested dtype/device.
        self.nux = nux.to(self.dtype).to(self.device)
        self.nuy = nuy.to(self.dtype).to(self.device)
        self.ax = ax.to(self.dtype).to(self.device)
        self.ay = ay.to(self.dtype).to(self.device)
        self.fx = fx.to(self.dtype).to(self.device)
        self.fy = fy.to(self.dtype).to(self.device)
        # Frequency errors default to a scalar zero when not provided.
        self.sigma_nux = torch.tensor(0.0, dtype=dtype, device=device) if sigma_nux is None else sigma_nux.to(self.dtype).to(self.device)
        self.sigma_nuy = torch.tensor(0.0, dtype=dtype, device=device) if sigma_nuy is None else sigma_nuy.to(self.dtype).to(self.device)
        # Amplitude/phase errors default to one zero per location.
        zero = torch.zeros(self.size, dtype=self.dtype, device=self.device)
        self.sigma_ax = torch.clone(zero) if sigma_ax is None else sigma_ax.to(self.dtype).to(self.device)
        self.sigma_ay = torch.clone(zero) if sigma_ay is None else sigma_ay.to(self.dtype).to(self.device)
        self.sigma_fx = torch.clone(zero) if sigma_fx is None else sigma_fx.to(self.dtype).to(self.device)
        self.sigma_fy = torch.clone(zero) if sigma_fy is None else sigma_fy.to(self.dtype).to(self.device)
        # Phase advance from each location (probe) to the next one (other),
        # with error propagation, for both planes.
        probe = torch.tensor(range(self.size), dtype=torch.int64, device=self.device)
        other = probe + 1
        self.phase_x, self.sigma_x = Decomposition.phase_advance(probe, other, self.nux, self.fx, error=True, sigma_frequency=self.sigma_nux, sigma_phase=self.sigma_fx, model=False)
        self.phase_y, self.sigma_y = Decomposition.phase_advance(probe, other, self.nuy, self.fy, error=True, sigma_frequency=self.sigma_nuy, sigma_phase=self.sigma_fy, model=False)
    def __repr__(self) -> str:
        """String representation (class name and number of locations)."""
        return f'Table({self.size})'
def main():
    """Script entry point; placeholder with no standalone behavior."""
    return None
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| 31.315508
| 374
| 0.615437
| 784
| 5,856
| 4.482143
| 0.094388
| 0.209733
| 0.068298
| 0.044394
| 0.820717
| 0.796813
| 0.722254
| 0.698634
| 0.617245
| 0.617245
| 0
| 0.00262
| 0.283128
| 5,856
| 187
| 375
| 31.315508
| 0.834445
| 0.476776
| 0
| 0
| 0
| 0
| 0.012124
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.027778
| 0.055556
| 0
| 0.194444
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fb4ca0fa5e7de3689bf253b8f20536913f92f7bf
| 2,677
|
py
|
Python
|
tests/test_day_06.py
|
masasin/advent_of_code_2015
|
032bcf780b59d261b3913673fd33001a98f2f69d
|
[
"MIT"
] | null | null | null |
tests/test_day_06.py
|
masasin/advent_of_code_2015
|
032bcf780b59d261b3913673fd33001a98f2f69d
|
[
"MIT"
] | null | null | null |
tests/test_day_06.py
|
masasin/advent_of_code_2015
|
032bcf780b59d261b3913673fd33001a98f2f69d
|
[
"MIT"
] | null | null | null |
import numpy as np
from day_06 import parse_instruction, follow_instruction, follow_elvish
def test_parse_instruction():
    """parse_instruction splits a line into (action, x-slice, y-slice)."""
    cases = {
        "turn on 0,0 through 999,999": ("turn on", slice(0, 1000), slice(0, 1000)),
        "toggle 0,0 through 999,0": ("toggle", slice(0, 1000), slice(0, 1)),
        "turn off 499,499 through 500,500": ("turn off", slice(499, 501), slice(499, 501)),
    }
    for instruction, expected in cases.items():
        assert parse_instruction(instruction) == expected
def test_follow_instruction():
    """follow_instruction applies turn on/off/toggle to a boolean light grid."""
    # 'turn on' forces the region to 1 regardless of prior state.
    orig_array = np.zeros((1000, 1000))
    true_array = np.ones((1000, 1000))
    new_array = follow_instruction("turn on 0,0 through 999,999", orig_array)
    assert new_array.all()
    new_array = follow_instruction("turn on 0,0 through 999,999", true_array)
    assert new_array.all()
    # 'turn off' forces the region to 0 regardless of prior state.
    orig_array = np.zeros((1000, 1000))
    true_array = np.ones((1000, 1000))
    new_array = follow_instruction("turn off 499,499 through 500,500",
                                   orig_array)
    assert not new_array[499:500, 499:500].any()
    new_array = follow_instruction("turn off 499,499 through 500,500",
                                   true_array)
    assert not new_array[499:500, 499:500].any()
    # 'toggle' inverts the region: zeros become ones and vice versa.
    orig_array = np.zeros((1000, 1000))
    true_array = np.ones((1000, 1000))
    new_array = follow_instruction("toggle 499,499 through 500,500", orig_array)
    assert new_array[499:500, 499:500].all()
    new_array = follow_instruction("toggle 499,499 through 500,500", true_array)
    assert not new_array[499:500, 499:500].any()
    # Toggle of a single-row strip (y fixed at 0).
    orig_array = np.zeros((1000, 1000))
    true_array = np.ones((1000, 1000))
    new_array = follow_instruction("toggle 0,0 through 999,0", orig_array)
    assert new_array[:, 0].all()
    new_array = follow_instruction("toggle 0,0 through 999,0", true_array)
    assert not new_array[:, 0].any()
def test_follow_elvish():
    """follow_elvish interprets instructions as brightness adjustments."""
    # (instruction, expected total brightness starting from an all-zero grid)
    cases = [
        ("turn off 499,499 through 500,500", 0),
        ("turn on 499,499 through 500,500", 4),
        ("toggle 499,499 through 500,500", 8),
        ("turn on 0,0 through 0,0", 1),
        ("toggle 0,0 through 999,999", 2000000),
    ]
    for instruction, total in cases:
        grid = np.zeros((1000, 1000))
        assert follow_elvish(instruction, grid).sum() == total
| 37.180556
| 80
| 0.651476
| 398
| 2,677
| 4.18593
| 0.100503
| 0.12485
| 0.109244
| 0.086435
| 0.855342
| 0.815726
| 0.757503
| 0.739496
| 0.698079
| 0.698079
| 0
| 0.16555
| 0.219275
| 2,677
| 71
| 81
| 37.704225
| 0.631579
| 0
| 0
| 0.409836
| 0
| 0
| 0.176317
| 0
| 0
| 0
| 0
| 0
| 0.262295
| 1
| 0.04918
| false
| 0
| 0.032787
| 0
| 0.081967
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fb4fda35d84ea57a88d7b6b84fcaf6473e4fbbf1
| 72
|
py
|
Python
|
gluoncv/auto/estimators/yolo/__init__.py
|
Kh4L/gluon-cv
|
849411ed56632cd854850b07142087d599f97dcb
|
[
"Apache-2.0"
] | 5,447
|
2018-04-25T18:02:51.000Z
|
2022-03-31T00:59:49.000Z
|
gluoncv/auto/estimators/yolo/__init__.py
|
Kh4L/gluon-cv
|
849411ed56632cd854850b07142087d599f97dcb
|
[
"Apache-2.0"
] | 1,566
|
2018-04-25T21:14:04.000Z
|
2022-03-31T06:42:42.000Z
|
gluoncv/auto/estimators/yolo/__init__.py
|
Kh4L/gluon-cv
|
849411ed56632cd854850b07142087d599f97dcb
|
[
"Apache-2.0"
] | 1,345
|
2018-04-25T18:44:13.000Z
|
2022-03-30T19:32:53.000Z
|
"""YOLO Estimator implementations"""
from .yolo import YOLOv3Estimator
| 18
| 36
| 0.791667
| 7
| 72
| 8.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 0.111111
| 72
| 3
| 37
| 24
| 0.875
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
837de84c0ae704fe59a7af41676a02cf9f86f076
| 153
|
py
|
Python
|
setup.py
|
ines/spacymoji
|
38d49e3785d9f8df5d33bc50eeb8d28a941887fe
|
[
"MIT"
] | 158
|
2017-10-12T22:02:59.000Z
|
2021-04-04T12:43:12.000Z
|
setup.py
|
ines/spacymoji
|
38d49e3785d9f8df5d33bc50eeb8d28a941887fe
|
[
"MIT"
] | 11
|
2018-07-07T01:49:43.000Z
|
2021-04-18T02:31:04.000Z
|
setup.py
|
ines/spacymoji
|
38d49e3785d9f8df5d33bc50eeb8d28a941887fe
|
[
"MIT"
] | 16
|
2018-03-15T22:05:21.000Z
|
2021-04-01T19:49:12.000Z
|
#!/usr/bin/env python
# Minimal packaging entry point: configure the 'spacymoji' distribution,
# discovering packages automatically with find_packages().
if __name__ == "__main__":
    from setuptools import setup, find_packages
    setup(name="spacymoji", packages=find_packages())
| 21.857143
| 53
| 0.718954
| 19
| 153
| 5.263158
| 0.736842
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150327
| 153
| 6
| 54
| 25.5
| 0.769231
| 0.130719
| 0
| 0
| 0
| 0
| 0.128788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
839569e05b8e3d1f30956908aaa6061f73341bf8
| 341
|
py
|
Python
|
Lib/site-packages/dev-0.4.0/dev/skeleton/abstract.py
|
Srinath-tr/Goferbot
|
0f734d01c6504c6c97dbdf45f5adf8b25c0f9fd9
|
[
"Apache-2.0",
"bzip2-1.0.6"
] | 1
|
2019-04-23T21:50:08.000Z
|
2019-04-23T21:50:08.000Z
|
Lib/site-packages/dev-0.4.0/dev/skeleton/abstract.py
|
Srinath-tr/Goferbot
|
0f734d01c6504c6c97dbdf45f5adf8b25c0f9fd9
|
[
"Apache-2.0",
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/dev-0.4.0/dev/skeleton/abstract.py
|
Srinath-tr/Goferbot
|
0f734d01c6504c6c97dbdf45f5adf8b25c0f9fd9
|
[
"Apache-2.0",
"bzip2-1.0.6"
] | 2
|
2019-02-14T08:13:33.000Z
|
2019-04-23T21:47:48.000Z
|
"""
Put abstract base classes here.
If the number of ABCs is great, replace with a package module whose
sub-module names acts as categories for the contained ABCs.
Point being, avoid loading everything at once. Class objects do *not* have a tiny memory
footprint, so it is a good idea to only load them when they are needed.
"""
import abc
| 34.1
| 88
| 0.771261
| 60
| 341
| 4.383333
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 341
| 9
| 89
| 37.888889
| 0.942652
| 0.941349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
83964edc0e919d70bd5cc8cf03fd35becdd6493c
| 136
|
py
|
Python
|
selection/randomized/api.py
|
Madhav1812/selective-inference
|
3bb105c3adf8ae8e4a8e51889015c76f46c73f23
|
[
"BSD-3-Clause"
] | 1
|
2021-01-17T14:47:54.000Z
|
2021-01-17T14:47:54.000Z
|
selection/randomized/api.py
|
dankessler/selective-inference
|
7b8f232fdf19b43489d4f434c493cdd80ab8fc96
|
[
"BSD-3-Clause"
] | null | null | null |
selection/randomized/api.py
|
dankessler/selective-inference
|
7b8f232fdf19b43489d4f434c493cdd80ab8fc96
|
[
"BSD-3-Clause"
] | 1
|
2019-08-05T14:31:24.000Z
|
2019-08-05T14:31:24.000Z
|
from .query import multiple_queries, query
from .randomization import randomization
from .lasso import lasso
from .slope import slope
| 19.428571
| 42
| 0.823529
| 18
| 136
| 6.166667
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139706
| 136
| 6
| 43
| 22.666667
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
83a431654b3f70899ed6895a6bdb5c24416b9f52
| 27,223
|
py
|
Python
|
vmware_nsxlib/tests/unit/v3/policy/test_transaction.py
|
salv-orlando/vmware-nsxlib
|
283eff2881b99c57b3908d03fb1c91da7dbdf46e
|
[
"Apache-2.0"
] | null | null | null |
vmware_nsxlib/tests/unit/v3/policy/test_transaction.py
|
salv-orlando/vmware-nsxlib
|
283eff2881b99c57b3908d03fb1c91da7dbdf46e
|
[
"Apache-2.0"
] | null | null | null |
vmware_nsxlib/tests/unit/v3/policy/test_transaction.py
|
salv-orlando/vmware-nsxlib
|
283eff2881b99c57b3908d03fb1c91da7dbdf46e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
from unittest import mock
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
from vmware_nsxlib.tests.unit.v3.policy import policy_testcase
from vmware_nsxlib.v3 import policy
from vmware_nsxlib.v3.policy import constants
from vmware_nsxlib.v3.policy import transaction as trans
class TestPolicyTransaction(policy_testcase.TestPolicyApi):
    def setUp(self):
        # Build a policy library against the shared test config, then swap in
        # the base-class mock client so API calls can be asserted on.
        super(TestPolicyTransaction, self).setUp()
        nsxlib_config = nsxlib_testcase.get_default_nsxlib_config()
        # Mock the nsx-lib for the passthrough api
        with mock.patch('vmware_nsxlib.v3.NsxLib.get_version',
                        return_value='2.5.0'):
            self.policy_lib = policy.NsxPolicyLib(nsxlib_config)
        self.policy_api = self.policy_lib.policy_api
        self.policy_api.client = self.client
    def assert_infra_patch_call(self, body):
        # Helper: assert the transaction flushed as one PATCH of 'infra'
        # carrying exactly the given JSON body.
        self.assert_json_call('PATCH', self.client, 'infra',
                              data=body, headers=mock.ANY)
    def test_domains_only(self):
        """Two domains created in one transaction emit a single infra PATCH
        with one ChildDomain entry per domain."""
        tags = [{'scope': 'color', 'tag': 'green'}]
        d1 = {'resource_type': 'Domain', 'id': 'domain1',
              'display_name': 'd1', 'description': 'first domain',
              'tags': tags}
        d2 = {'resource_type': 'Domain', 'id': 'domain2',
              'display_name': 'd2', 'description': 'no tags',
              'tags': None}
        with trans.NsxPolicyTransaction():
            for d in (d1, d2):
                self.policy_lib.domain.create_or_overwrite(
                    d['display_name'],
                    d['id'],
                    d['description'],
                    tags=d['tags'] if 'tags' in d else None)
        expected_body = {'resource_type': 'Infra',
                         'children': [{'resource_type': 'ChildDomain',
                                       'Domain': d1},
                                      {'resource_type': 'ChildDomain',
                                       'Domain': d2}]}
        self.assert_infra_patch_call(expected_body)
    def test_domains_and_groups(self):
        """Domains plus nested groups created in one transaction emit a single
        infra PATCH whose ChildDomain entries carry ChildGroup children."""
        tags = [{'scope': 'color', 'tag': 'green'}]
        g1 = {'resource_type': 'Group', 'id': 'group1',
              'display_name': 'g1',
              'description': 'first group',
              'tags': None, 'expression': []}
        g2 = {'resource_type': 'Group', 'id': 'group2',
              'description': 'second group',
              'display_name': 'g2',
              'tags': tags, 'expression': []}
        g3 = {'resource_type': 'Group', 'id': 'group3',
              'display_name': 'g3',
              'description': 'third group',
              'tags': None, 'expression': []}
        d1 = {'resource_type': 'Domain', 'id': 'domain1',
              'display_name': 'd1', 'description': 'first domain',
              'tags': tags}
        d2 = {'resource_type': 'Domain', 'id': 'domain2',
              'display_name': 'd2', 'description': 'no tags',
              'tags': None}
        with trans.NsxPolicyTransaction():
            for d in (d1, d2):
                self.policy_lib.domain.create_or_overwrite(
                    d['display_name'],
                    d['id'],
                    d['description'],
                    tags=d['tags'] if 'tags' in d else None)
                # The expected-body dicts double as the expected wire format:
                # groups are appended under each domain as ChildGroup entries.
                d['children'] = []
                for g in (g1, g2, g3):
                    self.policy_lib.group.create_or_overwrite(
                        g['display_name'],
                        d['id'],
                        g['id'],
                        g['description'],
                        tags=g['tags'] if 'tags' in g else None)
                    d['children'].append({'resource_type': 'ChildGroup',
                                          'Group': g})
        expected_body = {'resource_type': 'Infra',
                         'children': [{'resource_type': 'ChildDomain',
                                       'Domain': d1},
                                      {'resource_type': 'ChildDomain',
                                       'Domain': d2}]}
        self.assert_infra_patch_call(expected_body)
def test_ip_address_pool_and_block_subnets(self):
    """Create an IP pool and allocate two block subnets in one transaction."""
    pool = {'id': 'pool1',
            'resource_type': 'IpAddressPool',
            'display_name': 'pool1',
            'children': []}
    block_id = 'block1'
    block_path = '/infra/ip-blocks/%s' % block_id
    subnets = [
        {'id': 'subnet1',
         'resource_type': 'IpAddressPoolBlockSubnet',
         'ip_block_path': block_path,
         'size': 8},
        {'id': 'subnet2',
         'resource_type': 'IpAddressPoolBlockSubnet',
         'ip_block_path': block_path,
         'size': 4},
    ]
    with trans.NsxPolicyTransaction():
        self.policy_lib.ip_pool.create_or_overwrite(
            pool['display_name'],
            ip_pool_id=pool['id'])
        for subnet in subnets:
            self.policy_lib.ip_pool.allocate_block_subnet(
                ip_pool_id=pool['id'],
                ip_block_id=block_id,
                ip_subnet_id=subnet['id'],
                size=subnet['size'])
            # Each allocated subnet is nested under the pool in the
            # expected hierarchical PATCH body.
            pool['children'].append(
                {'resource_type': 'ChildIpAddressPoolSubnet',
                 'IpAddressPoolSubnet': subnet})
    expected_body = {
        'resource_type': 'Infra',
        'children': [{'resource_type': 'ChildIpAddressPool',
                      'IpAddressPool': pool}]}
    self.assert_infra_patch_call(expected_body)
def test_ip_address_pool_delete(self):
    """Delete an IP pool in a transaction; the child is marked for delete."""
    pool = {'id': 'pool1', 'resource_type': 'IpAddressPool'}
    with trans.NsxPolicyTransaction():
        self.policy_lib.ip_pool.delete(ip_pool_id=pool['id'])
    # A transactional delete is expressed as a child entry flagged with
    # marked_for_delete rather than a separate DELETE request.
    deleted_child = {'resource_type': 'ChildIpAddressPool',
                     'IpAddressPool': pool,
                     'marked_for_delete': True}
    expected_body = {'resource_type': 'Infra',
                     'children': [deleted_child]}
    self.assert_infra_patch_call(expected_body)
def test_groups_only(self):
    """Create groups under two domains without creating the domains themselves."""
    groups = [
        {'resource_type': 'Group', 'id': 'group1',
         'display_name': 'g1',
         'description': 'first group', 'expression': []},
        {'resource_type': 'Group', 'id': 'group2',
         'description': 'second group',
         'display_name': 'g2', 'expression': []},
    ]
    domains = [{'resource_type': 'Domain', 'id': 'domain1'},
               {'resource_type': 'Domain', 'id': 'domain2'}]
    with trans.NsxPolicyTransaction():
        for dom in domains:
            # The domain dict is only a container for the expected
            # ChildGroup entries; no domain create call is issued.
            dom['children'] = []
            for grp in groups:
                self.policy_lib.group.create_or_overwrite(
                    grp['display_name'],
                    dom['id'],
                    grp['id'],
                    grp['description'])
                dom['children'].append(
                    {'resource_type': 'ChildGroup', 'Group': grp})
    expected_body = {
        'resource_type': 'Infra',
        'children': [{'resource_type': 'ChildDomain', 'Domain': dom}
                     for dom in domains]}
    self.assert_infra_patch_call(expected_body)
def test_segment_ports(self):
    """Create one port on each of two segments in a single transaction."""
    port1 = {'id': 'port_on_seg1',
             'resource_type': 'SegmentPort',
             'display_name': 'port_on_seg1',
             'attachment': {'type': 'VIF',
                            'app_id': 'app1',
                            'traffic_tag': 5}}
    port2 = {'id': 'port1_on_seg2',
             'resource_type': 'SegmentPort',
             'display_name': 'port_on_seg2',
             'attachment': {'type': 'CHILD',
                            'app_id': 'app2',
                            'traffic_tag': None}}
    segments = [
        {'id': 'seg1',
         'resource_type': 'Segment',
         'children': [{'resource_type': 'ChildSegmentPort',
                       'SegmentPort': port1}]},
        {'id': 'seg2',
         'resource_type': 'Segment',
         'children': [{'resource_type': 'ChildSegmentPort',
                       'SegmentPort': port2}]},
    ]
    with trans.NsxPolicyTransaction():
        for seg, port in zip(segments, (port1, port2)):
            attachment = port['attachment']
            self.policy_lib.segment_port.create_or_overwrite(
                port['display_name'],
                seg['id'],
                port['id'],
                attachment_type=attachment['type'],
                app_id=attachment['app_id'],
                traffic_tag=attachment['traffic_tag'])
    expected_body = {
        'resource_type': 'Infra',
        'children': [{'resource_type': 'ChildSegment', 'Segment': seg}
                     for seg in segments]}
    self.assert_infra_patch_call(expected_body)
def test_tier1_nat_rules_create(self):
    """Create an SNAT and a DNAT rule on a tier1 router in one transaction."""
    tier1_id = 'tier1-1'
    snat_rule = {"action": constants.NAT_ACTION_SNAT,
                 "display_name": "snat rule",
                 "id": 'nat1',
                 "resource_type": "PolicyNatRule",
                 "firewall_match": constants.NAT_FIREWALL_MATCH_BYPASS}
    dnat_rule = {"action": constants.NAT_ACTION_DNAT,
                 "display_name": "dnat rule",
                 "id": 'nat2',
                 "resource_type": "PolicyNatRule",
                 "firewall_match": constants.NAT_FIREWALL_MATCH_BYPASS}
    nat_rules = (snat_rule, dnat_rule)
    # NAT rules hang off the USER PolicyNat section of the tier1.
    policy_nat = {"id": "USER",
                  "resource_type": "PolicyNat",
                  "children": [
                      {"PolicyNatRule": rule,
                       "resource_type": "ChildPolicyNatRule"}
                      for rule in nat_rules]}
    tier1_dict = {"id": tier1_id,
                  "resource_type": "Tier1",
                  "children": [{"PolicyNat": policy_nat,
                                "resource_type": "ChildPolicyNat"}]}
    with trans.NsxPolicyTransaction():
        for rule in nat_rules:
            self.policy_lib.tier1_nat_rule.create_or_overwrite(
                rule['display_name'],
                tier1_id,
                nat_rule_id=rule['id'],
                action=rule['action'])
    expected_body = {"resource_type": "Infra",
                     "children": [{"Tier1": tier1_dict,
                                   "resource_type": "ChildTier1"}]}
    self.assert_infra_patch_call(expected_body)
def test_tier1_nat_rules_delete(self):
    """Delete two tier1 NAT rules; both children are marked for delete."""
    tier1_id = 'tier1-1'
    rule_ids = ('nat1', 'nat2')
    nat_rules = [{"action": constants.NAT_ACTION_DNAT,
                  "id": rule_id,
                  "resource_type": "PolicyNatRule"}
                 for rule_id in rule_ids]
    # Deletes are expressed as marked_for_delete children under the
    # USER PolicyNat section of the tier1.
    policy_nat = {"id": "USER",
                  "resource_type": "PolicyNat",
                  "children": [
                      {"PolicyNatRule": rule,
                       "marked_for_delete": True,
                       "resource_type": "ChildPolicyNatRule"}
                      for rule in nat_rules]}
    tier1_dict = {"id": tier1_id,
                  "resource_type": "Tier1",
                  "children": [{"PolicyNat": policy_nat,
                                "resource_type": "ChildPolicyNat"}]}
    with trans.NsxPolicyTransaction():
        for rule_id in rule_ids:
            self.policy_lib.tier1_nat_rule.delete(
                tier1_id,
                nat_rule_id=rule_id)
    expected_body = {"resource_type": "Infra",
                     "children": [{"Tier1": tier1_dict,
                                   "resource_type": "ChildTier1"}]}
    self.assert_infra_patch_call(expected_body)
def test_creating_security_policy_and_dfw_rules(self):
    """Create a security policy with a single rule entry in one transaction."""
    dfw_rule = {'id': 'rule_id1', 'action': 'ALLOW',
                'display_name': 'rule1', 'description': None,
                'direction': 'IN_OUT', 'ip_protocol': 'IPV4_IPV6',
                'logged': False, 'destination_groups': ['destination_url'],
                'source_groups': ['src_url'], 'resource_type': 'Rule',
                'scope': None, 'sequence_number': None, 'tag': None,
                'services': ['ANY']}
    security_policy = {'id': 'security_policy_id1',
                       'display_name': 'security_policy',
                       'category': 'Application',
                       'resource_type': 'SecurityPolicy'}
    domain = {'resource_type': 'Domain', 'id': 'domain1'}
    domain_id = domain['id']
    map_id = security_policy['id']
    dfw_rule_entries = [self.policy_lib.comm_map.build_entry(
        name=dfw_rule['display_name'],
        domain_id=domain_id,
        map_id=map_id,
        entry_id=dfw_rule['id'],
        source_groups=dfw_rule['source_groups'],
        dest_groups=dfw_rule['destination_groups'])]
    with trans.NsxPolicyTransaction():
        self.policy_lib.comm_map.create_with_entries(
            name=security_policy['display_name'],
            domain_id=domain_id,
            map_id=map_id,
            entries=dfw_rule_entries)

    def group_path(group_id):
        # Bare group ids are expanded to full policy paths in the body.
        return '/infra/domains/%s/groups/%s' % (domain_id, group_id)

    dfw_rule['destination_groups'] = [
        group_path(gid) for gid in dfw_rule['destination_groups']]
    dfw_rule['source_groups'] = [
        group_path(gid) for gid in dfw_rule['source_groups']]
    # Expected hierarchy: Infra -> ChildDomain -> ChildSecurityPolicy
    # -> ChildRule.
    security_policy['children'] = [
        {'resource_type': 'ChildRule', 'Rule': dfw_rule}]
    domain['children'] = [{'resource_type': 'ChildSecurityPolicy',
                           'SecurityPolicy': security_policy}]
    expected_body = {
        'resource_type': 'Infra',
        'children': [{'resource_type': 'ChildDomain', 'Domain': domain}]}
    self.assert_infra_patch_call(expected_body)
@mock.patch('vmware_nsxlib.v3.policy.core_defs.NsxPolicyApi.get')
def _test_updating_security_policy_and_dfw_rules(
        self, use_child_rules, mock_get_api):
    # Shared helper for the two tests below.
    #
    # The mocked backend GET returns a policy that already holds two
    # rules; the update supplies entries for rule1 only, renamed and with
    # a new direction.  With use_child_rules=True the expected PATCH
    # wraps the rules as ChildRule children and marks rule2 for delete;
    # with False the full 'rules' list is embedded in the SecurityPolicy.
    # mock_get_api is injected last by the @mock.patch decorator.
    dfw_rule1 = {'id': 'rule_id1', 'action': 'ALLOW',
                 'display_name': 'rule1', 'description': None,
                 'direction': 'IN_OUT', 'ip_protocol': 'IPV4_IPV6',
                 'logged': False,
                 'destination_groups': ['destination_url'],
                 'source_groups': ['src_url'], 'resource_type': 'Rule',
                 'scope': None, 'sequence_number': None, 'tag': None,
                 'services': ['ANY'], "_create_time": 1}
    dfw_rule2 = {'id': 'rule_id2', 'action': 'DROP',
                 'display_name': 'rule2', 'description': None,
                 'direction': 'IN_OUT', 'ip_protocol': 'IPV4_IPV6',
                 'logged': False,
                 'destination_groups': ['destination_url'],
                 'source_groups': ['src_url'], 'resource_type': 'Rule',
                 'scope': None, 'sequence_number': None, 'tag': None,
                 'services': ['ANY'], "_create_time": 1}
    security_policy = {'id': 'security_policy_id1',
                       'display_name': 'security_policy',
                       'category': 'Application',
                       'resource_type': 'SecurityPolicy'}
    domain = {'resource_type': 'Domain', 'id': 'domain1'}
    domain_id = domain['id']
    map_id = security_policy['id']
    new_rule_name = 'new_rule1'
    new_direction = 'IN'
    # Only rule1 appears in the update entries; rule2 is deliberately
    # omitted.
    dfw_rule_entries = [self.policy_lib.comm_map.build_entry(
        name=new_rule_name,
        domain_id=domain_id,
        map_id=map_id,
        entry_id=dfw_rule1['id'],
        source_groups=dfw_rule1['source_groups'],
        dest_groups=dfw_rule1['destination_groups'],
        direction=new_direction
    )]

    def get_group_path(group_id, domain_id):
        # Group ids are expanded to full policy paths in the PATCH body.
        return '/infra/domains/' + domain_id + '/groups/' + group_id

    for dfw_rule in [dfw_rule1, dfw_rule2]:
        dfw_rule['destination_groups'] = [get_group_path(group_id,
                                                         domain_id)
                                          for group_id in
                                          dfw_rule['destination_groups']]
        dfw_rule['source_groups'] = [get_group_path(group_id, domain_id)
                                     for group_id in
                                     dfw_rule['source_groups']]
    # The mocked GET returns the policy as it currently exists on the
    # backend, holding both rules (deep copies so later mutations of the
    # expected dicts don't leak into the mocked response).
    security_policy_values = copy.deepcopy(security_policy)
    security_policy_values.update({'rules':
                                  copy.deepcopy([dfw_rule1, dfw_rule2])})
    mock_get_api.return_value = security_policy_values
    with trans.NsxPolicyTransaction():
        self.policy_lib.comm_map.update_with_entries(
            name=security_policy['display_name'],
            domain_id=domain_id,
            map_id=map_id,
            entries=dfw_rule_entries,
            use_child_rules=use_child_rules
        )
    # Reflect the update in the expected rule1: new name and direction.
    dfw_rule1['display_name'] = new_rule_name
    dfw_rule1['direction'] = new_direction
    if use_child_rules:
        # rule1 is updated in place; rule2 (absent from the entries) is
        # expected to be marked for delete.
        child_rules = [{'resource_type': 'ChildRule', 'Rule': dfw_rule1},
                       {'resource_type': 'ChildRule', 'Rule': dfw_rule2,
                        'marked_for_delete': True}]
        security_policy.update({'children': child_rules})
    else:
        security_policy['rules'] = copy.deepcopy([dfw_rule1, dfw_rule2])
    child_security_policies = [{
        'resource_type': 'ChildSecurityPolicy',
        'SecurityPolicy': security_policy
    }]
    domain.update({'children': child_security_policies})
    child_domains = [{
        'resource_type': 'ChildDomain',
        'Domain': domain
    }]
    expected_body = {'resource_type': 'Infra',
                     'children': child_domains}
    self.assert_infra_patch_call(expected_body)
def test_updating_security_policy_and_dfw_rules(self):
    """Update a security policy using ChildRule children (use_child_rules=True)."""
    # NOTE: the argument must stay positional — @mock.patch on the helper
    # appends the mock object to the positional args.
    return self._test_updating_security_policy_and_dfw_rules(True)
def test_updating_security_policy_and_dfw_rules_no_child_rules(self):
    """Update a security policy with an inline 'rules' list (use_child_rules=False)."""
    # NOTE: the argument must stay positional — @mock.patch on the helper
    # appends the mock object to the positional args.
    return self._test_updating_security_policy_and_dfw_rules(False)
@mock.patch('vmware_nsxlib.v3.policy.core_defs.NsxPolicyApi.get')
def test_updating_security_policy_patch_rules(self, mock_get_api):
    """Patch rules under an existing policy via a ChildResourceReference.

    patch_entries should not resend the SecurityPolicy body; it targets
    the existing policy through a ChildResourceReference and nests the
    rules beneath it.  mock_get_api is injected by @mock.patch.
    """
    dfw_rule1 = {'id': 'rule_id1', 'action': 'ALLOW',
                 'display_name': 'rule1', 'description': None,
                 'direction': 'IN_OUT', 'ip_protocol': 'IPV4_IPV6',
                 'logged': False,
                 'destination_groups': ['destination_url'],
                 'source_groups': ['src_url'], 'resource_type': 'Rule',
                 'scope': None, 'sequence_number': None, 'tag': None,
                 'services': ['ANY']}
    dfw_rule2 = {'id': 'rule_id2', 'action': 'DROP',
                 'display_name': 'rule2', 'description': None,
                 'direction': 'IN_OUT', 'ip_protocol': 'IPV4_IPV6',
                 'logged': False,
                 'destination_groups': ['destination_url'],
                 'source_groups': ['src_url'], 'resource_type': 'Rule',
                 'scope': None, 'sequence_number': None, 'tag': None,
                 'services': ['ANY']}
    security_policy = {'id': 'security_policy_id1',
                       'display_name': 'security_policy',
                       'category': 'Application',
                       'resource_type': 'SecurityPolicy'}
    domain = {'resource_type': 'Domain', 'id': 'domain1'}
    domain_id = domain['id']
    map_id = security_policy['id']
    # Build one entry per rule, preserving the rule order.
    dfw_rule_entries = [self.policy_lib.comm_map.build_entry(
        name=rule['display_name'],
        domain_id=domain_id,
        map_id=map_id,
        entry_id=rule['id'],
        source_groups=rule['source_groups'],
        dest_groups=rule['destination_groups'],
        ip_protocol=rule['ip_protocol'],
        action=rule['action'],
        direction=rule['direction']
    ) for rule in [dfw_rule1, dfw_rule2]]

    def get_group_path(group_id, domain_id):
        # Group ids are expanded to full policy paths in the PATCH body.
        return '/infra/domains/' + domain_id + '/groups/' + group_id

    for dfw_rule in [dfw_rule1, dfw_rule2]:
        dfw_rule['destination_groups'] = [get_group_path(group_id,
                                                         domain_id)
                                          for group_id in
                                          dfw_rule['destination_groups']]
        dfw_rule['source_groups'] = [get_group_path(group_id, domain_id)
                                     for group_id in
                                     dfw_rule['source_groups']]
    # The mocked GET returns the current backend view of the policy,
    # holding both rules.
    security_policy_values = copy.deepcopy(security_policy)
    security_policy_values.update({'rules':
                                  copy.deepcopy([dfw_rule1, dfw_rule2])})
    mock_get_api.return_value = security_policy_values
    with trans.NsxPolicyTransaction():
        self.policy_lib.comm_map.patch_entries(
            domain_id=domain_id,
            map_id=map_id,
            entries=dfw_rule_entries,
        )
    # Expected hierarchy: Infra -> ChildDomain -> ChildResourceReference
    # (pointing at the existing SecurityPolicy) -> ChildRule entries.
    child_security_policies = [{
        'resource_type': 'ChildResourceReference',
        'target_type': 'SecurityPolicy',
        'id': security_policy['id'],
    }]
    child_rules = [{'resource_type': 'ChildRule', 'Rule': dfw_rule1},
                   {'resource_type': 'ChildRule', 'Rule': dfw_rule2}]
    child_security_policies[0].update({'children': child_rules})
    domain.update({'children': child_security_policies})
    child_domains = [{
        'resource_type': 'ChildDomain',
        'Domain': domain
    }]
    expected_body = {'resource_type': 'Infra',
                     'children': child_domains}
    self.assert_infra_patch_call(expected_body)
@mock.patch('vmware_nsxlib.v3.policy.core_defs.NsxPolicyApi.get')
def test_updating_security_policy_with_no_entries_set(self, mock_get_api):
    """Update a security policy without passing 'entries'.

    The existing rule returned by the mocked backend GET is expected to
    be carried through unchanged in the PATCH body.  mock_get_api is
    injected by @mock.patch.
    """
    dfw_rule1 = {'id': 'rule_id1', 'action': 'ALLOW',
                 'display_name': 'rule1', 'description': None,
                 'direction': 'IN_OUT', 'ip_protocol': 'IPV4_IPV6',
                 'logged': False,
                 'destination_groups': ['destination_url'],
                 'source_groups': ['src_url'], 'resource_type': 'Rule',
                 'scope': None, 'sequence_number': None, 'tag': None,
                 'services': ['ANY'], "_create_time": 1}
    security_policy = {'id': 'security_policy_id1',
                       'display_name': 'security_policy',
                       'category': 'Application',
                       'resource_type': 'SecurityPolicy'}
    domain = {'resource_type': 'Domain', 'id': 'domain1'}
    domain_id = domain['id']
    map_id = security_policy['id']

    def get_group_path(group_id, domain_id):
        # Group ids are expanded to full policy paths in the PATCH body.
        return '/infra/domains/' + domain_id + '/groups/' + group_id

    for dfw_rule in [dfw_rule1]:
        dfw_rule['destination_groups'] = [get_group_path(group_id,
                                                         domain_id)
                                          for group_id in
                                          dfw_rule['destination_groups']]
        dfw_rule['source_groups'] = [get_group_path(group_id, domain_id)
                                     for group_id in
                                     dfw_rule['source_groups']]
    # The same dict serves as the mocked backend response and (after the
    # update call) as the expected SecurityPolicy in the PATCH body.
    security_policy.update({'rules': [dfw_rule1]})
    mock_get_api.return_value = security_policy
    with trans.NsxPolicyTransaction():
        self.policy_lib.comm_map.update_with_entries(
            name=security_policy['display_name'],
            domain_id=domain_id,
            map_id=map_id
        )
    child_security_policies = [{
        'resource_type': 'ChildSecurityPolicy',
        'SecurityPolicy': security_policy
    }]
    domain.update({'children': child_security_policies})
    child_domains = [{
        'resource_type': 'ChildDomain',
        'Domain': domain
    }]
    expected_body = {'resource_type': 'Infra',
                     'children': child_domains}
    self.assert_infra_patch_call(expected_body)
| 43.006319
| 79
| 0.512545
| 2,550
| 27,223
| 5.131373
| 0.101961
| 0.08162
| 0.017577
| 0.01987
| 0.809247
| 0.779901
| 0.747574
| 0.705923
| 0.65984
| 0.648223
| 0
| 0.013199
| 0.368255
| 27,223
| 632
| 80
| 43.074367
| 0.747645
| 0.023436
| 0
| 0.671154
| 0
| 0
| 0.232121
| 0.010501
| 0
| 0
| 0
| 0
| 0.026923
| 1
| 0.038462
| false
| 0.003846
| 0.013462
| 0.011538
| 0.065385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.