Use least squares to fit the parameters of each default curve separately
Returns
-------
None
def fit_theta(self):
"""use least squares to fit all default curves parameter seperately
Returns
-------
None
"""
x = range(1, self.point_num + 1)
y = self.trial_history
for i in range(NUM_OF_FUNCTIONS):
model = curve_combination_models[i]
try:
# The maximum number of iterations to fit is 100*(N+1), where N is the number of elements in `x0`.
if model_para_num[model] == 2:
a, b = optimize.curve_fit(all_models[model], x, y)[0]
model_para[model][0] = a
model_para[model][1] = b
elif model_para_num[model] == 3:
a, b, c = optimize.curve_fit(all_models[model], x, y)[0]
model_para[model][0] = a
model_para[model][1] = b
model_para[model][2] = c
elif model_para_num[model] == 4:
a, b, c, d = optimize.curve_fit(all_models[model], x, y)[0]
model_para[model][0] = a
model_para[model][1] = b
model_para[model][2] = c
model_para[model][3] = d
except (RuntimeError, FloatingPointError, OverflowError, ZeroDivisionError):
# Ignore exceptions caused by numerical calculations
pass
except Exception as exception:
logger.critical("Exceptions in fit_theta:", exception) |
Filter out the poorly performing curves
Returns
-------
None
def filter_curve(self):
"""filter the poor performing curve
Returns
-------
None
"""
avg = np.sum(self.trial_history) / self.point_num
standard = avg * avg * self.point_num
predict_data = []
tmp_model = []
for i in range(NUM_OF_FUNCTIONS):
var = 0
model = curve_combination_models[i]
for j in range(1, self.point_num + 1):
y = self.predict_y(model, j)
var += (y - self.trial_history[j - 1]) * (y - self.trial_history[j - 1])
if var < standard:
predict_data.append(y)
tmp_model.append(curve_combination_models[i])
median = np.median(predict_data)
std = np.std(predict_data)
for model in tmp_model:
y = self.predict_y(model, self.target_pos)
epsilon = self.point_num / 10 * std
if y < median + epsilon and y > median - epsilon:
self.effective_model.append(model)
self.effective_model_num = len(self.effective_model)
logger.info('List of effective models: %s', self.effective_model) |
Return the predicted y of 'model' at epoch = pos
Parameters
----------
model: string
name of the curve function model
pos: int
the epoch number of the position you want to predict
Returns
-------
float
The predicted value of 'model' at epoch pos
def predict_y(self, model, pos):
"""return the predict y of 'model' when epoch = pos
Parameters
----------
model: string
name of the curve function model
pos: int
the epoch number of the position you want to predict
Returns
-------
float
The predicted value of 'model' at epoch pos
"""
if model_para_num[model] == 2:
y = all_models[model](pos, model_para[model][0], model_para[model][1])
elif model_para_num[model] == 3:
y = all_models[model](pos, model_para[model][0], model_para[model][1], model_para[model][2])
elif model_para_num[model] == 4:
y = all_models[model](pos, model_para[model][0], model_para[model][1], model_para[model][2], model_para[model][3])
return y |
Return the value of f_comb at epoch = pos
Parameters
----------
pos: int
the epoch number of the position you want to predict
sample: list
sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk}
Returns
-------
float
The combined prediction at pos from all the effective curve models
def f_comb(self, pos, sample):
"""return the value of the f_comb when epoch = pos
Parameters
----------
pos: int
the epoch number of the position you want to predict
sample: list
sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk}
Returns
-------
float
The combined prediction at pos from all the effective curve models
"""
ret = 0
for i in range(self.effective_model_num):
model = self.effective_model[i]
y = self.predict_y(model, pos)
ret += sample[i] * y
return ret |
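Conceptually, f_comb is just the weighted sum of the effective curve models, with w_i the sampled weights and f_i the fitted curves (a restatement of the loop above):

f_{comb}(pos, w) = \sum_{i=1}^{k} w_i \cdot f_i(pos)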
normalize weight
Parameters
----------
samples: list
a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
Returns
-------
list
samples after weight normalization
def normalize_weight(self, samples):
"""normalize weight
Parameters
----------
samples: list
a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
Returns
-------
list
samples after weight normalization
"""
for i in range(NUM_OF_INSTANCE):
total = 0
for j in range(self.effective_model_num):
total += samples[i][j]
for j in range(self.effective_model_num):
samples[i][j] /= total
return samples |
returns the value of sigma square, given the weight's sample
Parameters
----------
sample: list
sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk}
Returns
-------
float
the value of sigma square, given the weight's sample
def sigma_sq(self, sample):
"""returns the value of sigma square, given the weight's sample
Parameters
----------
sample: list
sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk}
Returns
-------
float
the value of sigma square, given the weight's sample
"""
ret = 0
for i in range(1, self.point_num + 1):
temp = self.trial_history[i - 1] - self.f_comb(i, sample)
ret += temp * temp
return 1.0 * ret / self.point_num |
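In other words, sigma_sq is the mean squared residual between the observed history y_i and the weighted combination (a restatement of the loop above, with N = point_num):

\sigma^2(w) = \frac{1}{N} \sum_{i=1}^{N} \bigl( y_i - f_{comb}(i, w) \bigr)^2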
returns the value of normal distribution, given the weight's sample and target position
Parameters
----------
pos: int
the epoch number of the position you want to predict
sample: list
sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk}
Returns
-------
float
the value of normal distribution
def normal_distribution(self, pos, sample):
"""returns the value of normal distribution, given the weight's sample and target position
Parameters
----------
pos: int
the epoch number of the position you want to predict
sample: list
sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk}
Returns
-------
float
the value of normal distribution
"""
curr_sigma_sq = self.sigma_sq(sample)
delta = self.trial_history[pos - 1] - self.f_comb(pos, sample)
return np.exp(np.square(delta) / (-2.0 * curr_sigma_sq)) / np.sqrt(2 * np.pi * np.sqrt(curr_sigma_sq)) |
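For reference, the textbook Gaussian density this term is modeled on is

\mathcal{N}(\Delta \mid 0, \sigma^2) = \frac{1}{\sqrt{2\pi\sigma^2}} \exp\!\left(-\frac{\Delta^2}{2\sigma^2}\right)

where \Delta is the residual at pos and \sigma^2 comes from sigma_sq; note that the normalization constant in the code above (sqrt of 2*pi*sqrt(sigma^2)) differs slightly from this textbook form.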
likelihood
Parameters
----------
samples: list
a collection of samples, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
Returns
-------
numpy.ndarray
the likelihood of each sample of weights
def likelihood(self, samples):
"""likelihood
Parameters
----------
samples: list
a collection of samples, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
Returns
-------
numpy.ndarray
the likelihood of each sample of weights
"""
ret = np.ones(NUM_OF_INSTANCE)
for i in range(NUM_OF_INSTANCE):
for j in range(1, self.point_num + 1):
ret[i] *= self.normal_distribution(j, samples[i])
return ret |
prior distribution
Parameters
----------
samples: list
a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
Returns
-------
numpy.ndarray
the prior probability of each sample (1 if feasible, 0 otherwise)
def prior(self, samples):
"""priori distribution
Parameters
----------
samples: list
a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
Returns
-------
numpy.ndarray
the prior probability of each sample (1 if feasible, 0 otherwise)
"""
ret = np.ones(NUM_OF_INSTANCE)
for i in range(NUM_OF_INSTANCE):
for j in range(self.effective_model_num):
if not samples[i][j] > 0:
ret[i] = 0
if self.f_comb(1, samples[i]) >= self.f_comb(self.target_pos, samples[i]):
ret[i] = 0
return ret |
posterior probability
Parameters
----------
samples: list
a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
Returns
-------
numpy.ndarray
the unnormalized posterior probability of each sample
def target_distribution(self, samples):
"""posterior probability
Parameters
----------
samples: list
a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
Returns
-------
numpy.ndarray
the unnormalized posterior probability of each sample
"""
curr_likelihood = self.likelihood(samples)
curr_prior = self.prior(samples)
ret = np.ones(NUM_OF_INSTANCE)
for i in range(NUM_OF_INSTANCE):
ret[i] = curr_likelihood[i] * curr_prior[i]
return ret |
Adjust the weight of each function using mcmc sampling.
The initial value of each weight is evenly distributed.
Brief introduction:
(1)Definition of sample:
Sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk}
(2)Definition of samples:
Samples is a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
(3)Definition of model:
Model is the function we chose right now. Such as: 'wap', 'weibull'.
(4)Definition of pos:
Pos is the position we want to predict, corresponds to the value of epoch.
Returns
-------
None
def mcmc_sampling(self):
"""Adjust the weight of each function using mcmc sampling.
The initial value of each weight is evenly distributed.
Brief introduction:
(1)Definition of sample:
Sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk}
(2)Definition of samples:
Samples is a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
(3)Definition of model:
Model is the function we chose right now. Such as: 'wap', 'weibull'.
(4)Definition of pos:
Pos is the position we want to predict, corresponds to the value of epoch.
Returns
-------
None
"""
init_weight = np.ones((self.effective_model_num), dtype=float) / self.effective_model_num
self.weight_samples = np.broadcast_to(init_weight, (NUM_OF_INSTANCE, self.effective_model_num))
for i in range(NUM_OF_SIMULATION_TIME):
# sample new value from Q(i, j)
new_values = np.random.randn(NUM_OF_INSTANCE, self.effective_model_num) * STEP_SIZE + self.weight_samples
new_values = self.normalize_weight(new_values)
# compute alpha(i, j) = min{1, P(j)Q(j, i)/P(i)Q(i, j)}
alpha = np.minimum(1, self.target_distribution(new_values) / self.target_distribution(self.weight_samples))
# sample u
u = np.random.rand(NUM_OF_INSTANCE)
# new value
change_value_flag = (u < alpha).astype(int)
for j in range(NUM_OF_INSTANCE):
new_values[j] = self.weight_samples[j] * (1 - change_value_flag[j]) + new_values[j] * change_value_flag[j]
self.weight_samples = new_values |
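The accept/reject rule above is a vectorized Metropolis-Hastings step with a symmetric random-walk proposal, so Q(i, j) = Q(j, i) cancels in alpha. A minimal standalone sketch of the same rule on a toy one-dimensional target (illustrative only; not the assessor's posterior):

import numpy as np

def target(w):
    # unnormalized density of a standard normal
    return np.exp(-0.5 * w * w)

np.random.seed(0)
w = 0.0
samples = []
for _ in range(10000):
    proposal = w + 1.0 * np.random.randn()           # symmetric random-walk proposal
    alpha = min(1.0, target(proposal) / target(w))   # acceptance probability
    if np.random.rand() < alpha:
        w = proposal                                 # accept, otherwise keep the old value
    samples.append(w)
print(np.mean(samples), np.std(samples))             # approaches 0 and 1 as the chain mixes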
predict the value of target position
Parameters
----------
trial_history: list
The history performance matrix of each trial.
Returns
-------
float
expected final result performance of this hyperparameter config
def predict(self, trial_history):
"""predict the value of target position
Parameters
----------
trial_history: list
The history performance matrix of each trial.
Returns
-------
float
expected final result performance of this hyperparameter config
"""
self.trial_history = trial_history
self.point_num = len(trial_history)
self.fit_theta()
self.filter_curve()
if self.effective_model_num < LEAST_FITTED_FUNCTION:
# the different curves' predictions are too scattered; more information is required
return None
self.mcmc_sampling()
ret = 0
for i in range(NUM_OF_INSTANCE):
ret += self.f_comb(self.target_pos, self.weight_samples[i])
return ret / NUM_OF_INSTANCE |
Detect the outlier
def _outlierDetection_threaded(inputs):
'''
Detect the outlier
'''
[samples_idx, samples_x, samples_y_aggregation] = inputs
sys.stderr.write("[%s] DEBUG: Evaluating %dth of %d samples\n"\
% (os.path.basename(__file__), samples_idx + 1, len(samples_x)))
outlier = None
# Create a diagnostic regression model which removes the sample that we want to evaluate
diagnostic_regressor_gp = gp_create_model.create_model(\
samples_x[0:samples_idx] + samples_x[samples_idx + 1:],\
samples_y_aggregation[0:samples_idx] + samples_y_aggregation[samples_idx + 1:])
mu, sigma = gp_prediction.predict(samples_x[samples_idx], diagnostic_regressor_gp['model'])
# 2.33 is the z-score for 98% confidence level
if abs(samples_y_aggregation[samples_idx] - mu) > (2.33 * sigma):
outlier = {"samples_idx": samples_idx,
"expected_mu": mu,
"expected_sigma": sigma,
"difference": abs(samples_y_aggregation[samples_idx] - mu) - (2.33 * sigma)}
return outlier |
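For reference, the 2.33 threshold is approximately the z-score of a two-sided 98% confidence interval for a normal distribution; a quick check with scipy (purely illustrative):

from scipy.stats import norm

z = norm.ppf(0.99)   # upper 1% tail of the standard normal, ~2.326
print(round(z, 2))   # 2.33
# a sample is flagged as an outlier when |y - mu| > z * sigma,
# i.e. when it falls outside the 98% interval predicted by the diagnostic GP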
Use multiple threads to detect outliers
def outlierDetection_threaded(samples_x, samples_y_aggregation):
'''
Use multiple threads to detect outliers
'''
outliers = []
threads_inputs = [[samples_idx, samples_x, samples_y_aggregation]\
for samples_idx in range(0, len(samples_x))]
threads_pool = ThreadPool(min(4, len(threads_inputs)))
threads_results = threads_pool.map(_outlierDetection_threaded, threads_inputs)
threads_pool.close()
threads_pool.join()
for threads_result in threads_results:
if threads_result is not None:
outliers.append(threads_result)
else:
print("error here.")
outliers = None if len(outliers) == 0 else outliers
return outliers |
deeper conv layer.
def deeper_conv_block(conv_layer, kernel_size, weighted=True):
'''deeper conv layer.
'''
n_dim = get_n_dim(conv_layer)
filter_shape = (kernel_size,) * 2
n_filters = conv_layer.filters
weight = np.zeros((n_filters, n_filters) + filter_shape)
center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
for i in range(n_filters):
filter_weight = np.zeros((n_filters,) + filter_shape)
index = (i,) + center
filter_weight[index] = 1
weight[i, ...] = filter_weight
bias = np.zeros(n_filters)
new_conv_layer = get_conv_class(n_dim)(
conv_layer.filters, n_filters, kernel_size=kernel_size
)
bn = get_batch_norm_class(n_dim)(n_filters)
if weighted:
new_conv_layer.set_weights(
(add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
)
new_weights = [
add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
]
bn.set_weights(new_weights)
return [StubReLU(), new_conv_layer, bn] |
deeper dense layer.
def dense_to_deeper_block(dense_layer, weighted=True):
'''deeper dense layer.
'''
units = dense_layer.units
weight = np.eye(units)
bias = np.zeros(units)
new_dense_layer = StubDense(units, units)
if weighted:
new_dense_layer.set_weights(
(add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
)
return [StubReLU(), new_dense_layer] |
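Before noise is added, the inserted dense layer is an identity mapping, which is what keeps the deepened network functionally close to the original. A quick numpy sanity check of that idea (illustrative only; the actual block also prepends a ReLU, so exact equality holds only for non-negative inputs):

import numpy as np

units = 4
weight = np.eye(units)                # identity weight, as in dense_to_deeper_block
bias = np.zeros(units)                # zero bias
x = np.abs(np.random.randn(units))    # non-negative input, so the ReLU is a no-op
y = weight.dot(np.maximum(x, 0)) + bias
print(np.allclose(x, y))              # True: the new block passes its input through unchanged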
wider previous dense layer.
def wider_pre_dense(layer, n_add, weighted=True):
'''wider previous dense layer.
'''
if not weighted:
return StubDense(layer.input_units, layer.units + n_add)
n_units2 = layer.units
teacher_w, teacher_b = layer.get_weights()
rand = np.random.randint(n_units2, size=n_add)
student_w = teacher_w.copy()
student_b = teacher_b.copy()
# target layer update (i)
for i in range(n_add):
teacher_index = rand[i]
new_weight = teacher_w[teacher_index, :]
new_weight = new_weight[np.newaxis, :]
student_w = np.concatenate((student_w, add_noise(new_weight, student_w)), axis=0)
student_b = np.append(student_b, add_noise(teacher_b[teacher_index], student_b))
new_pre_layer = StubDense(layer.input_units, n_units2 + n_add)
new_pre_layer.set_weights((student_w, student_b))
return new_pre_layer |
wider previous conv layer.
def wider_pre_conv(layer, n_add_filters, weighted=True):
'''wider previous conv layer.
'''
n_dim = get_n_dim(layer)
if not weighted:
return get_conv_class(n_dim)(
layer.input_channel,
layer.filters + n_add_filters,
kernel_size=layer.kernel_size,
)
n_pre_filters = layer.filters
rand = np.random.randint(n_pre_filters, size=n_add_filters)
teacher_w, teacher_b = layer.get_weights()
student_w = teacher_w.copy()
student_b = teacher_b.copy()
# target layer update (i)
for i in range(len(rand)):
teacher_index = rand[i]
new_weight = teacher_w[teacher_index, ...]
new_weight = new_weight[np.newaxis, ...]
student_w = np.concatenate((student_w, new_weight), axis=0)
student_b = np.append(student_b, teacher_b[teacher_index])
new_pre_layer = get_conv_class(n_dim)(
layer.input_channel, n_pre_filters + n_add_filters, layer.kernel_size
)
new_pre_layer.set_weights(
(add_noise(student_w, teacher_w), add_noise(student_b, teacher_b))
)
return new_pre_layer |
wider next conv layer.
def wider_next_conv(layer, start_dim, total_dim, n_add, weighted=True):
'''wider next conv layer.
'''
n_dim = get_n_dim(layer)
if not weighted:
return get_conv_class(n_dim)(layer.input_channel + n_add,
layer.filters,
kernel_size=layer.kernel_size,
stride=layer.stride)
n_filters = layer.filters
teacher_w, teacher_b = layer.get_weights()
new_weight_shape = list(teacher_w.shape)
new_weight_shape[1] = n_add
new_weight = np.zeros(tuple(new_weight_shape))
student_w = np.concatenate((teacher_w[:, :start_dim, ...].copy(),
add_noise(new_weight, teacher_w),
teacher_w[:, start_dim:total_dim, ...].copy()), axis=1)
new_layer = get_conv_class(n_dim)(layer.input_channel + n_add,
n_filters,
kernel_size=layer.kernel_size,
stride=layer.stride)
new_layer.set_weights((student_w, teacher_b))
return new_layer |
wider batch norm layer.
def wider_bn(layer, start_dim, total_dim, n_add, weighted=True):
'''wider batch norm layer.
'''
n_dim = get_n_dim(layer)
if not weighted:
return get_batch_norm_class(n_dim)(layer.num_features + n_add)
weights = layer.get_weights()
new_weights = [
add_noise(np.ones(n_add, dtype=np.float32), np.array([0, 1])),
add_noise(np.zeros(n_add, dtype=np.float32), np.array([0, 1])),
add_noise(np.zeros(n_add, dtype=np.float32), np.array([0, 1])),
add_noise(np.ones(n_add, dtype=np.float32), np.array([0, 1])),
]
student_w = tuple()
for weight, new_weight in zip(weights, new_weights):
temp_w = weight.copy()
temp_w = np.concatenate(
(temp_w[:start_dim], new_weight, temp_w[start_dim:total_dim])
)
student_w += (temp_w,)
new_layer = get_batch_norm_class(n_dim)(layer.num_features + n_add)
new_layer.set_weights(student_w)
return new_layer |
wider next dense layer.
def wider_next_dense(layer, start_dim, total_dim, n_add, weighted=True):
'''wider next dense layer.
'''
if not weighted:
return StubDense(layer.input_units + n_add, layer.units)
teacher_w, teacher_b = layer.get_weights()
student_w = teacher_w.copy()
n_units_each_channel = int(teacher_w.shape[1] / total_dim)
new_weight = np.zeros((teacher_w.shape[0], n_add * n_units_each_channel))
student_w = np.concatenate(
(
student_w[:, : start_dim * n_units_each_channel],
add_noise(new_weight, student_w),
student_w[
:, start_dim * n_units_each_channel : total_dim * n_units_each_channel
],
),
axis=1,
)
new_layer = StubDense(layer.input_units + n_add, layer.units)
new_layer.set_weights((student_w, teacher_b))
return new_layer |
add noise to the layer.
def add_noise(weights, other_weights):
'''add noise to the layer.
'''
w_range = np.ptp(other_weights.flatten())
noise_range = NOISE_RATIO * w_range
noise = np.random.uniform(-noise_range / 2.0, noise_range / 2.0, weights.shape)
return np.add(noise, weights) |
initialize dense layer weight.
def init_dense_weight(layer):
'''initialize dense layer weight.
'''
units = layer.units
weight = np.eye(units)
bias = np.zeros(units)
layer.set_weights(
(add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
) |
initialize conv layer weight.
def init_conv_weight(layer):
'''initialize conv layer weight.
'''
n_filters = layer.filters
filter_shape = (layer.kernel_size,) * get_n_dim(layer)
weight = np.zeros((n_filters, n_filters) + filter_shape)
center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
for i in range(n_filters):
filter_weight = np.zeros((n_filters,) + filter_shape)
index = (i,) + center
filter_weight[index] = 1
weight[i, ...] = filter_weight
bias = np.zeros(n_filters)
layer.set_weights(
(add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
) |
initialize batch norm layer weight.
def init_bn_weight(layer):
'''initialize batch norm layer weight.
'''
n_filters = layer.num_features
new_weights = [
add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
]
layer.set_weights(new_weights) |
parse log path
def parse_log_path(args, trial_content):
'''parse log path'''
path_list = []
host_list = []
for trial in trial_content:
if args.trial_id and args.trial_id != 'all' and trial.get('id') != args.trial_id:
continue
pattern = r'(?P<head>.+)://(?P<host>.+):(?P<path>.*)'
match = re.search(pattern, trial['logPath'])
if match:
path_list.append(match.group('path'))
host_list.append(match.group('host'))
if not path_list:
print_error('Trial id %s error!' % args.trial_id)
exit(1)
return path_list, host_list |
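The pattern splits a log path of the form '<scheme>://<host>:<path>'. A quick illustration with a made-up value (the host and path below are hypothetical):

import re

pattern = r'(?P<head>.+)://(?P<host>.+):(?P<path>.*)'
match = re.search(pattern, 'sftp://10.10.10.10:/home/user/nni/experiments/xyz/trials/abc')
print(match.group('host'))   # 10.10.10.10
print(match.group('path'))   # /home/user/nni/experiments/xyz/trials/abc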
use ssh client to copy data from remote machine to local machine
def copy_data_from_remote(args, nni_config, trial_content, path_list, host_list, temp_nni_path):
'''use ssh client to copy data from remote machine to local machine'''
machine_list = nni_config.get_config('experimentConfig').get('machineList')
machine_dict = {}
local_path_list = []
for machine in machine_list:
machine_dict[machine['ip']] = {'port': machine['port'], 'passwd': machine['passwd'], 'username': machine['username']}
for index, host in enumerate(host_list):
local_path = os.path.join(temp_nni_path, trial_content[index].get('id'))
local_path_list.append(local_path)
print_normal('Copying log data from %s to %s' % (host + ':' + path_list[index], local_path))
sftp = create_ssh_sftp_client(host, machine_dict[host]['port'], machine_dict[host]['username'], machine_dict[host]['passwd'])
copy_remote_directory_to_local(sftp, path_list[index], local_path)
print_normal('Copy done!')
return local_path_list |
get path list according to different platform
def get_path_list(args, nni_config, trial_content, temp_nni_path):
'''get path list according to different platform'''
path_list, host_list = parse_log_path(args, trial_content)
platform = nni_config.get_config('experimentConfig').get('trainingServicePlatform')
if platform == 'local':
print_normal('Log path: %s' % ' '.join(path_list))
return path_list
elif platform == 'remote':
path_list = copy_data_from_remote(args, nni_config, trial_content, path_list, host_list, temp_nni_path)
print_normal('Log path: %s' % ' '.join(path_list))
return path_list
else:
print_error('Not supported platform!')
exit(1) |
call cmds to start tensorboard process in local machine
def start_tensorboard_process(args, nni_config, path_list, temp_nni_path):
'''call cmds to start tensorboard process in local machine'''
if detect_port(args.port):
print_error('Port %s is used by another process, please reset port!' % str(args.port))
exit(1)
stdout_file = open(os.path.join(temp_nni_path, 'tensorboard_stdout'), 'a+')
stderr_file = open(os.path.join(temp_nni_path, 'tensorboard_stderr'), 'a+')
cmds = ['tensorboard', '--logdir', format_tensorboard_log_path(path_list), '--port', str(args.port)]
tensorboard_process = Popen(cmds, stdout=stdout_file, stderr=stderr_file)
url_list = get_local_urls(args.port)
print_normal(COLOR_GREEN_FORMAT % 'Start tensorboard success!\n' + 'Tensorboard urls: ' + ' '.join(url_list))
tensorboard_process_pid_list = nni_config.get_config('tensorboardPidList')
if tensorboard_process_pid_list is None:
tensorboard_process_pid_list = [tensorboard_process.pid]
else:
tensorboard_process_pid_list.append(tensorboard_process.pid)
nni_config.set_config('tensorboardPidList', tensorboard_process_pid_list) |
stop tensorboard
def stop_tensorboard(args):
'''stop tensorboard'''
experiment_id = check_experiment_id(args)
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
config_file_name = experiment_dict[experiment_id]['fileName']
nni_config = Config(config_file_name)
tensorboard_pid_list = nni_config.get_config('tensorboardPidList')
if tensorboard_pid_list:
for tensorboard_pid in tensorboard_pid_list:
try:
cmds = ['kill', '-9', str(tensorboard_pid)]
call(cmds)
except Exception as exception:
print_error(exception)
nni_config.set_config('tensorboardPidList', [])
print_normal('Stop tensorboard success!')
else:
print_error('No tensorboard configuration!') |
start tensorboard
def start_tensorboard(args):
'''start tensorboard'''
experiment_id = check_experiment_id(args)
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
config_file_name = experiment_dict[experiment_id]['fileName']
nni_config = Config(config_file_name)
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
print_error('Experiment is not running...')
return
running, response = check_rest_server_quick(rest_port)
trial_content = None
if running:
response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT)
if response and check_response(response):
trial_content = json.loads(response.text)
else:
print_error('List trial failed...')
else:
print_error('Restful server is not running...')
if not trial_content:
print_error('No trial information!')
exit(1)
if len(trial_content) > 1 and not args.trial_id:
print_error('There are multiple trials, please set trial id!')
exit(1)
experiment_id = nni_config.get_config('experimentId')
temp_nni_path = os.path.join(tempfile.gettempdir(), 'nni', experiment_id)
os.makedirs(temp_nni_path, exist_ok=True)
path_list = get_path_list(args, nni_config, trial_content, temp_nni_path)
start_tensorboard_process(args, nni_config, path_list, temp_nni_path) |
The smaller the ratio, the better
def _ratio_scores(parameters_value, clusteringmodel_gmm_good, clusteringmodel_gmm_bad):
'''
The smaller the ratio, the better
'''
ratio = clusteringmodel_gmm_good.score([parameters_value]) / clusteringmodel_gmm_bad.score([parameters_value])
sigma = 0
return ratio, sigma |
Call selection
def selection_r(x_bounds,
x_types,
clusteringmodel_gmm_good,
clusteringmodel_gmm_bad,
num_starting_points=100,
minimize_constraints_fun=None):
'''
Call selection
'''
minimize_starting_points = [lib_data.rand(x_bounds, x_types)\
for i in range(0, num_starting_points)]
outputs = selection(x_bounds, x_types,
clusteringmodel_gmm_good,
clusteringmodel_gmm_bad,
minimize_starting_points,
minimize_constraints_fun)
return outputs |
Select the lowest mu value
def selection(x_bounds,
x_types,
clusteringmodel_gmm_good,
clusteringmodel_gmm_bad,
minimize_starting_points,
minimize_constraints_fun=None):
'''
Select the lowest mu value
'''
results = lib_acquisition_function.next_hyperparameter_lowest_mu(\
_ratio_scores, [clusteringmodel_gmm_good, clusteringmodel_gmm_bad],\
x_bounds, x_types, minimize_starting_points, \
minimize_constraints_fun=minimize_constraints_fun)
return results |
Minimize constraints fun summation
def _minimize_constraints_fun_summation(x):
'''
Minimize constraints fun summation
'''
summation = sum([x[i] for i in CONSTRAINT_PARAMS_IDX])
return CONSTRAINT_UPPERBOUND >= summation >= CONSTRAINT_LOWERBOUND |
Load dataset, use the sklearn digits dataset
def load_data():
'''Load dataset, use the sklearn digits dataset'''
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target, random_state=99, test_size=0.25)
ss = StandardScaler()
X_train = ss.fit_transform(X_train)
X_test = ss.transform(X_test)
return X_train, X_test, y_train, y_test |
Get model according to parameters
def get_model(PARAMS):
'''Get model according to parameters'''
model = SVC()
model.C = PARAMS.get('C')
model.kernel = PARAMS.get('kernel')
model.degree = PARAMS.get('degree')
model.gamma = PARAMS.get('gamma')
model.coef0 = PARAMS.get('coef0')
return model |
generate num hyperparameter configurations from search space using Bayesian optimization
Parameters
----------
num: int
the number of hyperparameter configurations
r: int
the budget (amount of resource, e.g. epochs) allocated to each of these configurations
config_generator: CG_BOHB
the Bayesian optimization config generator used to sample new configurations
Returns
-------
list
a list of hyperparameter configurations. Format: [[key1, value1], [key2, value2], ...]
def get_hyperparameter_configurations(self, num, r, config_generator):
"""generate num hyperparameter configurations from search space using Bayesian optimization
Parameters
----------
num: int
the number of hyperparameter configurations
r: int
the budget (amount of resource, e.g. epochs) allocated to each of these configurations
config_generator: CG_BOHB
the Bayesian optimization config generator used to sample new configurations
Returns
-------
list
a list of hyperparameter configurations. Format: [[key1, value1], [key2, value2], ...]
"""
global _KEY
assert self.i == 0
hyperparameter_configs = dict()
for _ in range(num):
params_id = create_bracket_parameter_id(self.s, self.i)
params = config_generator.get_config(r)
params[_KEY] = r
hyperparameter_configs[params_id] = params
self._record_hyper_configs(hyperparameter_configs)
return [[key, value] for key, value in hyperparameter_configs.items()] |
Initialize Tuner, including creating Bayesian optimization-based parametric models
and search space formations
Parameters
----------
data: search space
search space of this experiment
Raises
------
ValueError
Error: Search space is None
def handle_initialize(self, data):
"""Initialize Tuner, including creating Bayesian optimization-based parametric models
and search space formations
Parameters
----------
data: search space
search space of this experiment
Raises
------
ValueError
Error: Search space is None
"""
logger.info('start to handle_initialize')
# convert search space JSON to ConfigSpace
self.handle_update_search_space(data)
# generate BOHB config_generator using Bayesian optimization
if self.search_space:
self.cg = CG_BOHB(configspace=self.search_space,
min_points_in_model=self.min_points_in_model,
top_n_percent=self.top_n_percent,
num_samples=self.num_samples,
random_fraction=self.random_fraction,
bandwidth_factor=self.bandwidth_factor,
min_bandwidth=self.min_bandwidth)
else:
raise ValueError('Error: Search space is None')
# generate first brackets
self.generate_new_bracket()
send(CommandType.Initialized, '') |
generate a new bracket
def generate_new_bracket(self):
"""generate a new bracket"""
logger.debug(
'start to create a new SuccessiveHalving iteration, self.curr_s=%d', self.curr_s)
if self.curr_s < 0:
logger.info("s < 0, Finish this round of Hyperband in BOHB. Generate new round")
self.curr_s = self.s_max
self.brackets[self.curr_s] = Bracket(s=self.curr_s, s_max=self.s_max, eta=self.eta,
max_budget=self.max_budget, optimize_mode=self.optimize_mode)
next_n, next_r = self.brackets[self.curr_s].get_n_r()
logger.debug(
'new SuccessiveHalving iteration, next_n=%d, next_r=%d', next_n, next_r)
# rewrite with TPE
generated_hyper_configs = self.brackets[self.curr_s].get_hyperparameter_configurations(
next_n, next_r, self.cg)
self.generated_hyper_configs = generated_hyper_configs.copy() |
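For context, in Hyperband-style successive halving the initial number of configurations n and the initial budget r of a bracket s are usually derived from eta, s_max and max_budget roughly as follows (a sketch of the standard schedule; the exact rounding used by get_n_r may differ):

n = \left\lceil \frac{s_{max} + 1}{s + 1} \cdot \eta^{s} \right\rceil, \qquad r = \text{max\_budget} \cdot \eta^{-s}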
Receive the number of requested trial jobs and generate trials
Parameters
----------
data: int
number of trial jobs that nni manager asks to generate
def handle_request_trial_jobs(self, data):
"""recerive the number of request and generate trials
Parameters
----------
data: int
number of trial jobs that nni manager ask to generate
"""
# Receive new request
self.credit += data
for _ in range(self.credit):
self._request_one_trial_job() |
get one trial job, i.e., one hyperparameter configuration.
If this function is called, Command will be sent by BOHB:
a. If there is a parameter that needs to run, it will return "NewTrialJob" with a dict:
{
'parameter_id': id of new hyperparameter
'parameter_source': 'algorithm'
'parameters': value of new hyperparameter
}
b. If BOHB doesn't have a parameter waiting, it will return "NoMoreTrialJobs" with
{
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
def _request_one_trial_job(self):
"""get one trial job, i.e., one hyperparameter configuration.
If this function is called, Command will be sent by BOHB:
a. If there is a parameter that needs to run, it will return "NewTrialJob" with a dict:
{
'parameter_id': id of new hyperparameter
'parameter_source': 'algorithm'
'parameters': value of new hyperparameter
}
b. If BOHB doesn't have a parameter waiting, it will return "NoMoreTrialJobs" with
{
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
"""
if not self.generated_hyper_configs:
ret = {
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
send(CommandType.NoMoreTrialJobs, json_tricks.dumps(ret))
return
assert self.generated_hyper_configs
params = self.generated_hyper_configs.pop()
ret = {
'parameter_id': params[0],
'parameter_source': 'algorithm',
'parameters': params[1]
}
self.parameters[params[0]] = params[1]
send(CommandType.NewTrialJob, json_tricks.dumps(ret))
self.credit -= 1 |
Convert the search space from JSON format to a ConfigSpace object (dict<dict> -> ConfigSpace)
Parameters
----------
data: JSON object
search space of this experiment
def handle_update_search_space(self, data):
"""change json format to ConfigSpace format dict<dict> -> configspace
Parameters
----------
data: JSON object
search space of this experiment
"""
search_space = data
cs = CS.ConfigurationSpace()
for var in search_space:
_type = str(search_space[var]["_type"])
if _type == 'choice':
cs.add_hyperparameter(CSH.CategoricalHyperparameter(
var, choices=search_space[var]["_value"]))
elif _type == 'randint':
cs.add_hyperparameter(CSH.UniformIntegerHyperparameter(
var, lower=0, upper=search_space[var]["_value"][0]))
elif _type == 'uniform':
cs.add_hyperparameter(CSH.UniformFloatHyperparameter(
var, lower=search_space[var]["_value"][0], upper=search_space[var]["_value"][1]))
elif _type == 'quniform':
cs.add_hyperparameter(CSH.UniformFloatHyperparameter(
var, lower=search_space[var]["_value"][0], upper=search_space[var]["_value"][1],
q=search_space[var]["_value"][2]))
elif _type == 'loguniform':
cs.add_hyperparameter(CSH.UniformFloatHyperparameter(
var, lower=search_space[var]["_value"][0], upper=search_space[var]["_value"][1],
log=True))
elif _type == 'qloguniform':
cs.add_hyperparameter(CSH.UniformFloatHyperparameter(
var, lower=search_space[var]["_value"][0], upper=search_space[var]["_value"][1],
q=search_space[var]["_value"][2], log=True))
elif _type == 'normal':
cs.add_hyperparameter(CSH.NormalFloatHyperparameter(
var, mu=search_space[var]["_value"][1], sigma=search_space[var]["_value"][2]))
elif _type == 'qnormal':
cs.add_hyperparameter(CSH.NormalFloatHyperparameter(
var, mu=search_space[var]["_value"][1], sigma=search_space[var]["_value"][2],
q=search_space[var]["_value"][3]))
elif _type == 'lognormal':
cs.add_hyperparameter(CSH.NormalFloatHyperparameter(
var, mu=search_space[var]["_value"][1], sigma=search_space[var]["_value"][2],
log=True))
elif _type == 'qlognormal':
cs.add_hyperparameter(CSH.NormalFloatHyperparameter(
var, mu=search_space[var]["_value"][1], sigma=search_space[var]["_value"][2],
q=search_space[var]["_value"][3], log=True))
else:
raise ValueError(
'unrecognized type in search_space, type is {}'.format(_type))
self.search_space = cs |
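As an illustration, a single search space entry such as the hypothetical one below maps to one ConfigSpace hyperparameter through the branch for its _type:

# search space JSON (hypothetical): {"lr": {"_type": "loguniform", "_value": [1e-5, 1e-1]}}
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH

cs = CS.ConfigurationSpace()
cs.add_hyperparameter(CSH.UniformFloatHyperparameter('lr', lower=1e-5, upper=1e-1, log=True))
print(cs.get_hyperparameter('lr'))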
receive the information of trial end and generate the next configuration.
Parameters
----------
data: dict()
it has three keys: trial_job_id, event, hyper_params
trial_job_id: the id generated by training service
event: the job's state
hyper_params: the hyperparameters (a string) generated and returned by tuner
def handle_trial_end(self, data):
"""receive the information of trial end and generate next configuaration.
Parameters
----------
data: dict()
it has three keys: trial_job_id, event, hyper_params
trial_job_id: the id generated by training service
event: the job's state
hyper_params: the hyperparameters (a string) generated and returned by tuner
"""
logger.debug('Tuner handle trial end, result is %s', data)
hyper_params = json_tricks.loads(data['hyper_params'])
s, i, _ = hyper_params['parameter_id'].split('_')
hyper_configs = self.brackets[int(s)].inform_trial_end(int(i))
if hyper_configs is not None:
logger.debug(
'bracket %s next round %s, hyper_configs: %s', s, i, hyper_configs)
self.generated_hyper_configs = self.generated_hyper_configs + hyper_configs
for _ in range(self.credit):
self._request_one_trial_job()
# Finish this bracket and generate a new bracket
elif self.brackets[int(s)].no_more_trial:
self.curr_s -= 1
self.generate_new_bracket()
for _ in range(self.credit):
self._request_one_trial_job() |
receive the metric data and update Bayesian optimization with final result
Parameters
----------
data:
it is an object which has keys 'parameter_id', 'value', 'trial_job_id', 'type', 'sequence'.
Raises
------
ValueError
Data type not supported
def handle_report_metric_data(self, data):
"""reveice the metric data and update Bayesian optimization with final result
Parameters
----------
data:
it is an object which has keys 'parameter_id', 'value', 'trial_job_id', 'type', 'sequence'.
Raises
------
ValueError
Data type not supported
"""
logger.debug('handle report metric data = %s', data)
assert 'value' in data
value = extract_scalar_reward(data['value'])
if self.optimize_mode is OptimizeMode.Maximize:
reward = -value
else:
reward = value
assert 'parameter_id' in data
s, i, _ = data['parameter_id'].split('_')
logger.debug('bracket id = %s, metrics value = %s, type = %s', s, value, data['type'])
s = int(s)
assert 'type' in data
if data['type'] == 'FINAL':
# sys.maxsize marks this as a FINAL metric; FINAL and PERIODICAL sequence numbers are independent, thus not comparable.
assert 'sequence' in data
self.brackets[s].set_config_perf(
int(i), data['parameter_id'], sys.maxsize, value)
self.completed_hyper_configs.append(data)
_parameters = self.parameters[data['parameter_id']]
_parameters.pop(_KEY)
# update BO with loss, max_s budget, hyperparameters
self.cg.new_result(loss=reward, budget=data['sequence'], parameters=_parameters, update_model=True)
elif data['type'] == 'PERIODICAL':
self.brackets[s].set_config_perf(
int(i), data['parameter_id'], data['sequence'], value)
else:
raise ValueError(
'Data type not supported: {}'.format(data['type'])) |
Import additional data for tuning
Parameters
----------
data:
a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
Raises
------
AssertionError
data doesn't have the required keys 'parameter' and 'value'
def handle_import_data(self, data):
"""Import additional data for tuning
Parameters
----------
data:
a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
Raises
------
AssertionError
data doesn't have the required keys 'parameter' and 'value'
"""
_completed_num = 0
for trial_info in data:
logger.info("Importing data, current processing progress %s / %s" %(_completed_num, len(data)))
_completed_num += 1
assert "parameter" in trial_info
_params = trial_info["parameter"]
assert "value" in trial_info
_value = trial_info['value']
if not _value:
logger.info("Useless trial data, value is %s, skip this trial data." %_value)
continue
budget_exist_flag = False
barely_params = dict()
for keys in _params:
if keys == _KEY:
_budget = _params[keys]
budget_exist_flag = True
else:
barely_params[keys] = _params[keys]
if not budget_exist_flag:
_budget = self.max_budget
logger.info("Set \"TRIAL_BUDGET\" value to %s (max budget)" %self.max_budget)
if self.optimize_mode is OptimizeMode.Maximize:
reward = -_value
else:
reward = _value
self.cg.new_result(loss=reward, budget=_budget, parameters=barely_params, update_model=True)
logger.info("Successfully import tuning data to BOHB advisor.") |
data_transforms for cifar10 dataset
def data_transforms_cifar10(args):
""" data_transforms for cifar10 dataset
"""
cifar_mean = [0.49139968, 0.48215827, 0.44653124]
cifar_std = [0.24703233, 0.24348505, 0.26158768]
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(cifar_mean, cifar_std),
]
)
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length))
valid_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(cifar_mean, cifar_std)]
)
return train_transform, valid_transform |
data_transforms for mnist dataset
def data_transforms_mnist(args, mnist_mean=None, mnist_std=None):
""" data_transforms for mnist dataset
"""
if mnist_mean is None:
mnist_mean = [0.5]
if mnist_std is None:
mnist_std = [0.5]
train_transform = transforms.Compose(
[
transforms.RandomCrop(28, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mnist_mean, mnist_std),
]
)
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length))
valid_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mnist_mean, mnist_std)]
)
return train_transform, valid_transform |
Compute the mean and std value of dataset.
def get_mean_and_std(dataset):
"""Compute the mean and std value of dataset."""
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=1, shuffle=True, num_workers=2
)
mean = torch.zeros(3)
std = torch.zeros(3)
print("==> Computing mean and std..")
for inputs, _ in dataloader:
for i in range(3):
mean[i] += inputs[:, i, :, :].mean()
std[i] += inputs[:, i, :, :].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std |
Init layer parameters.
def init_params(net):
"""Init layer parameters."""
for module in net.modules():
if isinstance(module, nn.Conv2d):
init.kaiming_normal_(module.weight, mode="fan_out")
if module.bias is not None:
init.constant_(module.bias, 0)
elif isinstance(module, nn.BatchNorm2d):
init.constant_(module.weight, 1)
init.constant_(module.bias, 0)
elif isinstance(module, nn.Linear):
init.normal_(module.weight, std=1e-3)
if module.bias is not None:
init.constant_(module.bias, 0) |
EarlyStopping step on each epoch
Arguments:
metrics {float} -- metric value
def step(self, metrics):
""" EarlyStopping step on each epoch
Arguments:
metrics {float} -- metric value
"""
if self.best is None:
self.best = metrics
return False
if np.isnan(metrics):
return True
if self.is_better(metrics, self.best):
self.num_bad_epochs = 0
self.best = metrics
else:
self.num_bad_epochs += 1
if self.num_bad_epochs >= self.patience:
return True
return False |
This can have false positives.
For example, parameters can only be 0 or 5, and the summation constraint is between 6 and 7.
def check_feasibility(x_bounds, lowerbound, upperbound):
'''
This can have false positives.
For example, parameters can only be 0 or 5, and the summation constraint is between 6 and 7.
'''
# x_bounds should be sorted, so even for "discrete_int" type,
# the smallest and the largest number should the first and the last element
x_bounds_lowerbound = sum([x_bound[0] for x_bound in x_bounds])
x_bounds_upperbound = sum([x_bound[-1] for x_bound in x_bounds])
# return ((x_bounds_lowerbound <= lowerbound) and (x_bounds_upperbound >= lowerbound)) or \
# ((x_bounds_lowerbound <= upperbound) and (x_bounds_upperbound >= upperbound))
return (x_bounds_lowerbound <= lowerbound <= x_bounds_upperbound) or \
(x_bounds_lowerbound <= upperbound <= x_bounds_upperbound) |
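A quick illustration of the false-positive case the docstring warns about (values made up): with two parameters that can each only be 0 or 5, the check passes for the constraint [6, 7] even though the only achievable sums are 0, 5 and 10.

x_bounds = [[0, 5], [0, 5]]              # two "discrete_int" parameters
lowerbound, upperbound = 6, 7
low = sum(b[0] for b in x_bounds)        # 0
high = sum(b[-1] for b in x_bounds)      # 10
print(low <= lowerbound <= high or low <= upperbound <= high)   # True, yet no combination sums to 6 or 7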
The key idea is that we try to move towards the upperbound by randomly choosing one
value for each parameter. However, for the last parameter,
we need to make sure that its value can help us get above lowerbound
def rand(x_bounds, x_types, lowerbound, upperbound, max_retries=100):
'''
The key idea is that we try to move towards the upperbound by randomly choosing one
value for each parameter. However, for the last parameter,
we need to make sure that its value can help us get above lowerbound
'''
outputs = None
if check_feasibility(x_bounds, lowerbound, upperbound) is True:
# Order parameters by their range size. We want the smallest range first,
# because the corresponding parameter has less numbers to choose from
x_idx_sorted = []
for i, _ in enumerate(x_bounds):
if x_types[i] == "discrete_int":
x_idx_sorted.append([i, len(x_bounds[i])])
elif (x_types[i] == "range_int") or (x_types[i] == "range_continuous"):
x_idx_sorted.append([i, math.floor(x_bounds[i][1] - x_bounds[i][0])])
x_idx_sorted = sorted(x_idx_sorted, key=itemgetter(1))
for _ in range(max_retries):
budget_allocated = 0
outputs = [None] * len(x_bounds)
for i, _ in enumerate(x_idx_sorted):
x_idx = x_idx_sorted[i][0]
# The amount of unallocated space that we have
budget_max = upperbound - budget_allocated
# NOT the Last x that we need to assign a random number
if i < (len(x_idx_sorted) - 1):
if x_bounds[x_idx][0] <= budget_max:
if x_types[x_idx] == "discrete_int":
# Note the valid integer
temp = []
for j in x_bounds[x_idx]:
if j <= budget_max:
temp.append(j)
# Randomly pick a number from the integer array
if temp:
outputs[x_idx] = temp[random.randint(0, len(temp) - 1)]
elif (x_types[x_idx] == "range_int") or \
(x_types[x_idx] == "range_continuous"):
outputs[x_idx] = random.randint(x_bounds[x_idx][0],
min(x_bounds[x_idx][-1], budget_max))
else:
# The last x that we need to assign a random number
randint_lowerbound = lowerbound - budget_allocated
randint_lowerbound = 0 if randint_lowerbound < 0 else randint_lowerbound
# This check:
# is our smallest possible value going to overflow the available budget space,
# and is our largest possible value going to underflow the lower bound
if (x_bounds[x_idx][0] <= budget_max) and \
(x_bounds[x_idx][-1] >= randint_lowerbound):
if x_types[x_idx] == "discrete_int":
temp = []
for j in x_bounds[x_idx]:
# if (j <= budget_max) and (j >= randint_lowerbound):
if randint_lowerbound <= j <= budget_max:
temp.append(j)
if temp:
outputs[x_idx] = temp[random.randint(0, len(temp) - 1)]
elif (x_types[x_idx] == "range_int") or \
(x_types[x_idx] == "range_continuous"):
outputs[x_idx] = random.randint(randint_lowerbound,
min(x_bounds[x_idx][1], budget_max))
if outputs[x_idx] is None:
break
else:
budget_allocated += outputs[x_idx]
if None not in outputs:
break
return outputs |
Change '~' to user home directory
def expand_path(experiment_config, key):
'''Change '~' to user home directory'''
if experiment_config.get(key):
experiment_config[key] = os.path.expanduser(experiment_config[key]) |
Change relative path to absolute path
def parse_relative_path(root_path, experiment_config, key):
'''Change relative path to absolute path'''
if experiment_config.get(key) and not os.path.isabs(experiment_config.get(key)):
absolute_path = os.path.join(root_path, experiment_config.get(key))
print_normal('expand %s: %s to %s ' % (key, experiment_config[key], absolute_path))
experiment_config[key] = absolute_path |
Change the time to seconds
def parse_time(time):
'''Change the time to seconds'''
unit = time[-1]
if unit not in ['s', 'm', 'h', 'd']:
print_error('the unit of time can only be one of {s, m, h, d}')
exit(1)
time = time[:-1]
if not time.isdigit():
print_error('time format error!')
exit(1)
parse_dict = {'s':1, 'm':60, 'h':3600, 'd':86400}
return int(time) * parse_dict[unit] |
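For example, under this scheme '2h' parses to 7200 seconds and '30m' to 1800 (assuming the function above):

print(parse_time('2h'))    # 7200
print(parse_time('30m'))   # 1800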
Parse path in config file
def parse_path(experiment_config, config_path):
'''Parse path in config file'''
expand_path(experiment_config, 'searchSpacePath')
if experiment_config.get('trial'):
expand_path(experiment_config['trial'], 'codeDir')
if experiment_config.get('tuner'):
expand_path(experiment_config['tuner'], 'codeDir')
if experiment_config.get('assessor'):
expand_path(experiment_config['assessor'], 'codeDir')
if experiment_config.get('advisor'):
expand_path(experiment_config['advisor'], 'codeDir')
#if users use relative path, convert it to absolute path
root_path = os.path.dirname(config_path)
if experiment_config.get('searchSpacePath'):
parse_relative_path(root_path, experiment_config, 'searchSpacePath')
if experiment_config.get('trial'):
parse_relative_path(root_path, experiment_config['trial'], 'codeDir')
if experiment_config.get('tuner'):
parse_relative_path(root_path, experiment_config['tuner'], 'codeDir')
if experiment_config.get('assessor'):
parse_relative_path(root_path, experiment_config['assessor'], 'codeDir')
if experiment_config.get('advisor'):
parse_relative_path(root_path, experiment_config['advisor'], 'codeDir')
if experiment_config.get('machineList'):
for index in range(len(experiment_config['machineList'])):
parse_relative_path(root_path, experiment_config['machineList'][index], 'sshKeyPath') |
Validate searchspace content,
if the searchspace file is not in JSON format or its values do not contain _type and _value, which must be specified,
it will not be a valid searchspace file
def validate_search_space_content(experiment_config):
'''Validate searchspace content,
if the searchspace file is not in JSON format or its values do not contain _type and _value, which must be specified,
it will not be a valid searchspace file'''
try:
search_space_content = json.load(open(experiment_config.get('searchSpacePath'), 'r'))
for value in search_space_content.values():
if not value.get('_type') or not value.get('_value'):
print_error('please use _type and _value to specify searchspace!')
exit(1)
except:
print_error('searchspace file is not a valid json format!')
exit(1) |
Validate whether the kubeflow operators are valid
def validate_kubeflow_operators(experiment_config):
'''Validate whether the kubeflow operators are valid'''
if experiment_config.get('kubeflowConfig'):
if experiment_config.get('kubeflowConfig').get('operator') == 'tf-operator':
if experiment_config.get('trial').get('master') is not None:
print_error('kubeflow with tf-operator can not set master')
exit(1)
if experiment_config.get('trial').get('worker') is None:
print_error('kubeflow with tf-operator must set worker')
exit(1)
elif experiment_config.get('kubeflowConfig').get('operator') == 'pytorch-operator':
if experiment_config.get('trial').get('ps') is not None:
print_error('kubeflow with pytorch-operator can not set ps')
exit(1)
if experiment_config.get('trial').get('master') is None:
print_error('kubeflow with pytorch-operator must set master')
exit(1)
if experiment_config.get('kubeflowConfig').get('storage') == 'nfs':
if experiment_config.get('kubeflowConfig').get('nfs') is None:
print_error('please set nfs configuration!')
exit(1)
elif experiment_config.get('kubeflowConfig').get('storage') == 'azureStorage':
if experiment_config.get('kubeflowConfig').get('azureStorage') is None:
print_error('please set azureStorage configuration!')
exit(1)
elif experiment_config.get('kubeflowConfig').get('storage') is None:
if experiment_config.get('kubeflowConfig').get('azureStorage'):
print_error('please set storage type!')
exit(1) |
Validate whether the common values in experiment_config are valid
def validate_common_content(experiment_config):
'''Validate whether the common values in experiment_config are valid'''
if not experiment_config.get('trainingServicePlatform') or \
experiment_config.get('trainingServicePlatform') not in ['local', 'remote', 'pai', 'kubeflow', 'frameworkcontroller']:
print_error('Please set correct trainingServicePlatform!')
exit(1)
schema_dict = {
'local': LOCAL_CONFIG_SCHEMA,
'remote': REMOTE_CONFIG_SCHEMA,
'pai': PAI_CONFIG_SCHEMA,
'kubeflow': KUBEFLOW_CONFIG_SCHEMA,
'frameworkcontroller': FRAMEWORKCONTROLLER_CONFIG_SCHEMA
}
separate_schema_dict = {
'tuner': tuner_schema_dict,
'advisor': advisor_schema_dict,
'assessor': assessor_schema_dict
}
separate_builtInName_dict = {
'tuner': 'builtinTunerName',
'advisor': 'builtinAdvisorName',
'assessor': 'builtinAssessorName'
}
try:
schema_dict.get(experiment_config['trainingServicePlatform']).validate(experiment_config)
for separate_key in separate_schema_dict.keys():
if experiment_config.get(separate_key):
if experiment_config[separate_key].get(separate_builtInName_dict[separate_key]):
validate = False
for key in separate_schema_dict[separate_key].keys():
if key.__contains__(experiment_config[separate_key][separate_builtInName_dict[separate_key]]):
Schema({**separate_schema_dict[separate_key][key]}).validate(experiment_config[separate_key])
validate = True
break
if not validate:
print_error('%s %s error!' % (separate_key, separate_builtInName_dict[separate_key]))
exit(1)
else:
Schema({**separate_schema_dict[separate_key]['customized']}).validate(experiment_config[separate_key])
except SchemaError as error:
print_error('Your config file is not correct, please check your config file content!')
if error.__str__().__contains__('Wrong key'):
print_error(' '.join(error.__str__().split()[:3]))
else:
print_error(error)
exit(1)
#set default value
if experiment_config.get('maxExecDuration') is None:
experiment_config['maxExecDuration'] = '999d'
if experiment_config.get('maxTrialNum') is None:
experiment_config['maxTrialNum'] = 99999
if experiment_config['trainingServicePlatform'] == 'remote':
for index in range(len(experiment_config['machineList'])):
if experiment_config['machineList'][index].get('port') is None:
experiment_config['machineList'][index]['port'] = 22 |
check whether the file of customized tuner/assessor/advisor exists
spec_key: 'tuner', 'assessor', 'advisor'
def validate_customized_file(experiment_config, spec_key):
'''
check whether the file of customized tuner/assessor/advisor exists
spec_key: 'tuner', 'assessor', 'advisor'
'''
if experiment_config[spec_key].get('codeDir') and \
experiment_config[spec_key].get('classFileName') and \
experiment_config[spec_key].get('className'):
if not os.path.exists(os.path.join(
experiment_config[spec_key]['codeDir'],
experiment_config[spec_key]['classFileName'])):
print_error('%s file directory is not valid!'%(spec_key))
exit(1)
else:
print_error('%s file directory is not valid!'%(spec_key))
exit(1) |
Validate whether assessor in experiment_config is valid
def parse_assessor_content(experiment_config):
'''Validate whether assessor in experiment_config is valid'''
if experiment_config.get('assessor'):
if experiment_config['assessor'].get('builtinAssessorName'):
experiment_config['assessor']['className'] = experiment_config['assessor']['builtinAssessorName']
else:
validate_customized_file(experiment_config, 'assessor') |
Validate that useAnnotation and searchSpacePath do not coexist
spec_key: 'advisor' or 'tuner'
builtin_name: 'builtinAdvisorName' or 'builtinTunerName'
def validate_annotation_content(experiment_config, spec_key, builtin_name):
'''
Validate that useAnnotation and searchSpacePath do not coexist
spec_key: 'advisor' or 'tuner'
builtin_name: 'builtinAdvisorName' or 'builtinTunerName'
'''
if experiment_config.get('useAnnotation'):
if experiment_config.get('searchSpacePath'):
print_error('If you set useAnnotation=true, please leave searchSpacePath empty')
exit(1)
else:
# validate searchSpaceFile
if experiment_config[spec_key].get(builtin_name) == 'NetworkMorphism':
return
if experiment_config[spec_key].get(builtin_name):
if experiment_config.get('searchSpacePath') is None:
print_error('Please set searchSpacePath!')
exit(1)
validate_search_space_content(experiment_config) |
validate the trial config in pai platform
def validate_pai_trial_conifg(experiment_config):
'''validate the trial config in pai platform'''
if experiment_config.get('trainingServicePlatform') == 'pai':
if experiment_config.get('trial').get('shmMB') and \
experiment_config['trial']['shmMB'] > experiment_config['trial']['memoryMB']:
print_error('shmMB should be no more than memoryMB!')
exit(1) |
Validate whether experiment_config is valid
def validate_all_content(experiment_config, config_path):
'''Validate whether experiment_config is valid'''
parse_path(experiment_config, config_path)
validate_common_content(experiment_config)
validate_pai_trial_conifg(experiment_config)
experiment_config['maxExecDuration'] = parse_time(experiment_config['maxExecDuration'])
if experiment_config.get('advisor'):
if experiment_config.get('assessor') or experiment_config.get('tuner'):
print_error('advisor could not be set with assessor or tuner simultaneously!')
exit(1)
parse_advisor_content(experiment_config)
validate_annotation_content(experiment_config, 'advisor', 'builtinAdvisorName')
else:
if not experiment_config.get('tuner'):
raise Exception('Please provide tuner spec!')
parse_tuner_content(experiment_config)
parse_assessor_content(experiment_config)
validate_annotation_content(experiment_config, 'tuner', 'builtinTunerName') |
get urls of local machine
def get_local_urls(port):
'''get urls of local machine'''
url_list = []
for name, info in psutil.net_if_addrs().items():
for addr in info:
if AddressFamily.AF_INET == addr.family:
url_list.append('http://{}:{}'.format(addr.address, port))
return url_list |
Parse an annotation string.
Return an AST Expr node.
code: annotation string (excluding '@')
def parse_annotation(code):
"""Parse an annotation string.
Return an AST Expr node.
code: annotation string (excluding '@')
"""
module = ast.parse(code)
assert type(module) is ast.Module, 'internal error #1'
assert len(module.body) == 1, 'Annotation contains more than one expression'
assert type(module.body[0]) is ast.Expr, 'Annotation is not expression'
return module.body[0] |
Parse an annotation function.
Return the value of `name` keyword argument and the AST Call node.
func_name: expected function name
def parse_annotation_function(code, func_name):
"""Parse an annotation function.
Return the value of `name` keyword argument and the AST Call node.
func_name: expected function name
"""
expr = parse_annotation(code)
call = expr.value
assert type(call) is ast.Call, 'Annotation is not a function call'
assert type(call.func) is ast.Attribute, 'Unexpected annotation function'
assert type(call.func.value) is ast.Name, 'Invalid annotation function name'
assert call.func.value.id == 'nni', 'Annotation is not a NNI function'
assert call.func.attr == func_name, 'internal error #2'
assert len(call.keywords) == 1, 'Annotation function contains more than one keyword argument'
assert call.keywords[0].arg == 'name', 'Annotation keyword argument is not "name"'
name = call.keywords[0].value
return name, call |
Parse `nni.variable` expression.
Return the name argument and AST node of annotated expression.
code: annotation string
def parse_nni_variable(code):
"""Parse `nni.variable` expression.
Return the name argument and AST node of annotated expression.
code: annotation string
"""
name, call = parse_annotation_function(code, 'variable')
assert len(call.args) == 1, 'nni.variable contains more than one argument'
arg = call.args[0]
assert type(arg) is ast.Call, 'Value of nni.variable is not a function call'
assert type(arg.func) is ast.Attribute, 'nni.variable value is not a NNI function'
assert type(arg.func.value) is ast.Name, 'nni.variable value is not a NNI function'
assert arg.func.value.id == 'nni', 'nni.variable value is not a NNI function'
name_str = astor.to_source(name).strip()
keyword_arg = ast.keyword(arg='name', value=ast.Str(s=name_str))
arg.keywords.append(keyword_arg)
if arg.func.attr == 'choice':
convert_args_to_dict(arg)
return name, arg |
Parse `nni.function_choice` expression.
Return the AST node of annotated expression and a list of dumped function call expressions.
code: annotation string
def parse_nni_function(code):
"""Parse `nni.function_choice` expression.
Return the AST node of annotated expression and a list of dumped function call expressions.
code: annotation string
"""
name, call = parse_annotation_function(code, 'function_choice')
funcs = [ast.dump(func, False) for func in call.args]
convert_args_to_dict(call, with_lambda=True)
name_str = astor.to_source(name).strip()
call.keywords[0].value = ast.Str(s=name_str)
return call, funcs |
Convert all positional args into a single dict argument, mapping each arg (or, for non-literal args, its source text) to the arg itself.
Return the AST Call node with only one arg that is the dictionary.
def convert_args_to_dict(call, with_lambda=False):
"""Convert all positional args into a single dict argument, mapping each arg (or, for non-literal args, its source text) to the arg itself.
Return the AST Call node with only one arg that is the dictionary.
"""
keys, values = list(), list()
for arg in call.args:
if type(arg) in [ast.Str, ast.Num]:
arg_value = arg
else:
# if arg is not a string or a number, we use its source code as the key
arg_value = astor.to_source(arg).strip('\n"')
arg_value = ast.Str(str(arg_value))
arg = make_lambda(arg) if with_lambda else arg
keys.append(arg_value)
values.append(arg)
del call.args[:]
call.args.append(ast.Dict(keys=keys, values=values))
return call |
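A small sketch of the rewrite this helper performs, assuming ast and astor are imported as in the surrounding module; the nni.choice call is a hypothetical input:

call = ast.parse("nni.choice(2, 'relu', max_pool())").body[0].value
convert_args_to_dict(call)
print(astor.to_source(call).strip())
# roughly: nni.choice({2: 2, 'relu': 'relu', 'max_pool()': max_pool()})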
Wrap an AST Call node into a lambda expression node.
call: ast.Call node
def make_lambda(call):
"""Wrap an AST Call node into a lambda expression node.
call: ast.Call node
"""
empty_args = ast.arguments(args=[], vararg=None, kwarg=None, defaults=[])
return ast.Lambda(args=empty_args, body=call) |
Replace a node annotated by `nni.variable`.
node: the AST node to replace
annotation: annotation string
def replace_variable_node(node, annotation):
"""Replace a node annotated by `nni.variable`.
node: the AST node to replace
annotation: annotation string
"""
assert type(node) is ast.Assign, 'nni.variable is not annotating assignment expression'
assert len(node.targets) == 1, 'Annotated assignment has more than one left-hand value'
name, expr = parse_nni_variable(annotation)
assert test_variable_equal(node.targets[0], name), 'Annotated variable has wrong name'
node.value = expr
return node |
Replace a node annotated by `nni.function_choice`.
node: the AST node to replace
annotation: annotation string
def replace_function_node(node, annotation):
"""Replace a node annotated by `nni.function_choice`.
node: the AST node to replace
annotation: annotation string
"""
target, funcs = parse_nni_function(annotation)
FuncReplacer(funcs, target).visit(node)
return node |
Annotate user code.
Return annotated code (str) if annotation detected; return None if not.
code: original user code (str)
def parse(code):
"""Annotate user code.
Return annotated code (str) if annotation detected; return None if not.
code: original user code (str)
"""
try:
ast_tree = ast.parse(code)
except Exception:
raise RuntimeError('Bad Python code')
transformer = Transformer()
try:
transformer.visit(ast_tree)
except AssertionError as exc:
raise RuntimeError('%d: %s' % (ast_tree.last_line, exc.args[0]))
if not transformer.annotated:
return None
last_future_import = -1
import_nni = ast.Import(names=[ast.alias(name='nni', asname=None)])
nodes = ast_tree.body
for i, _ in enumerate(nodes):
if type(nodes[i]) is ast.ImportFrom and nodes[i].module == '__future__':
last_future_import = i
nodes.insert(last_future_import + 1, import_nni)
return astor.to_source(ast_tree) |
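A hedged usage sketch; the file names are placeholders:

with open("trial.py") as src:
    annotated = parse(src.read())
if annotated is not None:  # None means no NNI annotations were found
    with open("trial_annotated.py", "w") as dst:
        dst.write(annotated)  # rewritten source with `import nni` inserted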
main function.
def main():
'''
main function.
'''
args = parse_args()
if args.multi_thread:
enable_multi_thread()
if args.advisor_class_name:
# advisor is enabled and starts to run
if args.multi_phase:
raise AssertionError('multi_phase is not supported in advisor')
if args.advisor_class_name in AdvisorModuleName:
dispatcher = create_builtin_class_instance(
args.advisor_class_name,
args.advisor_args, True)
else:
dispatcher = create_customized_class_instance(
args.advisor_directory,
args.advisor_class_filename,
args.advisor_class_name,
args.advisor_args)
if dispatcher is None:
raise AssertionError('Failed to create Advisor instance')
try:
dispatcher.run()
except Exception as exception:
logger.exception(exception)
raise
else:
# tuner (and assessor) is enabled and starts to run
tuner = None
assessor = None
if args.tuner_class_name in ModuleName:
tuner = create_builtin_class_instance(
args.tuner_class_name,
args.tuner_args)
else:
tuner = create_customized_class_instance(
args.tuner_directory,
args.tuner_class_filename,
args.tuner_class_name,
args.tuner_args)
if tuner is None:
raise AssertionError('Failed to create Tuner instance')
if args.assessor_class_name:
if args.assessor_class_name in ModuleName:
assessor = create_builtin_class_instance(
args.assessor_class_name,
args.assessor_args)
else:
assessor = create_customized_class_instance(
args.assessor_directory,
args.assessor_class_filename,
args.assessor_class_name,
args.assessor_args)
if assessor is None:
raise AssertionError('Failed to create Assessor instance')
if args.multi_phase:
dispatcher = MultiPhaseMsgDispatcher(tuner, assessor)
else:
dispatcher = MsgDispatcher(tuner, assessor)
try:
dispatcher.run()
tuner._on_exit()
if assessor is not None:
assessor._on_exit()
except Exception as exception:
logger.exception(exception)
tuner._on_error()
if assessor is not None:
assessor._on_error()
raise |
Load yaml file content
def get_yml_content(file_path):
'''Load yaml file content'''
try:
with open(file_path, 'r') as file:
return yaml.load(file, Loader=yaml.Loader)
except yaml.scanner.ScannerError as err:
print_error('yaml file format error!')
exit(1)
except Exception as exception:
print_error(exception)
exit(1) |
Detect if the port is used
def detect_port(port):
'''Detect if the port is used'''
socket_test = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
socket_test.connect(('127.0.0.1', int(port)))
socket_test.close()
return True
except OSError:
return False |
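For example, a hypothetical helper built on detect_port that picks the first unused port in a range:

def find_idle_port(start=8080, end=8100):
    """Return the first port in [start, end) that is not in use, or None (illustrative helper)."""
    for port in range(start, end):
        if not detect_port(port):
            return port
    return None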
Create the Gaussian Mixture Model
def create_model(samples_x, samples_y_aggregation, percentage_goodbatch=0.34):
'''
Create the Gaussian Mixture Model
'''
samples = [samples_x[i] + [samples_y_aggregation[i]] for i in range(0, len(samples_x))]
# Sorts so that we can get the top samples
samples = sorted(samples, key=itemgetter(-1))
samples_goodbatch_size = int(len(samples) * percentage_goodbatch)
samples_goodbatch = samples[0:samples_goodbatch_size]
samples_badbatch = samples[samples_goodbatch_size:]
samples_x_goodbatch = [sample_goodbatch[0:-1] for sample_goodbatch in samples_goodbatch]
#samples_y_goodbatch = [sample_goodbatch[-1] for sample_goodbatch in samples_goodbatch]
samples_x_badbatch = [sample_badbatch[0:-1] for sample_badbatch in samples_badbatch]
# === Trains GMM clustering models === #
#sys.stderr.write("[%s] Train GMM's GMM model\n" % (os.path.basename(__file__)))
bgmm_goodbatch = mm.BayesianGaussianMixture(n_components=max(1, samples_goodbatch_size - 1))
bad_n_components = max(1, len(samples_x) - samples_goodbatch_size - 1)
bgmm_badbatch = mm.BayesianGaussianMixture(n_components=bad_n_components)
bgmm_goodbatch.fit(samples_x_goodbatch)
bgmm_badbatch.fit(samples_x_badbatch)
model = {}
model['clusteringmodel_good'] = bgmm_goodbatch
model['clusteringmodel_bad'] = bgmm_badbatch
return model |
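A minimal sketch with synthetic data (values are random and purely illustrative; mm is assumed to be sklearn.mixture):

import random
samples_x = [[random.random(), random.random()] for _ in range(20)]
samples_y = [x[0] + x[1] for x in samples_x]  # lower values are treated as better
model = create_model(samples_x, samples_y)
# model['clusteringmodel_good'] / model['clusteringmodel_bad'] are fitted
# BayesianGaussianMixture instances over the good and bad sample batches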
Select with random starting points
def selection_r(acquisition_function,
samples_y_aggregation,
x_bounds,
x_types,
regressor_gp,
num_starting_points=100,
minimize_constraints_fun=None):
'''
Select with random starting points
'''
minimize_starting_points = [lib_data.rand(x_bounds, x_types) \
for i in range(0, num_starting_points)]
outputs = selection(acquisition_function, samples_y_aggregation,
x_bounds, x_types, regressor_gp,
minimize_starting_points,
minimize_constraints_fun=minimize_constraints_fun)
return outputs |
Select the next candidate by exercising the given acquisition function ('ei', 'lc', or 'lm')
def selection(acquisition_function,
samples_y_aggregation,
x_bounds, x_types,
regressor_gp,
minimize_starting_points,
minimize_constraints_fun=None):
'''
Select the next candidate by exercising the given acquisition function ('ei', 'lc', or 'lm')
'''
outputs = None
sys.stderr.write("[%s] Exercise \"%s\" acquisition function\n" \
% (os.path.basename(__file__), acquisition_function))
if acquisition_function == "ei":
outputs = lib_acquisition_function.next_hyperparameter_expected_improvement(\
gp_prediction.predict, [regressor_gp], x_bounds, x_types, \
samples_y_aggregation, minimize_starting_points, \
minimize_constraints_fun=minimize_constraints_fun)
elif acquisition_function == "lc":
outputs = lib_acquisition_function.next_hyperparameter_lowest_confidence(\
gp_prediction.predict, [regressor_gp], x_bounds, x_types,\
minimize_starting_points, minimize_constraints_fun=minimize_constraints_fun)
elif acquisition_function == "lm":
outputs = lib_acquisition_function.next_hyperparameter_lowest_mu(\
gp_prediction.predict, [regressor_gp], x_bounds, x_types,\
minimize_starting_points, minimize_constraints_fun=minimize_constraints_fun)
return outputs |
Reports intermediate result to Assessor.
metric: serializable object.
def report_intermediate_result(metric):
"""Reports intermediate result to Assessor.
metric: serializable object.
"""
global _intermediate_seq
assert _params is not None, 'nni.get_next_parameter() needs to be called before report_intermediate_result'
metric = json_tricks.dumps({
'parameter_id': _params['parameter_id'],
'trial_job_id': trial_env_vars.NNI_TRIAL_JOB_ID,
'type': 'PERIODICAL',
'sequence': _intermediate_seq,
'value': metric
})
_intermediate_seq += 1
platform.send_metric(metric) |
Reports final result to tuner.
metric: serializable object.
def report_final_result(metric):
"""Reports final result to tuner.
metric: serializable object.
"""
assert _params is not None, 'nni.get_next_parameter() needs to be called before report_final_result'
metric = json_tricks.dumps({
'parameter_id': _params['parameter_id'],
'trial_job_id': trial_env_vars.NNI_TRIAL_JOB_ID,
'type': 'FINAL',
'sequence': 0, # TODO: may be unnecessary
'value': metric
})
platform.send_metric(metric) |
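Typical trial-side usage of the two reporting helpers; the training function here is hypothetical:

params = nni.get_next_parameter()        # must be called before reporting
for epoch in range(10):
    acc = train_one_epoch(params)        # hypothetical user-defined training step
    nni.report_intermediate_result(acc)  # forwarded to the assessor every epoch
nni.report_final_result(acc)             # forwarded to the tuner once at the end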
get args from command line
def get_args():
""" get args from command line
"""
parser = argparse.ArgumentParser("FashionMNIST")
parser.add_argument("--batch_size", type=int, default=128, help="batch size")
parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer")
parser.add_argument("--epochs", type=int, default=200, help="epoch limit")
parser.add_argument(
"--learning_rate", type=float, default=0.001, help="learning rate"
)
parser.add_argument("--cutout", action="store_true", default=False, help="use cutout")
parser.add_argument("--cutout_length", type=int, default=8, help="cutout length")
parser.add_argument(
"--model_path", type=str, default="./", help="Path to save the destination model"
)
return parser.parse_args() |
build model from json representation
def build_graph_from_json(ir_model_json):
"""build model from json representation
"""
graph = json_to_graph(ir_model_json)
logging.debug(graph.operation_history)
model = graph.produce_torch_model()
return model |
parse received messages into global variables
def parse_rev_args(receive_msg):
""" parse received messages into global variables
"""
global trainloader
global testloader
global net
global criterion
global optimizer
# Loading Data
logger.debug("Preparing data..")
raw_train_data = torchvision.datasets.FashionMNIST(
root="./data", train=True, download=True
)
dataset_mean, dataset_std = (
[raw_train_data.train_data.float().mean() / 255],
[raw_train_data.train_data.float().std() / 255],
)
transform_train, transform_test = utils.data_transforms_mnist(
args, dataset_mean, dataset_std
)
trainset = torchvision.datasets.FashionMNIST(
root="./data", train=True, download=True, transform=transform_train
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=args.batch_size, shuffle=True, num_workers=2
)
testset = torchvision.datasets.FashionMNIST(
root="./data", train=False, download=True, transform=transform_test
)
testloader = torch.utils.data.DataLoader(
testset, batch_size=args.batch_size, shuffle=False, num_workers=2
)
# Model
logger.debug("Building model..")
net = build_graph_from_json(receive_msg)
net = net.to(device)
criterion = nn.CrossEntropyLoss()
if args.optimizer == "SGD":
optimizer = optim.SGD(
net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4
)
if args.optimizer == "Adadelta":
optimizer = optim.Adadelta(net.parameters(), lr=args.learning_rate)
if args.optimizer == "Adagrad":
optimizer = optim.Adagrad(net.parameters(), lr=args.learning_rate)
if args.optimizer == "Adam":
optimizer = optim.Adam(net.parameters(), lr=args.learning_rate)
if args.optimizer == "Adamax":
optimizer = optim.Adamax(net.parameters(), lr=args.learning_rate)
if args.optimizer == "RMSprop":
optimizer = optim.RMSprop(net.parameters(), lr=args.learning_rate)
return 0 |
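The optimizer selection above could equally be table-driven; a sketch of that alternative (same torch.optim classes, no behavior change intended):

OPTIMIZER_FACTORIES = {
    "SGD": lambda p: optim.SGD(p, lr=args.learning_rate, momentum=0.9, weight_decay=5e-4),
    "Adadelta": lambda p: optim.Adadelta(p, lr=args.learning_rate),
    "Adagrad": lambda p: optim.Adagrad(p, lr=args.learning_rate),
    "Adam": lambda p: optim.Adam(p, lr=args.learning_rate),
    "Adamax": lambda p: optim.Adamax(p, lr=args.learning_rate),
    "RMSprop": lambda p: optim.RMSprop(p, lr=args.learning_rate),
}
optimizer = OPTIMIZER_FACTORIES[args.optimizer](net.parameters())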
train the model for one epoch on the training set
def train(epoch):
""" train the model for one epoch on the training set
"""
global trainloader
global testloader
global net
global criterion
global optimizer
logger.debug("Epoch: %d", epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
acc = 100.0 * correct / total
logger.debug(
"Loss: %.3f | Acc: %.3f%% (%d/%d)",
train_loss / (batch_idx + 1),
100.0 * correct / total,
correct,
total,
)
return acc |
Freeze BatchNorm layers.
def freeze_bn(self):
'''Freeze BatchNorm layers.'''
for layer in self.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.eval() |
parse received messages into global variables
def parse_rev_args(receive_msg):
""" parse received messages into global variables
"""
global trainloader
global testloader
global net
global criterion
global optimizer
# Loading Data
logger.debug("Preparing data..")
transform_train, transform_test = utils.data_transforms_cifar10(args)
trainset = torchvision.datasets.CIFAR10(
root="./data", train=True, download=True, transform=transform_train
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=args.batch_size, shuffle=True, num_workers=2
)
testset = torchvision.datasets.CIFAR10(
root="./data", train=False, download=True, transform=transform_test
)
testloader = torch.utils.data.DataLoader(
testset, batch_size=args.batch_size, shuffle=False, num_workers=2
)
# Model
logger.debug("Building model..")
net = build_graph_from_json(receive_msg)
net = net.to(device)
criterion = nn.CrossEntropyLoss()
if device == "cuda" and torch.cuda.device_count() > 1:
net = torch.nn.DataParallel(net)
if args.optimizer == "SGD":
optimizer = optim.SGD(
net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4
)
if args.optimizer == "Adadelta":
optimizer = optim.Adadelta(net.parameters(), lr=args.learning_rate)
if args.optimizer == "Adagrad":
optimizer = optim.Adagrad(net.parameters(), lr=args.learning_rate)
if args.optimizer == "Adam":
optimizer = optim.Adam(net.parameters(), lr=args.learning_rate)
if args.optimizer == "Adamax":
optimizer = optim.Adamax(net.parameters(), lr=args.learning_rate)
if args.optimizer == "RMSprop":
optimizer = optim.RMSprop(net.parameters(), lr=args.learning_rate)
return 0 |
Unified login interface
:param config_file: login data file; if omitted, log in with explicit parameters instead
:param user: broker account number, or Xueqiu username
:param password: password; the encrypted password for brokers, the plain-text password for Xueqiu
:param account: [required for Xueqiu login] Xueqiu phone number (either email or phone)
:param portfolio_code: [required for Xueqiu login] portfolio code
:param portfolio_market: [required for Xueqiu login] trading market,
one of ['cn', 'us', 'hk'], default 'cn'
def prepare(self, config_file=None, user=None, password=None, **kwargs):
"""登录的统一接口
:param config_file 登录数据文件,若无则选择参数登录模式
:param user: 各家券商的账号或者雪球的用户名
:param password: 密码, 券商为加密后的密码,雪球为明文密码
:param account: [雪球登录需要]雪球手机号(邮箱手机二选一)
:param portfolio_code: [雪球登录需要]组合代码
:param portfolio_market: [雪球登录需要]交易市场,
可选['cn', 'us', 'hk'] 默认 'cn'
"""
if config_file is not None:
self.read_config(config_file)
else:
self._prepare_account(user, password, **kwargs)
self.autologin() |
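A hedged usage sketch; the broker key, file name, and credentials are placeholders and depend on the easytrader version:

import easytrader
user = easytrader.use("ht")  # hypothetical broker key
user.prepare(user="your_account", password="your_encrypted_password")
# or load everything from a config file instead:
user.prepare("account.json")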
Perform automatic login
:param limit: maximum number of login attempts
def autologin(self, limit=10):
"""实现自动登录
:param limit: 登录次数限制
"""
for _ in range(limit):
if self.login():
break
else:
raise exceptions.NotLoginError(
"登录失败次数过多, 请检查密码是否正确 / 券商服务器是否处于维护中 / 网络连接是否正常"
)
self.keepalive() |
Start the heartbeat thread that keeps the session alive
def keepalive(self):
"""启动保持在线的进程 """
if self.heart_thread.is_alive():
self.heart_active = True
else:
self.heart_thread.start() |
Read config
def __read_config(self):
"""读取 config"""
self.config = helpers.file2dict(self.config_path)
self.global_config = helpers.file2dict(self.global_config_path)
self.config.update(self.global_config) |
By default, return the settlement statements of the last 30 days; the query can usually only return data from the most recent 90 days.
:return:
def exchangebill(self):
"""
By default, return the settlement statements of the last 30 days; the query can usually only return data from the most recent 90 days.
:return:
"""
# TODO currently only implemented in the Huatai (华泰) subclass
start_date, end_date = helpers.get_30_date()
return self.get_exchangebill(start_date, end_date) |
Send a request to the API and filter the returned result
:param params: dynamic parameters required by the trade
def do(self, params):
"""发起对 api 的请求并过滤返回结果
:param params: 交易所需的动态参数"""
request_params = self.create_basic_params()
request_params.update(params)
response_data = self.request(request_params)
try:
format_json_data = self.format_response_data(response_data)
# pylint: disable=broad-except
except Exception:
# Caused by the server forcibly logging the session out
return None
return_data = self.fix_error_data(format_json_data)
try:
self.check_login_status(return_data)
except exceptions.NotLoginError:
self.autologin()
return return_data |
Format the returned values into their correct types
:param response_data: the returned data
def format_response_data_type(self, response_data):
"""格式化返回的值为正确的类型
:param response_data: 返回的数据
"""
# only a list of record dicts needs per-field type conversion
if not isinstance(response_data, list):
return response_data
int_match_str = "|".join(self.config["response_format"]["int"])
float_match_str = "|".join(self.config["response_format"]["float"])
for item in response_data:
for key in item:
try:
if re.search(int_match_str, key) is not None:
item[key] = helpers.str2num(item[key], "int")
elif re.search(float_match_str, key) is not None:
item[key] = helpers.str2num(item[key], "float")
except ValueError:
continue
return response_data |
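An illustrative before/after, assuming the broker config maps 'int' to ['entrust_amount'] and 'float' to ['entrust_price'] (the field names and regexes are assumptions) and that trader is an initialized instance:

raw = [{"entrust_amount": "100", "entrust_price": "12.50"}]
formatted = trader.format_response_data_type(raw)
# formatted == [{'entrust_amount': 100, 'entrust_price': 12.5}]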
Log in to the client
:param config_path: login config file; mutually exclusive with logging in via explicit parameters
:param user: account number
:param password: plain-text password
:param exe_path: client path, e.g. r'C:\\htzqzyb2\\xiadan.exe'; defaults to r'C:\\htzqzyb2\\xiadan.exe'
:param comm_password: communication password
:return:
def prepare(
self,
config_path=None,
user=None,
password=None,
exe_path=None,
comm_password=None,
**kwargs
):
"""
Log in to the client
:param config_path: login config file; mutually exclusive with logging in via explicit parameters
:param user: account number
:param password: plain-text password
:param exe_path: client path, e.g. r'C:\\htzqzyb2\\xiadan.exe'; defaults to r'C:\\htzqzyb2\\xiadan.exe'
:param comm_password: communication password
:return:
"""
params = locals().copy()
params.pop("self")
if config_path is not None:
account = helpers.file2dict(config_path)
params["user"] = account["user"]
params["password"] = account["password"]
params["broker"] = self._broker
response = self._s.post(self._api + "/prepare", json=params)
if response.status_code >= 300:
raise Exception(response.json()["error"])
return response.json() |
Follow a RiceQuant paper-trading run; supports multiple users and multiple strategies
:param users: easytrader user object(s); a list can be used to specify multiple users
:param run_id: RiceQuant paper-trading run ID; a list can be used to specify multiple runs
:param track_interval: polling interval for the paper-trading run, in seconds
:param trade_cmd_expire_seconds: expiration time of trade commands, in seconds
:param cmd_cache: whether to load the cache of previously executed commands, so that already traded commands are not re-executed after a restart
:param entrust_prop: order type; 'limit' for limit price, 'market' for market price; only implemented for Yinhe
:param send_interval: interval between sending trades, default 0 s; increase it to avoid a buy failing for insufficient funds because the preceding sell has not yet been filled
def follow(
self,
users,
run_id,
track_interval=1,
trade_cmd_expire_seconds=120,
cmd_cache=True,
entrust_prop="limit",
send_interval=0,
):
"""跟踪ricequant对应的模拟交易,支持多用户多策略
:param users: 支持easytrader的用户对象,支持使用 [] 指定多个用户
:param run_id: ricequant 的模拟交易ID,支持使用 [] 指定多个模拟交易
:param track_interval: 轮训模拟交易时间,单位为秒
:param trade_cmd_expire_seconds: 交易指令过期时间, 单位为秒
:param cmd_cache: 是否读取存储历史执行过的指令,防止重启时重复执行已经交易过的指令
:param entrust_prop: 委托方式, 'limit' 为限价,'market' 为市价, 仅在银河实现
:param send_interval: 交易发送间隔, 默认为0s。调大可防止卖出买入时卖出单没有及时成交导致的买入金额不足
"""
users = self.warp_list(users)
run_ids = self.warp_list(run_id)
if cmd_cache:
self.load_expired_cmd_cache()
self.start_trader_thread(
users, trade_cmd_expire_seconds, entrust_prop, send_interval
)
workers = []
for id_ in run_ids:
strategy_name = self.extract_strategy_name(id_)
strategy_worker = Thread(
target=self.track_strategy_worker,
args=[id_, strategy_name],
kwargs={"interval": track_interval},
)
strategy_worker.start()
workers.append(strategy_worker)
log.info("开始跟踪策略: %s", strategy_name)
for worker in workers:
worker.join() |
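A hedged end-to-end sketch; the broker key, run id, and credentials are placeholders and depend on the easytrader version:

import easytrader
user = easytrader.use("yh_client")           # hypothetical broker key
user.prepare("account.json")
follower = easytrader.follower("ricequant")  # RiceQuant follower, if available in this version
follower.login(user="rq_account", password="rq_password")
follower.follow(user, run_id=123456, track_interval=10)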
Log in to the client
:param user: account number
:param password: plain-text password
:param exe_path: client path, e.g. 'C:\\中国银河证券双子星3.2\\Binarystar.exe';
defaults to 'C:\\中国银河证券双子星3.2\\Binarystar.exe'
:param comm_password: communication password; required for Huatai, may be omitted otherwise
:return:
def login(self, user, password, exe_path, comm_password=None, **kwargs):
"""
Log in to the client
:param user: account number
:param password: plain-text password
:param exe_path: client path, e.g. 'C:\\中国银河证券双子星3.2\\Binarystar.exe';
defaults to 'C:\\中国银河证券双子星3.2\\Binarystar.exe'
:param comm_password: communication password; required for Huatai, may be omitted otherwise
:return:
"""
try:
self._app = pywinauto.Application().connect(
path=self._run_exe_path(exe_path), timeout=1
)
# pylint: disable=broad-except
except Exception:
self._app = pywinauto.Application().start(exe_path)
# wait for the login window to be ready
while True:
try:
self._app.top_window().Edit1.wait("ready")
break
except RuntimeError:
pass
self._app.top_window().Edit1.type_keys(user)
self._app.top_window().Edit2.type_keys(password)
edit3 = self._app.top_window().window(control_id=0x3eb)
while True:
try:
code = self._handle_verify_code()
edit3.type_keys(code)
time.sleep(1)
self._app.top_window()["确定(Y)"].click()
# detect whether the login succeeded
try:
self._app.top_window().wait_not("exists", 5)
break
# pylint: disable=broad-except
except Exception:
self._app.top_window()["确定"].click()
# pylint: disable=broad-except
except Exception:
pass
self._app = pywinauto.Application().connect(
path=self._run_exe_path(exe_path), timeout=10
)
self._main = self._app.window(title="网上股票交易系统5.0") |
:param user: username
:param password: password
:param exe_path: client path, e.g.
:param comm_password:
:param kwargs:
:return:
def login(self, user, password, exe_path, comm_password=None, **kwargs):
"""
:param user: username
:param password: password
:param exe_path: client path, e.g.
:param comm_password:
:param kwargs:
:return:
"""
if comm_password is None:
raise ValueError("华泰必须设置通讯密码")
try:
self._app = pywinauto.Application().connect(
path=self._run_exe_path(exe_path), timeout=1
)
# pylint: disable=broad-except
except Exception:
self._app = pywinauto.Application().start(exe_path)
# wait for the login window to be ready
while True:
try:
self._app.top_window().Edit1.wait("ready")
break
except RuntimeError:
pass
self._app.top_window().Edit1.type_keys(user)
self._app.top_window().Edit2.type_keys(password)
self._app.top_window().Edit3.type_keys(comm_password)
self._app.top_window().button0.click()
# detect whether the login succeeded
self._app.top_window().wait_not("exists", 10)
self._app = pywinauto.Application().connect(
path=self._run_exe_path(exe_path), timeout=10
)
self._close_prompt_windows()
self._main = self._app.window(title="网上股票交易系统5.0") |