List trial
def trial_ls(args):
'''List trial'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
print_error('Experiment is not running...')
return
running, response = check_rest_server_quick(rest_port)
if running:
response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT)
if response and check_response(response):
content = json.loads(response.text)
for index, value in enumerate(content):
content[index] = convert_time_stamp_to_date(value)
print(json.dumps(content, indent=4, sort_keys=True, separators=(',', ':')))
else:
print_error('List trial failed...')
else:
print_error('Restful server is not running...') |
Kill trial job
def trial_kill(args):
'''Kill trial job'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
print_error('Experiment is not running...')
return
running, _ = check_rest_server_quick(rest_port)
if running:
response = rest_delete(trial_job_id_url(rest_port, args.id), REST_TIME_OUT)
if response and check_response(response):
print(response.text)
else:
print_error('Kill trial job failed...')
else:
print_error('Restful server is not running...') |
Get experiment information
def list_experiment(args):
'''Get experiment information'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
print_error('Experiment is not running...')
return
running, _ = check_rest_server_quick(rest_port)
if running:
response = rest_get(experiment_url(rest_port), REST_TIME_OUT)
if response and check_response(response):
content = convert_time_stamp_to_date(json.loads(response.text))
print(json.dumps(content, indent=4, sort_keys=True, separators=(',', ':')))
else:
print_error('List experiment failed...')
else:
print_error('Restful server is not running...') |
Show the status of experiment
def experiment_status(args):
'''Show the status of experiment'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
result, response = check_rest_server_quick(rest_port)
if not result:
print_normal('Restful server is not running...')
else:
print(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':'))) |
internal function to call get_log_content
def log_internal(args, filetype):
'''internal function to call get_log_content'''
file_name = get_config_filename(args)
if filetype == 'stdout':
file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'stdout')
else:
file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'stderr')
print(check_output_command(file_full_path, head=args.head, tail=args.tail)) |
get trial log path
def log_trial(args):
'''get trial log path'''
trial_id_path_dict = {}
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
print_error('Experiment is not running...')
return
running, response = check_rest_server_quick(rest_port)
if running:
response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT)
if response and check_response(response):
content = json.loads(response.text)
for trial in content:
trial_id_path_dict[trial['id']] = trial['logPath']
else:
print_error('Restful server is not running...')
exit(1)
if args.id:
if args.trial_id:
if trial_id_path_dict.get(args.trial_id):
print_normal('id:' + args.trial_id + ' path:' + trial_id_path_dict[args.trial_id])
else:
print_error('trial id is not valid!')
exit(1)
else:
print_error('please specify the trial id!')
exit(1)
else:
for key in trial_id_path_dict:
print('id:' + key + ' path:' + trial_id_path_dict[key]) |
show the url of web ui
def webui_url(args):
'''show the url of web ui'''
nni_config = Config(get_config_filename(args))
print_normal('{0} {1}'.format('Web UI url:', ' '.join(nni_config.get_config('webuiUrl')))) |
get the information of all experiments
def experiment_list(args):
'''get the information of all experiments'''
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
if not experiment_dict:
print('There is no experiment running...')
exit(1)
update_experiment()
experiment_id_list = []
if args.all and args.all == 'all':
for key in experiment_dict.keys():
experiment_id_list.append(key)
else:
for key in experiment_dict.keys():
if experiment_dict[key]['status'] != 'STOPPED':
experiment_id_list.append(key)
if not experiment_id_list:
print_warning('There is no experiment running...\nYou can use \'nnictl experiment list all\' to list all stopped experiments!')
experiment_information = ""
for key in experiment_id_list:
experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], experiment_dict[key]['port'],\
experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], experiment_dict[key]['endTime']))
print(EXPERIMENT_INFORMATION_FORMAT % experiment_information) |
get the interval of two times
def get_time_interval(time1, time2):
'''get the interval of two times'''
try:
#convert time to timestamp
time1 = time.mktime(time.strptime(time1, '%Y/%m/%d %H:%M:%S'))
time2 = time.mktime(time.strptime(time2, '%Y/%m/%d %H:%M:%S'))
seconds = int((datetime.datetime.fromtimestamp(time2) - datetime.datetime.fromtimestamp(time1)).total_seconds())
#convert seconds to day:hour:minute:second
days = seconds // 86400
seconds %= 86400
hours = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return '%dd %dh %dm %ds' % (days, hours, minutes, seconds)
except:
return 'N/A' |
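A quick usage sketch; the timestamps are illustrative and the example assumes get_time_interval above is importable.
interval = get_time_interval('2019/01/01 10:00:00', '2019/01/02 12:30:45')
print(interval)  # expected: '1d 2h 30m 45s' |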
show experiment information in monitor
def show_experiment_info():
'''show experiment information in monitor'''
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
if not experiment_dict:
print('There is no experiment running...')
exit(1)
update_experiment()
experiment_id_list = []
for key in experiment_dict.keys():
if experiment_dict[key]['status'] != 'STOPPED':
experiment_id_list.append(key)
if not experiment_id_list:
print_warning('There is no experiment running...')
return
for key in experiment_id_list:
print(EXPERIMENT_MONITOR_INFO % (key, experiment_dict[key]['status'], experiment_dict[key]['port'], \
experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], get_time_interval(experiment_dict[key]['startTime'], experiment_dict[key]['endTime'])))
print(TRIAL_MONITOR_HEAD)
running, response = check_rest_server_quick(experiment_dict[key]['port'])
if running:
response = rest_get(trial_jobs_url(experiment_dict[key]['port']), REST_TIME_OUT)
if response and check_response(response):
content = json.loads(response.text)
for index, value in enumerate(content):
content[index] = convert_time_stamp_to_date(value)
print(TRIAL_MONITOR_CONTENT % (content[index].get('id'), content[index].get('startTime'), content[index].get('endTime'), content[index].get('status')))
print(TRIAL_MONITOR_TAIL) |
monitor the experiment
def monitor_experiment(args):
'''monitor the experiment'''
if args.time <= 0:
print_error('please input a positive integer as the time interval (in seconds).')
exit(1)
while True:
try:
os.system('clear')
update_experiment()
show_experiment_info()
time.sleep(args.time)
except KeyboardInterrupt:
exit(0)
except Exception as exception:
print_error(exception)
exit(1) |
output: List[Dict]
def parse_trial_data(content):
"""output: List[Dict]"""
trial_records = []
for trial_data in content:
for phase_i in range(len(trial_data['hyperParameters'])):
hparam = json.loads(trial_data['hyperParameters'][phase_i])['parameters']
hparam['id'] = trial_data['id']
if 'finalMetricData' in trial_data.keys() and phase_i < len(trial_data['finalMetricData']):
reward = json.loads(trial_data['finalMetricData'][phase_i]['data'])
if isinstance(reward, (float, int)):
dict_tmp = {**hparam, **{'reward': reward}}
elif isinstance(reward, dict):
dict_tmp = {**hparam, **reward}
else:
raise ValueError("Invalid finalMetricsData format: {}/{}".format(type(reward), reward))
else:
dict_tmp = hparam
trial_records.append(dict_tmp)
return trial_records |
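A minimal sketch of the trial record layout this function expects; the field values are illustrative and the example assumes parse_trial_data above is importable.
import json
sample_content = [{
    'id': 'Abc12',
    'hyperParameters': [json.dumps({'parameters': {'lr': 0.01, 'batch_size': 32}})],
    'finalMetricData': [{'data': json.dumps(0.93)}],
}]
print(parse_trial_data(sample_content))
# expected: [{'lr': 0.01, 'batch_size': 32, 'id': 'Abc12', 'reward': 0.93}] |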
export experiment metadata to csv
def export_trials_data(args):
"""export experiment metadata to csv
"""
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
print_error('Experiment is not running...')
return
running, response = check_rest_server_quick(rest_port)
if running:
response = rest_get(trial_jobs_url(rest_port), 20)
if response is not None and check_response(response):
content = json.loads(response.text)
# dframe = pd.DataFrame.from_records([parse_trial_data(t_data) for t_data in content])
# dframe.to_csv(args.csv_path, sep='\t')
records = parse_trial_data(content)
if args.type == 'json':
json_records = []
for trial in records:
value = trial.pop('reward', None)
trial_id = trial.pop('id', None)
json_records.append({'parameter': trial, 'value': value, 'id': trial_id})
with open(args.path, 'w') as file:
if args.type == 'csv':
writer = csv.DictWriter(file, set.union(*[set(r.keys()) for r in records]))
writer.writeheader()
writer.writerows(records)
else:
json.dump(json_records, file)
else:
print_error('Export failed...')
else:
print_error('Restful server is not running...')
copy remote directory to local machine
def copy_remote_directory_to_local(sftp, remote_path, local_path):
'''copy remote directory to local machine'''
try:
os.makedirs(local_path, exist_ok=True)
files = sftp.listdir(remote_path)
for file in files:
remote_full_path = os.path.join(remote_path, file)
local_full_path = os.path.join(local_path, file)
try:
if sftp.listdir(remote_full_path):
copy_remote_directory_to_local(sftp, remote_full_path, local_full_path)
except:
sftp.get(remote_full_path, local_full_path)
except Exception:
pass |
create ssh client
def create_ssh_sftp_client(host_ip, port, username, password):
'''create ssh client'''
try:
check_environment()
import paramiko
conn = paramiko.Transport(host_ip, port)
conn.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_transport(conn)
return sftp
except Exception as exception:
print_error('Create ssh client error %s\n' % exception) |
Change search space from json format to hyperopt format
def json2space(x, oldy=None, name=NodeType.Root.value):
"""Change search space from json format to hyperopt format
"""
y = list()
if isinstance(x, dict):
if NodeType.Type.value in x.keys():
_type = x[NodeType.Type.value]
name = name + '-' + _type
if _type == 'choice':
if oldy is not None:
_index = oldy[NodeType.Index.value]
y += json2space(x[NodeType.Value.value][_index],
oldy[NodeType.Value.value], name=name+'[%d]' % _index)
else:
y += json2space(x[NodeType.Value.value], None, name=name)
y.append(name)
else:
for key in x.keys():
y += json2space(x[key], (oldy[key] if oldy is not None else None), name + "[%s]" % str(key))
elif isinstance(x, list):
for i, x_i in enumerate(x):
y += json2space(x_i, (oldy[i] if oldy is not None else None), name + "[%d]" % i)
else:
pass
return y |
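A small illustration of the flattening, assuming NodeType.Root.value, NodeType.Type.value and NodeType.Value.value map to 'root', '_type' and '_value'; the exact names are therefore only indicative.
space = {'lr': {'_type': 'uniform', '_value': [0.01, 0.1]},
         'opt': {'_type': 'choice', '_value': ['SGD', 'Adam']}}
print(json2space(space))
# expected something like: ['root[lr]-uniform', 'root[opt]-choice'] |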
JSON to parameters.
def json2paramater(x, is_rand, random_state, oldy=None, Rand=False, name=NodeType.Root.value):
"""Json to pramaters.
"""
if isinstance(x, dict):
if NodeType.Type.value in x.keys():
_type = x[NodeType.Type.value]
_value = x[NodeType.Value.value]
name = name + '-' + _type
Rand |= is_rand[name]
if Rand:
if _type == 'choice':
_index = random_state.randint(len(_value))
y = {
NodeType.Index.value: _index,
NodeType.Value.value: json2paramater(x[NodeType.Value.value][_index],
is_rand,
random_state,
None,
Rand,
name=name+"[%d]" % _index)
}
else:
y = eval('parameter_expressions.' +
_type)(*(_value + [random_state]))
else:
y = copy.deepcopy(oldy)
else:
y = dict()
for key in x.keys():
y[key] = json2paramater(x[key], is_rand, random_state, oldy[key] if oldy is not None else None, Rand, name + "[%s]" % str(key))
elif isinstance(x, list):
y = list()
for i, x_i in enumerate(x):
y.append(json2paramater(x_i, is_rand, random_state, oldy[i] if oldy is not None else None, Rand, name + "[%d]" % i))
else:
y = copy.deepcopy(x)
return y |
Delete index information from params
Parameters
----------
params : dict
Returns
-------
result : dict
def _split_index(params):
"""Delete index information from params
Parameters
----------
params : dict
Returns
-------
result : dict
"""
result = {}
for key in params:
if isinstance(params[key], dict):
value = params[key]['_value']
else:
value = params[key]
result[key] = value
return result |
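For illustration, a hypothetical params dict containing an '_index'/'_value' choice entry is flattened like this (assumes _split_index above is importable):
params = {'optimizer': {'_index': 1, '_value': 'Adam'}, 'learning_rate': 0.001}
print(_split_index(params))
# expected: {'optimizer': 'Adam', 'learning_rate': 0.001} |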
Parameters
----------
config : str
info : str
save_dir : str
def mutation(self, config=None, info=None, save_dir=None):
"""
Parameters
----------
config : str
info : str
save_dir : str
"""
self.result = None
self.config = config
self.restore_dir = self.save_dir
self.save_dir = save_dir
self.info = info |
Update search space.
Search_space contains the information that user pre-defined.
Parameters
----------
search_space : dict
def update_search_space(self, search_space):
"""Update search space.
Search_space contains the information that user pre-defined.
Parameters
----------
search_space : dict
"""
self.searchspace_json = search_space
self.space = json2space(self.searchspace_json)
self.random_state = np.random.RandomState()
self.population = []
is_rand = dict()
for item in self.space:
is_rand[item] = True
for _ in range(self.population_size):
config = json2paramater(
self.searchspace_json, is_rand, self.random_state)
self.population.append(Individual(config=config)) |
Returns a dict of trial (hyper-)parameters, as a serializable object.
Parameters
----------
parameter_id : int
Returns
-------
config : dict
def generate_parameters(self, parameter_id):
"""Returns a dict of trial (hyper-)parameters, as a serializable object.
Parameters
----------
parameter_id : int
Returns
-------
config : dict
"""
if not self.population:
raise RuntimeError('The population is empty')
pos = -1
for i in range(len(self.population)):
if self.population[i].result is None:
pos = i
break
if pos != -1:
indiv = copy.deepcopy(self.population[pos])
self.population.pop(pos)
total_config = indiv.config
else:
random.shuffle(self.population)
if self.population[0].result < self.population[1].result:
self.population[0] = self.population[1]
# mutation
space = json2space(self.searchspace_json,
self.population[0].config)
is_rand = dict()
mutation_pos = space[random.randint(0, len(space)-1)]
for i in range(len(self.space)):
is_rand[self.space[i]] = (self.space[i] == mutation_pos)
config = json2paramater(
self.searchspace_json, is_rand, self.random_state, self.population[0].config)
self.population.pop(1)
# remove "_index" from config and save params-id
total_config = config
self.total_data[parameter_id] = total_config
config = _split_index(total_config)
return config |
Record the result from a trial
Parameters
----------
parameters: dict
value : dict/float
if value is dict, it should have "default" key.
value is final metrics of the trial.
def receive_trial_result(self, parameter_id, parameters, value):
'''Record the result from a trial
Parameters
----------
parameters: dict
value : dict/float
if value is dict, it should have "default" key.
value is final metrics of the trial.
'''
reward = extract_scalar_reward(value)
if parameter_id not in self.total_data:
raise RuntimeError('Received parameter_id not in total_data.')
# restore the parameters containing "_index"
params = self.total_data[parameter_id]
if self.optimize_mode == OptimizeMode.Minimize:
reward = -reward
indiv = Individual(config=params, result=reward)
self.population.append(indiv) |
Load json file content
Parameters
----------
file_path:
path to the file
Raises
------
TypeError
Error with the file path
def get_json_content(file_path):
"""Load json file content
Parameters
----------
file_path:
path to the file
Raises
------
TypeError
Error with the file path
"""
try:
with open(file_path, 'r') as file:
return json.load(file)
except TypeError as err:
print('Error: ', err)
return None |
Generate the Parameter Configuration Space (PCS) which defines the
legal ranges of the parameters to be optimized and their default values.
Generally, the format is:
# parameter_name categorical {value_1, ..., value_N} [default value]
# parameter_name ordinal {value_1, ..., value_N} [default value]
# parameter_name integer [min_value, max_value] [default value]
# parameter_name integer [min_value, max_value] [default value] log
# parameter_name real [min_value, max_value] [default value]
# parameter_name real [min_value, max_value] [default value] log
Reference: https://automl.github.io/SMAC3/stable/options.html
Parameters
----------
nni_search_space_content: search_space
The search space in this experiment in nni
Returns
-------
Parameter Configuration Space (PCS)
the legal ranges of the parameters to be optimized and their default values
Raises
------
RuntimeError
unsupported type or value error or incorrect search space
def generate_pcs(nni_search_space_content):
"""Generate the Parameter Configuration Space (PCS) which defines the
legal ranges of the parameters to be optimized and their default values.
Generally, the format is:
# parameter_name categorical {value_1, ..., value_N} [default value]
# parameter_name ordinal {value_1, ..., value_N} [default value]
# parameter_name integer [min_value, max_value] [default value]
# parameter_name integer [min_value, max_value] [default value] log
# parameter_name real [min_value, max_value] [default value]
# parameter_name real [min_value, max_value] [default value] log
Reference: https://automl.github.io/SMAC3/stable/options.html
Parameters
----------
nni_search_space_content: search_space
The search space in this experiment in nni
Returns
-------
Parameter Configuration Space (PCS)
the legal ranges of the parameters to be optimized and their default values
Raises
------
RuntimeError
unsupported type or value error or incorrect search space
"""
categorical_dict = {}
search_space = nni_search_space_content
with open('param_config_space.pcs', 'w') as pcs_fd:
if isinstance(search_space, dict):
for key in search_space.keys():
if isinstance(search_space[key], dict):
try:
if search_space[key]['_type'] == 'choice':
choice_len = len(search_space[key]['_value'])
pcs_fd.write('%s categorical {%s} [%s]\n' % (
key,
json.dumps(list(range(choice_len)))[1:-1],
json.dumps(0)))
if key in categorical_dict:
raise RuntimeError('%s has already existed, please make sure search space has no duplicate key.' % key)
categorical_dict[key] = search_space[key]['_value']
elif search_space[key]['_type'] == 'randint':
# TODO: support lower bound in randint
pcs_fd.write('%s integer [0, %d] [%d]\n' % (
key,
search_space[key]['_value'][0],
search_space[key]['_value'][0]))
elif search_space[key]['_type'] == 'uniform':
pcs_fd.write('%s real %s [%s]\n' % (
key,
json.dumps(search_space[key]['_value']),
json.dumps(search_space[key]['_value'][0])))
elif search_space[key]['_type'] == 'loguniform':
# use np.round here to ensure that the rounded default value is in the range, which will be rounded in the configure_space package
search_space[key]['_value'] = list(np.round(np.log(search_space[key]['_value']), 10))
pcs_fd.write('%s real %s [%s]\n' % (
key,
json.dumps(search_space[key]['_value']),
json.dumps(search_space[key]['_value'][0])))
elif search_space[key]['_type'] == 'quniform' \
and search_space[key]['_value'][2] == 1:
pcs_fd.write('%s integer [%d, %d] [%d]\n' % (
key,
search_space[key]['_value'][0],
search_space[key]['_value'][1],
search_space[key]['_value'][0]))
else:
raise RuntimeError('unsupported _type %s' % search_space[key]['_type'])
except:
raise RuntimeError('_type or _value error.')
else:
raise RuntimeError('incorrect search space.')
return categorical_dict
return None |
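A minimal sketch of the mapping; the search space below is illustrative, and the example assumes generate_pcs above is importable and writes param_config_space.pcs into the working directory.
search_space = {
    'optimizer': {'_type': 'choice', '_value': ['SGD', 'Adam']},
    'learning_rate': {'_type': 'uniform', '_value': [0.0001, 0.1]},
}
categorical_dict = generate_pcs(search_space)
# param_config_space.pcs would then contain lines like:
#   optimizer categorical {0, 1} [0]
#   learning_rate real [0.0001, 0.1] [0.0001]
# and categorical_dict would map 'optimizer' back to ['SGD', 'Adam'] |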
Generate the scenario. The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and
can be constructed either by providing an actual scenario-object, or by specifying the options in a scenario file.
Reference: https://automl.github.io/SMAC3/stable/options.html
The format of the scenario file is one option per line:
OPTION1 = VALUE1
OPTION2 = VALUE2
...
Parameters
----------
abort_on_first_run_crash: bool
If true, SMAC will abort if the first run of the target algorithm crashes. Default: True,
because trials reported to nni tuner would always be in success state
algo: function
Specifies the target algorithm call that SMAC will optimize. Interpreted as a bash-command.
Not required by tuner, but required by nni's training service for running trials
always_race_default:
Race new incumbents always against default configuration
cost_for_crash:
Defines the cost-value for crashed runs on scenarios with quality as run-obj. Default: 2147483647.0.
Trials reported to nni tuner would always be in success state
cutoff_time:
Maximum runtime, after which the target algorithm is cancelled. `Required if *run_obj* is runtime`
deterministic: bool
If true, the optimization process will be repeatable.
execdir:
Specifies the path to the execution-directory. Default: .
Trials are executed by nni's training service
feature_file:
Specifies the file with the instance-features.
No features specified or feature file is not supported
initial_incumbent:
DEFAULT is the default from the PCS. Default: DEFAULT. Must be from: [‘DEFAULT’, ‘RANDOM’].
input_psmac_dirs:
For parallel SMAC, multiple output-directories are used.
Parallelism is supported by nni
instance_file:
Specifies the file with the training-instances. Not supported
intensification_percentage:
The fraction of time to be used on intensification (versus choice of next Configurations). Default: 0.5.
Not supported, trials are controlled by nni's training service and killed by the assessor
maxR: int
Maximum number of calls per configuration. Default: 2000.
memory_limit:
Maximum available memory the target algorithm can occupy before being cancelled.
minR: int
Minimum number of calls per configuration. Default: 1.
output_dir:
Specifies the output-directory for all emerging files, such as logging and results.
Default: smac3-output_2018-01-22_15:05:56_807070.
overall_obj:
PARX, where X is an integer defining the penalty imposed on timeouts (i.e. runtimes that exceed the cutoff-time).
Timeout is not supported
paramfile:
Specifies the path to the PCS-file.
run_obj:
Defines what metric to optimize. When optimizing runtime, cutoff_time is required as well.
Must be from: [‘runtime’, ‘quality’].
runcount_limit: int
Maximum number of algorithm-calls during optimization. Default: inf.
Use default because this is controlled by nni
shared_model:
Whether to run SMAC in parallel mode. Parallelism is supported by nni
test_instance_file:
Specifies the file with the test-instances. Instance is not supported
tuner-timeout:
Maximum amount of CPU-time used for optimization. Not supported
wallclock_limit: int
Maximum amount of wallclock-time used for optimization. Default: inf.
Use default because this is controlled by nni
Returns
-------
Scenario:
The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and can be constructed
either by providing an actual scenario-object, or by specifying the options in a scenario file
def generate_scenario(ss_content):
"""Generate the scenario. The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and
can be constructed either by providing an actual scenario-object, or by specifying the options in a scenario file.
Reference: https://automl.github.io/SMAC3/stable/options.html
The format of the scenario file is one option per line:
OPTION1 = VALUE1
OPTION2 = VALUE2
...
Parameters
----------
abort_on_first_run_crash: bool
If true, SMAC will abort if the first run of the target algorithm crashes. Default: True,
because trials reported to nni tuner would always be in success state
algo: function
Specifies the target algorithm call that SMAC will optimize. Interpreted as a bash-command.
Not required by tuner, but required by nni's training service for running trials
always_race_default:
Race new incumbents always against default configuration
cost_for_crash:
Defines the cost-value for crashed runs on scenarios with quality as run-obj. Default: 2147483647.0.
Trials reported to nni tuner would always be in success state
cutoff_time:
Maximum runtime, after which the target algorithm is cancelled. `Required if *run_obj* is runtime`
deterministic: bool
If true, the optimization process will be repeatable.
execdir:
Specifies the path to the execution-directory. Default: .
Trials are executed by nni's training service
feature_file:
Specifies the file with the instance-features.
No features specified or feature file is not supported
initial_incumbent:
DEFAULT is the default from the PCS. Default: DEFAULT. Must be from: [‘DEFAULT’, ‘RANDOM’].
input_psmac_dirs:
For parallel SMAC, multiple output-directories are used.
Parallelism is supported by nni
instance_file:
Specifies the file with the training-instances. Not supported
intensification_percentage:
The fraction of time to be used on intensification (versus choice of next Configurations). Default: 0.5.
Not supported, trials are controlled by nni's training service and killed by the assessor
maxR: int
Maximum number of calls per configuration. Default: 2000.
memory_limit:
Maximum available memory the target algorithm can occupy before being cancelled.
minR: int
Minimum number of calls per configuration. Default: 1.
output_dir:
Specifies the output-directory for all emerging files, such as logging and results.
Default: smac3-output_2018-01-22_15:05:56_807070.
overall_obj:
PARX, where X is an integer defining the penalty imposed on timeouts (i.e. runtimes that exceed the cutoff-time).
Timeout is not supported
paramfile:
Specifies the path to the PCS-file.
run_obj:
Defines what metric to optimize. When optimizing runtime, cutoff_time is required as well.
Must be from: [‘runtime’, ‘quality’].
runcount_limit: int
Maximum number of algorithm-calls during optimization. Default: inf.
Use default because this is controlled by nni
shared_model:
Whether to run SMAC in parallel mode. Parallelism is supported by nni
test_instance_file:
Specifies the file with the test-instances. Instance is not supported
tuner-timeout:
Maximum amount of CPU-time used for optimization. Not supported
wallclock_limit: int
Maximum amount of wallclock-time used for optimization. Default: inf.
Use default because this is controlled by nni
Returns
-------
Scenario:
The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and can be constructed
either by providing an actual scenario-object, or by specifying the options in a scenario file
"""
with open('scenario.txt', 'w') as sce_fd:
sce_fd.write('deterministic = 0\n')
#sce_fd.write('output_dir = \n')
sce_fd.write('paramfile = param_config_space.pcs\n')
sce_fd.write('run_obj = quality\n')
return generate_pcs(ss_content) |
Load or create dataset
def load_data(train_path='./data/regression.train', test_path='./data/regression.test'):
'''
Load or create dataset
'''
print('Load data...')
df_train = pd.read_csv(train_path, header=None, sep='\t')
df_test = pd.read_csv(test_path, header=None, sep='\t')
num = len(df_train)
split_num = int(0.9 * num)
y_train = df_train[0].values
y_test = df_test[0].values
y_eval = y_train[split_num:]
y_train = y_train[:split_num]
X_train = df_train.drop(0, axis=1).values
X_test = df_test.drop(0, axis=1).values
X_eval = X_train[split_num:, :]
X_train = X_train[:split_num, :]
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_eval, y_eval, reference=lgb_train)
return lgb_train, lgb_eval, X_test, y_test |
The distance between two layers.
def layer_distance(a, b):
"""The distance between two layers."""
# pylint: disable=unidiomatic-typecheck
if type(a) != type(b):
return 1.0
if is_layer(a, "Conv"):
att_diff = [
(a.filters, b.filters),
(a.kernel_size, b.kernel_size),
(a.stride, b.stride),
]
return attribute_difference(att_diff)
if is_layer(a, "Pooling"):
att_diff = [
(a.padding, b.padding),
(a.kernel_size, b.kernel_size),
(a.stride, b.stride),
]
return attribute_difference(att_diff)
return 0.0 |
The attribute distance.
def attribute_difference(att_diff):
''' The attribute distance.
'''
ret = 0
for a_value, b_value in att_diff:
if max(a_value, b_value) == 0:
ret += 0
else:
ret += abs(a_value - b_value) * 1.0 / max(a_value, b_value)
return ret * 1.0 / len(att_diff) |
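A worked example (assumes attribute_difference above is importable):
print(attribute_difference([(32, 64), (3, 3)]))
# (|32-64|/64 + |3-3|/3) / 2 = (0.5 + 0) / 2 = 0.25 |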
The distance between the layers of two neural networks.
def layers_distance(list_a, list_b):
"""The distance between the layers of two neural networks."""
len_a = len(list_a)
len_b = len(list_b)
f = np.zeros((len_a + 1, len_b + 1))
f[-1][-1] = 0
for i in range(-1, len_a):
f[i][-1] = i + 1
for j in range(-1, len_b):
f[-1][j] = j + 1
for i in range(len_a):
for j in range(len_b):
f[i][j] = min(
f[i][j - 1] + 1,
f[i - 1][j] + 1,
f[i - 1][j - 1] + layer_distance(list_a[i], list_b[j]),
)
return f[len_a - 1][len_b - 1] |
The distance between two skip-connections.
def skip_connection_distance(a, b):
"""The distance between two skip-connections."""
if a[2] != b[2]:
return 1.0
len_a = abs(a[1] - a[0])
len_b = abs(b[1] - b[0])
return (abs(a[0] - b[0]) + abs(len_a - len_b)) / (max(a[0], b[0]) + max(len_a, len_b)) |
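A worked example; the triples below follow how the indices are used above, i.e. (start, end, type):
print(skip_connection_distance((1, 4, 'add'), (2, 4, 'add')))
# len_a = 3, len_b = 2, so (|1-2| + |3-2|) / (max(1, 2) + max(3, 2)) = 2 / 5 = 0.4 |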
The distance between the skip-connections of two neural networks.
def skip_connections_distance(list_a, list_b):
"""The distance between the skip-connections of two neural networks."""
distance_matrix = np.zeros((len(list_a), len(list_b)))
for i, a in enumerate(list_a):
for j, b in enumerate(list_b):
distance_matrix[i][j] = skip_connection_distance(a, b)
return distance_matrix[linear_sum_assignment(distance_matrix)].sum() + abs(
len(list_a) - len(list_b)
) |
The distance between two neural networks.
Args:
x: An instance of NetworkDescriptor.
y: An instance of NetworkDescriptor
Returns:
The edit-distance between x and y.
def edit_distance(x, y):
"""The distance between two neural networks.
Args:
x: An instance of NetworkDescriptor.
y: An instance of NetworkDescriptor
Returns:
The edit-distance between x and y.
"""
ret = layers_distance(x.layers, y.layers)
ret += Constant.KERNEL_LAMBDA * skip_connections_distance(
x.skip_connections, y.skip_connections
)
return ret |
Calculate the edit distance.
Args:
train_x: A list of neural architectures.
train_y: A list of neural architectures.
Returns:
An edit-distance matrix.
def edit_distance_matrix(train_x, train_y=None):
"""Calculate the edit distance.
Args:
train_x: A list of neural architectures.
train_y: A list of neural architectures.
Returns:
An edit-distance matrix.
"""
if train_y is None:
ret = np.zeros((train_x.shape[0], train_x.shape[0]))
for x_index, x in enumerate(train_x):
for y_index, y in enumerate(train_x):
if x_index == y_index:
ret[x_index][y_index] = 0
elif x_index < y_index:
ret[x_index][y_index] = edit_distance(x, y)
else:
ret[x_index][y_index] = ret[y_index][x_index]
return ret
ret = np.zeros((train_x.shape[0], train_y.shape[0]))
for x_index, x in enumerate(train_x):
for y_index, y in enumerate(train_y):
ret[x_index][y_index] = edit_distance(x, y)
return ret |
The Euclidean distance between two vectors.
def vector_distance(a, b):
"""The Euclidean distance between two vectors."""
a = np.array(a)
b = np.array(b)
return np.linalg.norm(a - b) |
Use Bourgain algorithm to embed the neural architectures based on their edit-distance.
Args:
distance_matrix: A matrix of edit-distances.
Returns:
A matrix of distances after embedding.
def bourgain_embedding_matrix(distance_matrix):
"""Use Bourgain algorithm to embed the neural architectures based on their edit-distance.
Args:
distance_matrix: A matrix of edit-distances.
Returns:
A matrix of distances after embedding.
"""
distance_matrix = np.array(distance_matrix)
n = len(distance_matrix)
if n == 1:
return distance_matrix
np.random.seed(123)
distort_elements = []
r = range(n)
k = int(math.ceil(math.log(n) / math.log(2) - 1))
t = int(math.ceil(math.log(n)))
counter = 0
for i in range(0, k + 1):
for t in range(t):
s = np.random.choice(r, 2 ** i)
for j in r:
d = min(distance_matrix[j][col] for col in s)
counter += len(s)
if i == 0 and t == 0:
distort_elements.append([d])
else:
distort_elements[j].append(d)
return rbf_kernel(distort_elements, distort_elements) |
Check if the target descriptor is in the descriptors.
def contain(descriptors, target_descriptor):
"""Check if the target descriptor is in the descriptors."""
for descriptor in descriptors:
if edit_distance(descriptor, target_descriptor) < 1e-5:
return True
return False |
Fit the regressor with more data.
Args:
train_x: A list of NetworkDescriptor.
train_y: A list of metric values.
def fit(self, train_x, train_y):
""" Fit the regressor with more data.
Args:
train_x: A list of NetworkDescriptor.
train_y: A list of metric values.
"""
if self.first_fitted:
self.incremental_fit(train_x, train_y)
else:
self.first_fit(train_x, train_y) |
Incrementally fit the regressor.
def incremental_fit(self, train_x, train_y):
""" Incrementally fit the regressor. """
if not self._first_fitted:
raise ValueError("The first_fit function needs to be called first.")
train_x, train_y = np.array(train_x), np.array(train_y)
# Incrementally compute K
up_right_k = edit_distance_matrix(self._x, train_x)
down_left_k = np.transpose(up_right_k)
down_right_k = edit_distance_matrix(train_x)
up_k = np.concatenate((self._distance_matrix, up_right_k), axis=1)
down_k = np.concatenate((down_left_k, down_right_k), axis=1)
temp_distance_matrix = np.concatenate((up_k, down_k), axis=0)
k_matrix = bourgain_embedding_matrix(temp_distance_matrix)
diagonal = np.diag_indices_from(k_matrix)
diagonal = (diagonal[0][-len(train_x) :], diagonal[1][-len(train_x) :])
k_matrix[diagonal] += self.alpha
try:
self._l_matrix = cholesky(k_matrix, lower=True) # Line 2
except LinAlgError:
return self
self._x = np.concatenate((self._x, train_x), axis=0)
self._y = np.concatenate((self._y, train_y), axis=0)
self._distance_matrix = temp_distance_matrix
self._alpha_vector = cho_solve((self._l_matrix, True), self._y) # Line 3
return self |
Fit the regressor for the first time.
def first_fit(self, train_x, train_y):
""" Fit the regressor for the first time. """
train_x, train_y = np.array(train_x), np.array(train_y)
self._x = np.copy(train_x)
self._y = np.copy(train_y)
self._distance_matrix = edit_distance_matrix(self._x)
k_matrix = bourgain_embedding_matrix(self._distance_matrix)
k_matrix[np.diag_indices_from(k_matrix)] += self.alpha
self._l_matrix = cholesky(k_matrix, lower=True) # Line 2
self._alpha_vector = cho_solve((self._l_matrix, True), self._y) # Line 3
self._first_fitted = True
return self |
Predict the result.
Args:
train_x: A list of NetworkDescriptor.
Returns:
y_mean: The predicted mean.
y_std: The predicted standard deviation.
def predict(self, train_x):
"""Predict the result.
Args:
train_x: A list of NetworkDescriptor.
Returns:
y_mean: The predicted mean.
y_std: The predicted standard deviation.
"""
k_trans = np.exp(-np.power(edit_distance_matrix(train_x, self._x), 2))
y_mean = k_trans.dot(self._alpha_vector) # Line 4 (y_mean = f_star)
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
l_inv = solve_triangular(self._l_matrix.T, np.eye(self._l_matrix.shape[0]))
k_inv = l_inv.dot(l_inv.T)
# Compute variance of predictive distribution
y_var = np.ones(len(train_x), dtype=float)
y_var -= np.einsum("ij,ij->i", np.dot(k_trans, k_inv), k_trans)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var) |
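A hedged sketch of the intended workflow around the fit/incremental_fit/first_fit/predict methods above; the regressor class name and the descriptor lists are placeholders, not taken from the source.
# gpr = IncrementalGaussianProcess()            # hypothetical constructor for the class holding these methods
# gpr.fit(descriptors, metric_values)           # first_fit on the first call, incremental_fit afterwards
# mean, std = gpr.predict(np.array(new_descriptors)) |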
Generate new architecture.
Args:
descriptors: All the searched neural architectures.
Returns:
graph: An instance of Graph. A morphed neural network with weights.
father_id: The father node ID in the search tree.
def generate(self, descriptors):
"""Generate new architecture.
Args:
descriptors: All the searched neural architectures.
Returns:
graph: An instance of Graph. A morphed neural network with weights.
father_id: The father node ID in the search tree.
"""
model_ids = self.search_tree.adj_list.keys()
target_graph = None
father_id = None
descriptors = deepcopy(descriptors)
elem_class = Elem
if self.optimizemode is OptimizeMode.Maximize:
elem_class = ReverseElem
# Initialize the priority queue.
pq = PriorityQueue()
temp_list = []
for model_id in model_ids:
metric_value = self.searcher.get_metric_value_by_id(model_id)
temp_list.append((metric_value, model_id))
temp_list = sorted(temp_list)
for metric_value, model_id in temp_list:
graph = self.searcher.load_model_by_id(model_id)
graph.clear_operation_history()
graph.clear_weights()
pq.put(elem_class(metric_value, model_id, graph))
t = 1.0
t_min = self.t_min
alpha = 0.9
opt_acq = self._get_init_opt_acq_value()
while not pq.empty() and t > t_min:
elem = pq.get()
if self.optimizemode is OptimizeMode.Maximize:
temp_exp = min((elem.metric_value - opt_acq) / t, 1.0)
else:
temp_exp = min((opt_acq - elem.metric_value) / t, 1.0)
ap = math.exp(temp_exp)
if ap >= random.uniform(0, 1):
for temp_graph in transform(elem.graph):
if contain(descriptors, temp_graph.extract_descriptor()):
continue
temp_acq_value = self.acq(temp_graph)
pq.put(elem_class(temp_acq_value, elem.father_id, temp_graph))
descriptors.append(temp_graph.extract_descriptor())
if self._accept_new_acq_value(opt_acq, temp_acq_value):
opt_acq = temp_acq_value
father_id = elem.father_id
target_graph = deepcopy(temp_graph)
t *= alpha
# Did not find a non-duplicate architecture
if father_id is None:
return None, None
nm_graph = self.searcher.load_model_by_id(father_id)
for args in target_graph.operation_history:
getattr(nm_graph, args[0])(*list(args[1:]))
return nm_graph, father_id |
estimate the value of generated graph
def acq(self, graph):
''' estimate the value of generated graph
'''
mean, std = self.gpr.predict(np.array([graph.extract_descriptor()]))
if self.optimizemode is OptimizeMode.Maximize:
return mean + self.beta * std
return mean - self.beta * std |
add child to search tree itself.
Arguments:
u {int} -- father id
v {int} -- child id
def add_child(self, u, v):
''' add child to search tree itself.
Arguments:
u {int} -- father id
v {int} -- child id
'''
if u == -1:
self.root = v
self.adj_list[v] = []
return
if v not in self.adj_list[u]:
self.adj_list[u].append(v)
if v not in self.adj_list:
self.adj_list[v] = [] |
A recursive function to return the content of the tree in a dict.
def get_dict(self, u=None):
""" A recursive function to return the content of the tree in a dict."""
if u is None:
return self.get_dict(self.root)
children = []
for v in self.adj_list[u]:
children.append(self.get_dict(v))
ret = {"name": u, "children": children}
return ret |
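A small illustration of the resulting structure, assuming a tree built with the add_child method above:
# after add_child(-1, 0), add_child(0, 1), add_child(0, 2), get_dict() returns
# {'name': 0, 'children': [{'name': 1, 'children': []}, {'name': 2, 'children': []}]} |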
Train a network from a specific graph.
def train_with_graph(p_graph, qp_pairs, dev_qp_pairs):
'''
Train a network from a specific graph.
'''
global sess
with tf.Graph().as_default():
train_model = GAG(cfg, embed, p_graph)
train_model.build_net(is_training=True)
tf.get_variable_scope().reuse_variables()
dev_model = GAG(cfg, embed, p_graph)
dev_model.build_net(is_training=False)
with tf.Session() as sess:
if restore_path is not None:
restore_mapping = dict(zip(restore_shared, restore_shared))
logger.debug('init shared variables from {}, restore_scopes: {}'.format(restore_path, restore_shared))
init_from_checkpoint(restore_path, restore_mapping)
logger.debug('init variables')
logger.debug(sess.run(tf.report_uninitialized_variables()))
init = tf.global_variables_initializer()
sess.run(init)
# writer = tf.summary.FileWriter('%s/graph/'%execution_path, sess.graph)
logger.debug('assign to graph')
saver = tf.train.Saver()
train_loss = None
bestacc = 0
patience = 5
patience_increase = 2
improvement_threshold = 0.995
for epoch in range(max_epoch):
logger.debug('begin to train')
train_batches = data.get_batches(qp_pairs, cfg.batch_size)
train_loss = run_epoch(train_batches, train_model, True)
logger.debug('epoch ' + str(epoch) +
' loss: ' + str(train_loss))
dev_batches = list(data.get_batches(
dev_qp_pairs, cfg.batch_size))
_, position1, position2, ids, contexts = run_epoch(
dev_batches, dev_model, False)
answers = generate_predict_json(
position1, position2, ids, contexts)
if save_path is not None:
logger.info('save prediction file to {}'.format(save_path))
with open(os.path.join(save_path, 'epoch%d.prediction' % epoch), 'w') as file:
json.dump(answers, file)
else:
answers = json.dumps(answers)
answers = json.loads(answers)
iter = epoch + 1
acc = evaluate.evaluate_with_predictions(
args.dev_file, answers)
logger.debug('Send intermediate acc: %s', str(acc))
nni.report_intermediate_result(acc)
logger.debug('Send intermediate result done.')
if acc > bestacc:
if acc * improvement_threshold > bestacc:
patience = max(patience, iter * patience_increase)
bestacc = acc
if save_path is not None:
logger.info('save model & prediction to {}'.format(save_path))
saver.save(sess, os.path.join(save_path, 'epoch%d.model' % epoch))
with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file:
pickle.dump(
(position1, position2, ids, contexts), file)
logger.debug('epoch %d acc %g bestacc %g' %
(epoch, acc, bestacc))
if patience <= iter:
break
logger.debug('save done.')
return train_loss, bestacc |
Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects.
Call 'generate_parameters()' once for each id in 'parameter_id_list' by default.
User code must override either this function or 'generate_parameters()'.
If there's no more trial, user should raise nni.NoMoreTrialError exception in generate_parameters().
If so, this function will only return sets of trial (hyper-)parameters that have already been collected.
parameter_id_list: list of int
def generate_multiple_parameters(self, parameter_id_list):
"""Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects.
Call 'generate_parameters()' once for each id in 'parameter_id_list' by default.
User code must override either this function or 'generate_parameters()'.
If there's no more trial, user should raise nni.NoMoreTrialError exception in generate_parameters().
If so, this function will only return sets of trial (hyper-)parameters that have already been collected.
parameter_id_list: list of int
"""
result = []
for parameter_id in parameter_id_list:
try:
_logger.debug("generating param for {}".format(parameter_id))
res = self.generate_parameters(parameter_id)
except nni.NoMoreTrialError:
return result
result.append(res)
return result |
Load graph
def graph_loads(graph_json):
'''
Load graph
'''
layers = []
for layer in graph_json['layers']:
layer_info = Layer(layer['graph_type'], layer['input'], layer['output'], layer['size'], layer['hash_id'])
layer_info.is_delete = layer['is_delete']
_logger.debug('append layer {}'.format(layer_info))
layers.append(layer_info)
graph = Graph(graph_json['max_layer_num'], graph_json['min_layer_num'], [], [], [])
graph.layers = layers
_logger.debug('graph {} loaded'.format(graph))
return graph |
Calculate the `hash_id` of a layer, which is determined by its own properties and the `hash_id`s of its input layers.
def update_hash(self, layers: Iterable):
"""
Calculate the `hash_id` of a layer, which is determined by its own properties and the `hash_id`s of its input layers.
"""
if self.graph_type == LayerType.input.value:
return
hasher = hashlib.md5()
hasher.update(LayerType(self.graph_type).name.encode('ascii'))
hasher.update(str(self.size).encode('ascii'))
for i in self.input:
if layers[i].hash_id is None:
raise ValueError('Hash id of layer {}: {} not generated!'.format(i, layers[i]))
hasher.update(layers[i].hash_id.encode('ascii'))
self.hash_id = hasher.hexdigest() |
update the hash id of each layer in topological order; hash ids will be used in weight sharing
def update_hash(self):
"""
update the hash id of each layer in topological order; hash ids will be used in weight sharing
"""
_logger.debug('update hash')
layer_in_cnt = [len(layer.input) for layer in self.layers]
topo_queue = deque([i for i, layer in enumerate(self.layers) if not layer.is_delete and layer.graph_type == LayerType.input.value])
while topo_queue:
layer_i = topo_queue.pop()
self.layers[layer_i].update_hash(self.layers)
for layer_j in self.layers[layer_i].output:
layer_in_cnt[layer_j] -= 1
if layer_in_cnt[layer_j] == 0:
topo_queue.appendleft(layer_j) |
Initialize root logger.
This will redirect anything from logging.getLogger() as well as stdout to specified file.
logger_file_path: path of logger file (path-like object).
def init_logger(logger_file_path, log_level_name='info'):
"""Initialize root logger.
This will redirect anything from logging.getLogger() as well as stdout to specified file.
logger_file_path: path of logger file (path-like object).
"""
log_level = log_level_map.get(log_level_name, logging.INFO)
logger_file = open(logger_file_path, 'w')
fmt = '[%(asctime)s] %(levelname)s (%(name)s/%(threadName)s) %(message)s'
logging.Formatter.converter = time.localtime
formatter = logging.Formatter(fmt, _time_format)
handler = logging.StreamHandler(logger_file)
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(log_level)
# these modules are too verbose
logging.getLogger('matplotlib').setLevel(log_level)
sys.stdout = _LoggerFileWrapper(logger_file) |
Create simple convolutional model
def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
'''
Create simple convolutional model
'''
layers = [
Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(100, activation='relu'),
Dense(num_classes, activation='softmax')
]
model = Sequential(layers)
if hyper_params['optimizer'] == 'Adam':
optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
else:
optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])
return model |
Load MNIST dataset
def load_mnist_data(args):
'''
Load MNIST dataset
'''
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (np.expand_dims(x_train, -1).astype(float) / 255.)[:args.num_train]
x_test = (np.expand_dims(x_test, -1).astype(float) / 255.)[:args.num_test]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
LOG.debug('x_train shape: %s', (x_train.shape,))
LOG.debug('x_test shape: %s', (x_test.shape,))
return x_train, y_train, x_test, y_test |
Train model
def train(args, params):
'''
Train model
'''
x_train, y_train, x_test, y_test = load_mnist_data(args)
model = create_mnist_model(params)
# nni
model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])
_, acc = model.evaluate(x_test, y_test, verbose=0)
LOG.debug('Final result is: %g', acc)
nni.report_final_result(acc) |
Run on end of each epoch
def on_epoch_end(self, epoch, logs={}):
'''
Run on end of each epoch
'''
LOG.debug(logs)
nni.report_intermediate_result(logs["val_acc"]) |
get all of config values
def get_all_config(self):
'''get all of config values'''
return json.dumps(self.config, indent=4, sort_keys=True, separators=(',', ':')) |
set {key:value} pairs to self.config
def set_config(self, key, value):
'''set {key:value} pairs to self.config'''
self.config = self.read_file()
self.config[key] = value
self.write_file() |
save config to local file
def write_file(self):
'''save config to local file'''
if self.config:
try:
with open(self.config_file, 'w') as file:
json.dump(self.config, file)
except IOError as error:
print('Error:', error)
return |
set {key:value} pairs to self.experiments
def add_experiment(self, id, port, time, file_name, platform):
'''set {key:value} pairs to self.experiments'''
self.experiments[id] = {}
self.experiments[id]['port'] = port
self.experiments[id]['startTime'] = time
self.experiments[id]['endTime'] = 'N/A'
self.experiments[id]['status'] = 'INITIALIZED'
self.experiments[id]['fileName'] = file_name
self.experiments[id]['platform'] = platform
self.write_file() |
Update experiment
def update_experiment(self, id, key, value):
'''Update experiment'''
if id not in self.experiments:
return False
self.experiments[id][key] = value
self.write_file()
return True |
remove an experiment by id
def remove_experiment(self, id):
'''remove an experiment by id'''
if id in self.experiments:
self.experiments.pop(id)
self.write_file() |
save config to local file
def write_file(self):
'''save config to local file'''
try:
with open(self.experiment_file, 'w') as file:
json.dump(self.experiments, file)
except IOError as error:
print('Error:', error)
return |
load config from local file
def read_file(self):
'''load config from local file'''
if os.path.exists(self.experiment_file):
try:
with open(self.experiment_file, 'r') as file:
return json.load(file)
except ValueError:
return {}
return {} |
load data from file
def load_from_file(path, fmt=None, is_training=True):
'''
load data from file
'''
if fmt is None:
fmt = 'squad'
assert fmt in ['squad', 'csv'], 'input format must be squad or csv'
qp_pairs = []
if fmt == 'squad':
with open(path) as data_file:
data = json.load(data_file)['data']
for doc in data:
for paragraph in doc['paragraphs']:
passage = paragraph['context']
for qa_pair in paragraph['qas']:
question = qa_pair['question']
qa_id = qa_pair['id']
if not is_training:
qp_pairs.append(
{'passage': passage, 'question': question, 'id': qa_id})
else:
for answer in qa_pair['answers']:
answer_begin = int(answer['answer_start'])
answer_end = answer_begin + len(answer['text'])
qp_pairs.append({'passage': passage,
'question': question,
'id': qa_id,
'answer_begin': answer_begin,
'answer_end': answer_end})
else:
with open(path, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
line_num = 0
for row in reader:
qp_pairs.append(
{'passage': row[1], 'question': row[0], 'id': line_num})
line_num += 1
return qp_pairs |
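For reference, the SQuAD-style input parsed above has roughly this shape; the field names follow the parsing code, the content is illustrative:
# {"data": [{"paragraphs": [{"context": "...passage text...",
#     "qas": [{"question": "...", "id": "q1",
#              "answers": [{"answer_start": 0, "text": "..."}]}]}]}]} |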
tokenize function.
def tokenize(qp_pair, tokenizer=None, is_training=False):
'''
tokenize function.
'''
question_tokens = tokenizer.tokenize(qp_pair['question'])
passage_tokens = tokenizer.tokenize(qp_pair['passage'])
if is_training:
question_tokens = question_tokens[:300]
passage_tokens = passage_tokens[:300]
passage_tokens.insert(
0, {'word': '<BOS>', 'original_text': '<BOS>', 'char_begin': 0, 'char_end': 0})
passage_tokens.append(
{'word': '<EOS>', 'original_text': '<EOS>', 'char_begin': 0, 'char_end': 0})
qp_pair['question_tokens'] = question_tokens
qp_pair['passage_tokens'] = passage_tokens |
Build the vocab from corpus.
def collect_vocab(qp_pairs):
'''
Build the vocab from corpus.
'''
vocab = set()
for qp_pair in qp_pairs:
for word in qp_pair['question_tokens']:
vocab.add(word['word'])
for word in qp_pair['passage_tokens']:
vocab.add(word['word'])
return vocab |
Shuffle the step
def shuffle_step(entries, step):
'''
Shuffle the step
'''
answer = []
for i in range(0, len(entries), step):
sub = entries[i:i+step]
shuffle(sub)
answer += sub
return answer |
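A quick illustration (assumes shuffle_step above is importable): shuffling happens only inside each window of length step, so the relative order of windows is preserved.
print(shuffle_step(list(range(6)), 2))
# a possible output: [1, 0, 2, 3, 5, 4] |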
Get batches data and shuffle.
def get_batches(qp_pairs, batch_size, need_sort=True):
'''
Get batches data and shuffle.
'''
if need_sort:
qp_pairs = sorted(qp_pairs, key=lambda qp: (
len(qp['passage_tokens']), qp['id']), reverse=True)
batches = [{'qp_pairs': qp_pairs[i:(i + batch_size)]}
for i in range(0, len(qp_pairs), batch_size)]
shuffle(batches)
return batches |
Get char input.
def get_char_input(data, char_dict, max_char_length):
'''
Get char input.
'''
batch_size = len(data)
sequence_length = max(len(d) for d in data)
char_id = np.zeros((max_char_length, sequence_length,
batch_size), dtype=np.int32)
char_lengths = np.zeros((sequence_length, batch_size), dtype=np.float32)
for batch_idx in range(0, min(len(data), batch_size)):
batch_data = data[batch_idx]
for sample_idx in range(0, min(len(batch_data), sequence_length)):
word = batch_data[sample_idx]['word']
char_lengths[sample_idx, batch_idx] = min(
len(word), max_char_length)
for i in range(0, min(len(word), max_char_length)):
char_id[i, sample_idx, batch_idx] = get_id(char_dict, word[i])
return char_id, char_lengths |
Get word input.
def get_word_input(data, word_dict, embed, embed_dim):
'''
Get word input.
'''
batch_size = len(data)
max_sequence_length = max(len(d) for d in data)
sequence_length = max_sequence_length
word_input = np.zeros((max_sequence_length, batch_size,
embed_dim), dtype=np.float32)
ids = np.zeros((sequence_length, batch_size), dtype=np.int32)
masks = np.zeros((sequence_length, batch_size), dtype=np.float32)
lengths = np.zeros([batch_size], dtype=np.int32)
for batch_idx in range(0, min(len(data), batch_size)):
batch_data = data[batch_idx]
lengths[batch_idx] = len(batch_data)
for sample_idx in range(0, min(len(batch_data), sequence_length)):
word = batch_data[sample_idx]['word'].lower()
if word in word_dict.keys():
word_input[sample_idx, batch_idx] = embed[word_dict[word]]
ids[sample_idx, batch_idx] = word_dict[word]
masks[sample_idx, batch_idx] = 1
word_input = np.reshape(word_input, (-1, embed_dim))
return word_input, ids, masks, lengths |
Given word return word index.
def get_word_index(tokens, char_index):
'''
Given word return word index.
'''
for (i, token) in enumerate(tokens):
if token['char_end'] == 0:
continue
if token['char_begin'] <= char_index and char_index <= token['char_end']:
return i
return 0 |
Get answer's index of begin and end.
def get_answer_begin_end(data):
'''
Get answer's index of begin and end.
'''
begin = []
end = []
for qa_pair in data:
tokens = qa_pair['passage_tokens']
char_begin = qa_pair['answer_begin']
char_end = qa_pair['answer_end']
word_begin = get_word_index(tokens, char_begin)
word_end = get_word_index(tokens, char_end)
begin.append(word_begin)
end.append(word_end)
return np.asarray(begin), np.asarray(end) |
Get bucket by length.
def get_buckets(min_length, max_length, bucket_count):
'''
Get bucket by length.
'''
if bucket_count <= 0:
return [max_length]
unit_length = int((max_length - min_length) // (bucket_count))
buckets = [min_length + unit_length *
(i + 1) for i in range(0, bucket_count)]
buckets[-1] = max_length
return buckets |
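A worked example of the bucket boundaries (assumes get_buckets above is importable):
print(get_buckets(0, 100, 4))
# unit_length = 25, so the result is [25, 50, 75, 100] |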
tokenize function in Tokenizer.
def tokenize(self, text):
'''
tokenize function in Tokenizer.
'''
start = -1
tokens = []
for i, character in enumerate(text):
if character == ' ' or character == '\t':
if start >= 0:
word = text[start:i]
tokens.append({
'word': word,
'original_text': word,
'char_begin': start,
'char_end': i})
start = -1
else:
if start < 0:
start = i
if start >= 0:
tokens.append({
'word': text[start:len(text)],
'original_text': text[start:len(text)],
'char_begin': start,
'char_end': len(text)
})
return tokens |
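A small illustration of the whitespace tokenization and character offsets (output abridged, assuming the Tokenizer class that holds this method):
# Tokenizer().tokenize('to be') yields
# [{'word': 'to', 'original_text': 'to', 'char_begin': 0, 'char_end': 2},
#  {'word': 'be', 'original_text': 'be', 'char_begin': 3, 'char_end': 5}] |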
generate new id and event hook for new Individual
def generate_new_id(self):
"""
generate new id and event hook for new Individual
"""
self.events.append(Event())
indiv_id = self.indiv_counter
self.indiv_counter += 1
return indiv_id |
initialize populations for evolution tuner
def init_population(self, population_size, graph_max_layer, graph_min_layer):
"""
initialize populations for evolution tuner
"""
population = []
graph = Graph(max_layer_num=graph_max_layer, min_layer_num=graph_min_layer,
inputs=[Layer(LayerType.input.value, output=[4, 5], size='x'), Layer(LayerType.input.value, output=[4, 5], size='y')],
output=[Layer(LayerType.output.value, inputs=[4], size='x'), Layer(LayerType.output.value, inputs=[5], size='y')],
hide=[Layer(LayerType.attention.value, inputs=[0, 1], output=[2]),
Layer(LayerType.attention.value, inputs=[1, 0], output=[3])])
for _ in range(population_size):
graph_tmp = copy.deepcopy(graph)
graph_tmp.mutation()
population.append(Individual(indiv_id=self.generate_new_id(), graph_cfg=graph_tmp, result=None))
return population |
Returns a set of trial graph config, as a serializable object.
An example configuration:
```json
{
"shared_id": [
"4a11b2ef9cb7211590dfe81039b27670",
"370af04de24985e5ea5b3d72b12644c9",
"11f646e9f650f5f3fedc12b6349ec60f",
"0604e5350b9c734dd2d770ee877cfb26",
"6dbeb8b022083396acb721267335f228",
"ba55380d6c84f5caeb87155d1c5fa654"
],
"graph": {
"layers": [
...
{
"hash_id": "ba55380d6c84f5caeb87155d1c5fa654",
"is_delete": false,
"size": "x",
"graph_type": 0,
"output": [
6
],
"output_size": 1,
"input": [
7,
1
],
"input_size": 2
},
...
]
},
"restore_dir": "/mnt/nfs/nni/ga_squad/87",
"save_dir": "/mnt/nfs/nni/ga_squad/95"
}
```
`restore_dir` means the path in which to load the previously trained model weights; if null, init from scratch.
`save_dir` means the path to save trained model for current trial.
`graph` is the configuration of model network.
Note: each configuration of layers has a `hash_id` property,
which tells tuner & trial code whether to share trained weights or not.
`shared_id` is the hash_id of layers that should be shared with previously trained model.
def generate_parameters(self, parameter_id):
"""Returns a set of trial graph config, as a serializable object.
An example configuration:
```json
{
"shared_id": [
"4a11b2ef9cb7211590dfe81039b27670",
"370af04de24985e5ea5b3d72b12644c9",
"11f646e9f650f5f3fedc12b6349ec60f",
"0604e5350b9c734dd2d770ee877cfb26",
"6dbeb8b022083396acb721267335f228",
"ba55380d6c84f5caeb87155d1c5fa654"
],
"graph": {
"layers": [
...
{
"hash_id": "ba55380d6c84f5caeb87155d1c5fa654",
"is_delete": false,
"size": "x",
"graph_type": 0,
"output": [
6
],
"output_size": 1,
"input": [
7,
1
],
"input_size": 2
},
...
]
},
"restore_dir": "/mnt/nfs/nni/ga_squad/87",
"save_dir": "/mnt/nfs/nni/ga_squad/95"
}
```
`restore_dir` is the path from which to load the previously trained model weights; if null, initialize from scratch.
`save_dir` is the path where the current trial saves its trained model.
`graph` is the configuration of the model network.
Note: each layer configuration has a `hash_id` property,
which tells the tuner & trial code whether to share trained weights or not.
`shared_id` lists the hash_ids of layers whose weights should be shared with the previously trained model.
"""
logger.debug('acquiring lock for param {}'.format(parameter_id))
self.thread_lock.acquire()
logger.debug('lock for current thread acquired')
if not self.population:
logger.debug("the len of poplution lower than zero.")
raise Exception('The population is empty')
pos = -1
for i in range(len(self.population)):
if self.population[i].result is None:
pos = i
break
if pos != -1:
indiv = copy.deepcopy(self.population[pos])
self.population.pop(pos)
graph_param = json.loads(graph_dumps(indiv.config))
else:
random.shuffle(self.population)
if self.population[0].result < self.population[1].result:
self.population[0] = self.population[1]
indiv = copy.deepcopy(self.population[0])
self.population.pop(1)
indiv.mutation(indiv_id=self.generate_new_id())
graph_param = json.loads(graph_dumps(indiv.config))
param_json = {
'graph': graph_param,
'restore_dir': self.save_dir(indiv.parent_id),
'save_dir': self.save_dir(indiv.indiv_id),
'shared_id': list(indiv.shared_ids) if indiv.parent_id is not None else None,
}
logger.debug('generate_parameter return value is:')
logger.debug(param_json)
logger.debug('releasing lock')
self.thread_lock.release()
if indiv.parent_id is not None:
logger.debug("new trial {} pending on parent experiment {}".format(indiv.indiv_id, indiv.parent_id))
self.events[indiv.parent_id].wait()
logger.debug("trial {} ready".format(indiv.indiv_id))
return param_json |
Record an observation of the objective function
parameter_id : int
parameters : dict of parameters
value: final metrics of the trial, including reward
def receive_trial_result(self, parameter_id, parameters, value):
'''
Record an observation of the objective function
parameter_id : int
parameters : dict of parameters
value: final metrics of the trial, including reward
'''
logger.debug('acquiring lock for param {}'.format(parameter_id))
self.thread_lock.acquire()
logger.debug('lock for current thread acquired')
reward = extract_scalar_reward(value)
if self.optimize_mode is OptimizeMode.Minimize:
reward = -reward
logger.debug('receive trial result is:\n')
logger.debug(str(parameters))
logger.debug(str(reward))
indiv = Individual(indiv_id=int(os.path.split(parameters['save_dir'])[1]),
graph_cfg=graph_loads(parameters['graph']), result=reward)
self.population.append(indiv)
logger.debug('releasing lock')
self.thread_lock.release()
self.events[indiv.indiv_id].set() |
update data
Parameters
----------
trial_job_id: int
trial job id
trial_history: list
The list of intermediate results reported by the trial so far
def _update_data(self, trial_job_id, trial_history):
"""update data
Parameters
----------
trial_job_id: int
trial job id
trial_history: list
The list of intermediate results reported by the trial so far
"""
if trial_job_id not in self.running_history:
self.running_history[trial_job_id] = []
self.running_history[trial_job_id].extend(trial_history[len(self.running_history[trial_job_id]):]) |
trial_end
Parameters
----------
trial_job_id: int
trial job id
success: bool
True if the trial finished successfully, False otherwise
def trial_end(self, trial_job_id, success):
"""trial_end
Parameters
----------
trial_job_id: int
trial job id
success: bool
True if the trial finished successfully, False otherwise
"""
if trial_job_id in self.running_history:
if success:
cnt = 0
history_sum = 0
self.completed_avg_history[trial_job_id] = []
for each in self.running_history[trial_job_id]:
cnt += 1
history_sum += each
self.completed_avg_history[trial_job_id].append(history_sum / cnt)
self.running_history.pop(trial_job_id)
else:
logger.warning('trial_end: trial_job_id not in running_history')
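The success branch above simply replaces the raw history with its running mean; a minimal standalone sketch of that computation, using made-up metric values:

history = [0.5, 0.7, 0.9]          # metrics reported by a finished trial (made up)
running_avg = []
total = 0
for step, value in enumerate(history, start=1):
    total += value
    running_avg.append(total / step)
# running_avg is approximately [0.5, 0.6, 0.7]; this is what completed_avg_history stores per trial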
assess_trial
Parameters
----------
trial_job_id: int
trial job id
trial_history: list
The list of intermediate results reported by the trial so far
Returns
-------
AssessResult
AssessResult.Good or AssessResult.Bad
Raises
------
Exception
unrecognized exception in medianstop_assessor
def assess_trial(self, trial_job_id, trial_history):
"""assess_trial
Parameters
----------
trial_job_id: int
trial job id
trial_history: list
The list of intermediate results reported by the trial so far
Returns
-------
AssessResult
AssessResult.Good or AssessResult.Bad
Raises
------
Exception
unrecognized exception in medianstop_assessor
"""
curr_step = len(trial_history)
if curr_step < self.start_step:
return AssessResult.Good
try:
num_trial_history = [float(ele) for ele in trial_history]
except (TypeError, ValueError) as error:
logger.warning('incorrect data type or value:')
logger.exception(error)
return AssessResult.Good
except Exception as error:
logger.warning('unrecognized exception in medianstop_assessor:')
logger.exception(error)
return AssessResult.Good
self._update_data(trial_job_id, num_trial_history)
if self.high_better:
best_history = max(trial_history)
else:
best_history = min(trial_history)
avg_array = []
for completed_id in self.completed_avg_history:
if len(self.completed_avg_history[completed_id]) >= curr_step:
avg_array.append(self.completed_avg_history[completed_id][curr_step - 1])
if len(avg_array) > 0:
avg_array.sort()
if self.high_better:
median = avg_array[(len(avg_array)-1) // 2]
return AssessResult.Bad if best_history < median else AssessResult.Good
else:
median = avg_array[len(avg_array) // 2]
return AssessResult.Bad if best_history > median else AssessResult.Good
else:
return AssessResult.Good |
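To make the median stopping rule concrete, assume two completed trials whose running averages at the current step are 0.80 and 0.60 (values are made up). With `high_better`, the median is the lower-middle element, and a running trial is stopped when its best metric so far falls below that median:

avg_array = sorted([0.80, 0.60])                 # running averages of completed trials at this step
median = avg_array[(len(avg_array) - 1) // 2]    # 0.60, the high_better branch above
# best_history = 0.55  ->  0.55 < 0.60  ->  AssessResult.Bad  (stop the trial early)
# best_history = 0.65  ->  0.65 >= 0.60 ->  AssessResult.Good (let the trial continue)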
Copy directory from HDFS to local
def copyHdfsDirectoryToLocal(hdfsDirectory, localDirectory, hdfsClient):
'''Copy directory from HDFS to local'''
if not os.path.exists(localDirectory):
os.makedirs(localDirectory)
try:
listing = hdfsClient.list_status(hdfsDirectory)
except Exception as exception:
nni_log(LogType.Error, 'List hdfs directory {0} error: {1}'.format(hdfsDirectory, str(exception)))
raise exception
for f in listing:
if f.type == 'DIRECTORY':
subHdfsDirectory = posixpath.join(hdfsDirectory, f.pathSuffix)
subLocalDirectory = os.path.join(localDirectory, f.pathSuffix)
copyHdfsDirectoryToLocal(subHdfsDirectory, subLocalDirectory, hdfsClient)
elif f.type == 'FILE':
hdfsFilePath = posixpath.join(hdfsDirectory, f.pathSuffix)
localFilePath = os.path.join(localDirectory, f.pathSuffix)
copyHdfsFileToLocal(hdfsFilePath, localFilePath, hdfsClient)
else:
raise AssertionError('unexpected type {}'.format(f.type)) |
Copy file from HDFS to local
def copyHdfsFileToLocal(hdfsFilePath, localFilePath, hdfsClient, override=True):
'''Copy file from HDFS to local'''
if not hdfsClient.exists(hdfsFilePath):
raise Exception('HDFS file {} does not exist!'.format(hdfsFilePath))
try:
file_status = hdfsClient.get_file_status(hdfsFilePath)
if file_status.type != 'FILE':
raise Exception('HDFS file path {} is not a file'.format(hdfsFilePath))
except Exception as exception:
nni_log(LogType.Error, 'Get hdfs file {0} status error: {1}'.format(hdfsFilePath, str(exception)))
raise exception
if os.path.exists(localFilePath) and override:
os.remove(localFilePath)
try:
hdfsClient.copy_to_local(hdfsFilePath, localFilePath)
except Exception as exception:
nni_log(LogType.Error, 'Copy hdfs file {0} to {1} error: {2}'.format(hdfsFilePath, localFilePath, str(exception)))
raise exception
nni_log(LogType.Info, 'Successfully copied hdfs file {0} to {1}, {2} bytes'.format(hdfsFilePath, localFilePath, file_status.length)) |
Copy directory from local to HDFS
def copyDirectoryToHdfs(localDirectory, hdfsDirectory, hdfsClient):
'''Copy directory from local to HDFS'''
if not os.path.exists(localDirectory):
raise Exception('Local Directory does not exist!')
hdfsClient.mkdirs(hdfsDirectory)
result = True
for file in os.listdir(localDirectory):
file_path = os.path.join(localDirectory, file)
if os.path.isdir(file_path):
hdfs_directory = os.path.join(hdfsDirectory, file)
try:
result = result and copyDirectoryToHdfs(file_path, hdfs_directory, hdfsClient)
except Exception as exception:
nni_log(LogType.Error, 'Copy local directory {0} to hdfs directory {1} error: {2}'.format(file_path, hdfs_directory, str(exception)))
result = False
else:
hdfs_file_path = os.path.join(hdfsDirectory, file)
try:
result = result and copyFileToHdfs(file_path, hdfs_file_path, hdfsClient)
except Exception as exception:
nni_log(LogType.Error, 'Copy local file {0} to hdfs {1} error: {2}'.format(file_path, hdfs_file_path, str(exception)))
result = False
return result |
Copy a local file to HDFS directory
def copyFileToHdfs(localFilePath, hdfsFilePath, hdfsClient, override=True):
'''Copy a local file to HDFS directory'''
if not os.path.exists(localFilePath):
raise Exception('Local file Path does not exist!')
if os.path.isdir(localFilePath):
raise Exception('localFilePath should not be a directory!')
if hdfsClient.exists(hdfsFilePath):
if override:
hdfsClient.delete(hdfsFilePath)
else:
return False
try:
hdfsClient.copy_from_local(localFilePath, hdfsFilePath)
return True
except Exception as exception:
nni_log(LogType.Error, 'Copy local file {0} to hdfs file {1} error: {2}'.format(localFilePath, hdfsFilePath, str(exception)))
return False |
Load dataset, use boston dataset
def load_data():
'''Load dataset, use boston dataset'''
boston = load_boston()
X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=99, test_size=0.25)
#normalize data
ss_X = StandardScaler()
ss_y = StandardScaler()
X_train = ss_X.fit_transform(X_train)
X_test = ss_X.transform(X_test)
y_train = ss_y.fit_transform(y_train[:, None])[:,0]
y_test = ss_y.transform(y_test[:, None])[:,0]
return X_train, X_test, y_train, y_test |
Get model according to parameters
def get_model(PARAMS):
'''Get model according to parameters'''
model_dict = {
'LinearRegression': LinearRegression(),
'SVR': SVR(),
'KNeighborsRegressor': KNeighborsRegressor(),
'DecisionTreeRegressor': DecisionTreeRegressor()
}
if not model_dict.get(PARAMS['model_name']):
LOG.exception('Model not supported!')
exit(1)
model = model_dict[PARAMS['model_name']]
try:
if PARAMS['model_name'] == 'SVR':
model.kernel = PARAMS['svr_kernel']
elif PARAMS['model_name'] == 'KNeighborsRegressor':
model.weights = PARAMS['knr_weights']
except Exception as exception:
LOG.exception(exception)
raise
return model |
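A small usage sketch with a hypothetical `PARAMS` dict (the keys follow the function above; the real values come from the tuner):

PARAMS = {'model_name': 'SVR', 'svr_kernel': 'rbf'}
model = get_model(PARAMS)   # returns an SVR instance with kernel='rbf'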
Train model and predict result
def run(X_train, X_test, y_train, y_test, PARAMS):
'''Train model and predict result'''
model = get_model(PARAMS)
model.fit(X_train, y_train)
predict_y = model.predict(X_test)
score = r2_score(y_test, predict_y)
LOG.debug('r2 score: %s' % score)
nni.report_final_result(score) |
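For context, a hedged sketch of how this trial script is typically driven under NNI; `nni.get_next_parameter()` and `nni.report_final_result()` are the standard trial APIs, while the default `PARAMS` values here are purely illustrative:

if __name__ == '__main__':
    X_train, X_test, y_train, y_test = load_data()
    PARAMS = {'model_name': 'LinearRegression'}   # illustrative defaults
    PARAMS.update(nni.get_next_parameter())       # merge the tuner's suggested parameters
    run(X_train, X_test, y_train, y_test, PARAMS)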
Add a skip-connection to the descriptor.
Args:
u: Number of convolutional layers before the starting point.
v: Number of convolutional layers before the ending point.
connection_type: Must be either CONCAT_CONNECT or ADD_CONNECT.
def add_skip_connection(self, u, v, connection_type):
""" Add a skip-connection to the descriptor.
Args:
u: Number of convolutional layers before the starting point.
v: Number of convolutional layers before the ending point.
connection_type: Must be either CONCAT_CONNECT or ADD_CONNECT.
"""
if connection_type not in [self.CONCAT_CONNECT, self.ADD_CONNECT]:
raise ValueError(
"connection_type should be NetworkDescriptor.CONCAT_CONNECT "
"or NetworkDescriptor.ADD_CONNECT."
)
self.skip_connections.append((u, v, connection_type)) |
NetworkDescriptor to json representation
def to_json(self):
''' NetworkDescriptor to json representation
'''
skip_list = []
for u, v, connection_type in self.skip_connections:
skip_list.append({"from": u, "to": v, "type": connection_type})
return {"node_list": self.layers, "skip_list": skip_list} |
Add a layer to the Graph.
Args:
layer: An instance of the subclasses of StubLayer in layers.py.
input_node_id: An integer. The ID of the input node of the layer.
Returns:
output_node_id: An integer. The ID of the output node of the layer.
def add_layer(self, layer, input_node_id):
"""Add a layer to the Graph.
Args:
layer: An instance of the subclasses of StubLayer in layers.py.
input_node_id: An integer. The ID of the input node of the layer.
Returns:
output_node_id: An integer. The ID of the output node of the layer.
"""
if isinstance(input_node_id, Iterable):
layer.input = list(map(lambda x: self.node_list[x], input_node_id))
output_node_id = self._add_node(Node(layer.output_shape))
for node_id in input_node_id:
self._add_edge(layer, node_id, output_node_id)
else:
layer.input = self.node_list[input_node_id]
output_node_id = self._add_node(Node(layer.output_shape))
self._add_edge(layer, input_node_id, output_node_id)
layer.output = self.node_list[output_node_id]
return output_node_id |
Add a new node to node_list and give the node an ID.
Args:
node: An instance of Node.
Returns:
node_id: An integer.
def _add_node(self, node):
"""Add a new node to node_list and give the node an ID.
Args:
node: An instance of Node.
Returns:
node_id: An integer.
"""
node_id = len(self.node_list)
self.node_to_id[node] = node_id
self.node_list.append(node)
self.adj_list[node_id] = []
self.reverse_adj_list[node_id] = []
return node_id |
Add a new layer to the graph. The nodes should be created in advance.
def _add_edge(self, layer, input_id, output_id):
"""Add a new layer to the graph. The nodes should be created in advance."""
if layer in self.layer_to_id:
layer_id = self.layer_to_id[layer]
if input_id not in self.layer_id_to_input_node_ids[layer_id]:
self.layer_id_to_input_node_ids[layer_id].append(input_id)
if output_id not in self.layer_id_to_output_node_ids[layer_id]:
self.layer_id_to_output_node_ids[layer_id].append(output_id)
else:
layer_id = len(self.layer_list)
self.layer_list.append(layer)
self.layer_to_id[layer] = layer_id
self.layer_id_to_input_node_ids[layer_id] = [input_id]
self.layer_id_to_output_node_ids[layer_id] = [output_id]
self.adj_list[input_id].append((output_id, layer_id))
self.reverse_adj_list[output_id].append((input_id, layer_id)) |
Redirect the layer to a new node.
Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id`
while keeping all other properties of the edge the same.
def _redirect_edge(self, u_id, v_id, new_v_id):
"""Redirect the layer to a new node.
Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id`
while keeping all other properties of the edge the same.
"""
layer_id = None
for index, edge_tuple in enumerate(self.adj_list[u_id]):
if edge_tuple[0] == v_id:
layer_id = edge_tuple[1]
self.adj_list[u_id][index] = (new_v_id, layer_id)
self.layer_list[layer_id].output = self.node_list[new_v_id]
break
for index, edge_tuple in enumerate(self.reverse_adj_list[v_id]):
if edge_tuple[0] == u_id:
layer_id = edge_tuple[1]
self.reverse_adj_list[v_id].remove(edge_tuple)
break
self.reverse_adj_list[new_v_id].append((u_id, layer_id))
for index, value in enumerate(self.layer_id_to_output_node_ids[layer_id]):
if value == v_id:
self.layer_id_to_output_node_ids[layer_id][index] = new_v_id
break |
Replace the layer with a new layer.
def _replace_layer(self, layer_id, new_layer):
"""Replace the layer with a new layer."""
old_layer = self.layer_list[layer_id]
new_layer.input = old_layer.input
new_layer.output = old_layer.output
new_layer.output.shape = new_layer.output_shape
self.layer_list[layer_id] = new_layer
self.layer_to_id[new_layer] = layer_id
self.layer_to_id.pop(old_layer) |
Return the topological order of the node IDs from the input node to the output node.
def topological_order(self):
"""Return the topological order of the node IDs from the input node to the output node."""
q = Queue()
in_degree = {}
for i in range(self.n_nodes):
in_degree[i] = 0
for u in range(self.n_nodes):
for v, _ in self.adj_list[u]:
in_degree[v] += 1
for i in range(self.n_nodes):
if in_degree[i] == 0:
q.put(i)
order_list = []
while not q.empty():
u = q.get()
order_list.append(u)
for v, _ in self.adj_list[u]:
in_degree[v] -= 1
if in_degree[v] == 0:
q.put(v)
return order_list |
Given two node IDs, return all the pooling layers between them.
def _get_pooling_layers(self, start_node_id, end_node_id):
"""Given two node IDs, return all the pooling layers between them."""
layer_list = []
node_list = [start_node_id]
assert self._depth_first_search(end_node_id, layer_list, node_list)
ret = []
for layer_id in layer_list:
layer = self.layer_list[layer_id]
if is_layer(layer, "Pooling"):
ret.append(layer)
elif is_layer(layer, "Conv") and layer.stride != 1:
ret.append(layer)
return ret |
Search for all the layers and nodes down the path.
A recursive function to search all the layers and nodes between the node in the node_list
and the node with target_id.
def _depth_first_search(self, target_id, layer_id_list, node_list):
"""Search for all the layers and nodes down the path.
A recursive function to search all the layers and nodes between the node in the node_list
and the node with target_id."""
assert len(node_list) <= self.n_nodes
u = node_list[-1]
if u == target_id:
return True
for v, layer_id in self.adj_list[u]:
layer_id_list.append(layer_id)
node_list.append(v)
if self._depth_first_search(target_id, layer_id_list, node_list):
return True
layer_id_list.pop()
node_list.pop()
return False |
Search the graph for all the layers to be widened caused by an operation.
It is a recursive function with a duplication check to avoid infinite recursion.
It searches from a starting node u until the corresponding layers have been widened.
Args:
u: The starting node ID.
start_dim: The position to insert the additional dimensions.
total_dim: The total number of dimensions the layer has before widening.
n_add: The number of dimensions to add.
def _search(self, u, start_dim, total_dim, n_add):
"""Search the graph for all the layers to be widened caused by an operation.
It is a recursive function with a duplication check to avoid infinite recursion.
It searches from a starting node u until the corresponding layers have been widened.
Args:
u: The starting node ID.
start_dim: The position to insert the additional dimensions.
total_dim: The total number of dimensions the layer has before widening.
n_add: The number of dimensions to add.
"""
if (u, start_dim, total_dim, n_add) in self.vis:
return
self.vis[(u, start_dim, total_dim, n_add)] = True
for v, layer_id in self.adj_list[u]:
layer = self.layer_list[layer_id]
if is_layer(layer, "Conv"):
new_layer = wider_next_conv(
layer, start_dim, total_dim, n_add, self.weighted
)
self._replace_layer(layer_id, new_layer)
elif is_layer(layer, "Dense"):
new_layer = wider_next_dense(
layer, start_dim, total_dim, n_add, self.weighted
)
self._replace_layer(layer_id, new_layer)
elif is_layer(layer, "BatchNormalization"):
new_layer = wider_bn(layer, start_dim, total_dim, n_add, self.weighted)
self._replace_layer(layer_id, new_layer)
self._search(v, start_dim, total_dim, n_add)
elif is_layer(layer, "Concatenate"):
if self.layer_id_to_input_node_ids[layer_id][1] == u:
# u is on the right of the concat
# next_start_dim += next_total_dim - total_dim
left_dim = self._upper_layer_width(
self.layer_id_to_input_node_ids[layer_id][0]
)
next_start_dim = start_dim + left_dim
next_total_dim = total_dim + left_dim
else:
next_start_dim = start_dim
next_total_dim = total_dim + self._upper_layer_width(
self.layer_id_to_input_node_ids[layer_id][1]
)
self._search(v, next_start_dim, next_total_dim, n_add)
else:
self._search(v, start_dim, total_dim, n_add)
for v, layer_id in self.reverse_adj_list[u]:
layer = self.layer_list[layer_id]
if is_layer(layer, "Conv"):
new_layer = wider_pre_conv(layer, n_add, self.weighted)
self._replace_layer(layer_id, new_layer)
elif is_layer(layer, "Dense"):
new_layer = wider_pre_dense(layer, n_add, self.weighted)
self._replace_layer(layer_id, new_layer)
elif is_layer(layer, "Concatenate"):
continue
else:
self._search(v, start_dim, total_dim, n_add) |
Insert a relu-conv-bn block after the target block.
Args:
target_id: A convolutional layer ID. The new block should be inserted after the block.
new_layer: An instance of StubLayer subclasses.
def to_deeper_model(self, target_id, new_layer):
"""Insert a relu-conv-bn block after the target block.
Args:
target_id: A convolutional layer ID. The new block should be inserted after the block.
new_layer: An instance of StubLayer subclasses.
"""
self.operation_history.append(("to_deeper_model", target_id, new_layer))
input_id = self.layer_id_to_input_node_ids[target_id][0]
output_id = self.layer_id_to_output_node_ids[target_id][0]
if self.weighted:
if is_layer(new_layer, "Dense"):
init_dense_weight(new_layer)
elif is_layer(new_layer, "Conv"):
init_conv_weight(new_layer)
elif is_layer(new_layer, "BatchNormalization"):
init_bn_weight(new_layer)
self._insert_new_layers([new_layer], input_id, output_id) |
Widen the last dimension of the output of the pre_layer.
Args:
pre_layer_id: The ID of a convolutional layer or dense layer.
n_add: The number of dimensions to add.
def to_wider_model(self, pre_layer_id, n_add):
"""Widen the last dimension of the output of the pre_layer.
Args:
pre_layer_id: The ID of a convolutional layer or dense layer.
n_add: The number of dimensions to add.
"""
self.operation_history.append(("to_wider_model", pre_layer_id, n_add))
pre_layer = self.layer_list[pre_layer_id]
output_id = self.layer_id_to_output_node_ids[pre_layer_id][0]
dim = layer_width(pre_layer)
self.vis = {}
self._search(output_id, dim, dim, n_add)
# Update the tensor shapes.
for u in self.topological_order:
for v, layer_id in self.adj_list[u]:
self.node_list[v].shape = self.layer_list[layer_id].output_shape |