# (extraction artifact: dataset table header, not part of the original module)
def rounddict(d: Dict[(Any, float)], x=2):
    """Return a copy of *d* with every value rounded to *x* decimal places."""
    rounded = {}
    for key, value in d.items():
        rounded[key] = round(value, x)
    return rounded
|
def run_analysis(sample, graph, config: AnalysisPipelineConfig, n_iter, recomputation=True, bw_GBps=12, verbose=True, async_pipeline=False, add_comm_times_to_balance=True, sequential_model=None, stages_on_same_gpu: Optional[List[Set[int]]]=None, PRINT_THEORETICAL=False, PRINT_MIN_MAX_BALANCE=False, PRINT_VAR_STD=False, UTILIZATION_SLOWDOWN_SPEEDUP=True, PRINT_1F1B=True, DO_THEORETICAL=False, TRY_SSGD_ANALYSIS=False, TRY_ASGD_ANALYSIS=True):
    """Profile a partitioned pipeline and build a human-readable analysis report.

    Runs ``profile_execution`` over the partitioned ``config`` (n_iter + 1
    iterations; ``mean_std`` later drops the slowest sample), merges statistics
    for stages that share a GPU, computes slowdown/utilization/speedup metrics,
    and (when ``verbose``) prints a long textual report.

    Returns:
        (metric_to_maximize, s): the negated worst-case fwd+bwd no-comm stage
        time (higher is better), and the report string ('' when not verbose).

    NOTE(review): ``stages_on_same_gpu`` is a list of sets of stage ids; it is
    converted below into a dict mapping each stage id to its set.
    """
    if (not stages_on_same_gpu):
        stages_on_same_gpu = list()
    sample_save = sample
    # Normalize the sample into a tuple ordered like config.model_inputs().
    if isinstance(sample, dict):
        sample = tuple([sample[i] for i in config.model_inputs()])
    elif (not isinstance(sample, tuple)):
        sample = (sample,)
    unique_stages_on_same_gpu = stages_on_same_gpu
    # Re-key: stage id -> the set of stages co-located on its GPU.
    stages_on_same_gpu = defaultdict(set)
    for i in unique_stages_on_same_gpu:
        for j in i:
            stages_on_same_gpu[j] = i
    for i in unique_stages_on_same_gpu:
        assert (len(i) >= 1)
    # Every co-located group of k stages contributes k-1 "dummy" stages.
    num_dummy_stages = sum(((len(i) - 1) for i in unique_stages_on_same_gpu))
    theoretical_string = maybe_do_theoretical_analysis(DO_THEORETICAL, PRINT_THEORETICAL, PRINT_MIN_MAX_BALANCE, async_pipeline, graph, recomputation)
    if torch.cuda.is_available():
        torch.cuda.reset_peak_memory_stats()
    profile_result = profile_execution(sample, config, (n_iter + 1), recomputation=recomputation, bw_GBps=bw_GBps, async_pipeline=async_pipeline, add_comm_times_to_balance=add_comm_times_to_balance, stages_on_same_gpu=stages_on_same_gpu)
    real_f_times = profile_result.f_times_mean
    f_std = profile_result.f_times_std
    real_b_times = profile_result.b_times_mean
    b_std = profile_result.b_times_std
    comm_volume_stats = profile_result.communication_stats
    nocomm_real_f_times = profile_result.nocommf_times_mean
    nocomm_real_f_std = profile_result.nocommf_times_std
    nocomm_real_b_times = profile_result.nocommb_times_mean
    nocomm_real_b_std = profile_result.nocommb_times_std
    warnings_list = profile_result.warnings_list
    max_memory_allocated = None
    if torch.cuda.is_available():
        max_memory_allocated = torch.cuda.max_memory_allocated()
    def get_seq_no_recomp_no_comm_times():
        # Baseline: profile the same pipeline sequentially, without recomputation.
        try:
            seq_times = profile_execution(sample, config, (n_iter + 1), recomputation=False, bw_GBps=bw_GBps, async_pipeline=False, add_comm_times_to_balance=add_comm_times_to_balance, stages_on_same_gpu=stages_on_same_gpu)
        except Exception as e:
            print('-E- failed at get_seq_no_recomp_no_comm_times, known issue')
            raise e
        return seq_times
    def get_comm_vol_str(comm_volume_stats):
        # Pretty-print each stage's communication stats with units.
        communication_volume = dict()
        for (idx, stats) in comm_volume_stats.items():
            units = {'input size': 'MB', 'recieve_time': 'ms', 'out': 'MB', 'send time': 'ms'}
            newd = {k: f'{stats[k]:.2f} {units[k]}' for k in stats}
            communication_volume[idx] = ', '.join(('{!s}:{!r}'.format(key, val) for (key, val) in newd.items()))
        return communication_volume
    n_partitions = config.n_stages
    num_real_stages = (n_partitions - num_dummy_stages)
    pipeline_representation_stage_to_device_map = sorted_stage_to_device_map(n_partitions, stages_on_same_gpu)
    if (n_partitions != num_real_stages):
        # Merge statistics of co-located stages into the lowest stage id,
        # then delete the merged (dummy) entries.
        for i in unique_stages_on_same_gpu:
            j = min(i)
            for k in i:
                if (k == j):
                    continue
                for means_list in [real_f_times, real_b_times, nocomm_real_f_times, nocomm_real_b_times, comm_volume_stats]:
                    if isinstance(means_list[j], dict):
                        d1 = means_list[j]
                        d2 = means_list[k]
                        assert isinstance(d1, dict)
                        assert isinstance(d2, dict)
                        for key in d1:
                            d1[key] += d2[key]
                    else:
                        means_list[j] += means_list[k]
                    del means_list[k]
    comm_volume_str = get_comm_vol_str(comm_volume_stats)
    real_b_slowdown = slowdown(real_b_times, nocomm_real_b_times)
    real_f_slowdown = slowdown(real_f_times, nocomm_real_f_times)
    # fwd communication is sending, bwd communication is receiving gradients.
    comp_comm_ratio_f = computation_communication_ratio(nocomm_real_f_times, {k: v['send time'] for (k, v) in comm_volume_stats.items()})
    comp_comm_ratio_b = computation_communication_ratio(nocomm_real_b_times, {k: v['recieve_time'] for (k, v) in comm_volume_stats.items()})
    real_f_utilization = utilization(real_f_times, comp_comm_ratio_f)
    real_b_utilization = utilization(real_b_times, comp_comm_ratio_b)
    pipe_times = (real_f_times, real_b_times, nocomm_real_f_times, nocomm_real_b_times)
    expected_speedup = expected_speedup_after_partitioning(*pipe_times)
    try:
        seq_profile_result = get_seq_no_recomp_no_comm_times()
        expected_speedup_compared_to_seq_no_comm = expected_speedup_compared_to_seq(pipe_times, seq_profile_result)
        seq_success = True
    except (Exception, RuntimeError) as e:
        warnings.warn(f'sequential no_recomputation analysis failed: {sys.exc_info()[0]}, {str(e)}')
        seq_success = False
        expected_speedup_compared_to_seq_no_comm = None
        seq_profile_result = None
    comp_comm_ratio_f = rounddict(comp_comm_ratio_f)
    comp_comm_ratio_b = rounddict(comp_comm_ratio_b)
    real_b_utilization = rounddict(real_b_utilization)
    real_f_utilization = rounddict(real_f_utilization)
    d_param_count = parameter_count(config)
    # Capture pprint output into a string for the report.
    with io.StringIO() as buf, redirect_stdout(buf):
        pprint(d_param_count)
        s_param_count = buf.getvalue()
    d_same_gpu_parameter_count = same_gpu_parameter_count(stage_param_count=d_param_count, stages_on_same_gpu=stages_on_same_gpu)
    num_params_milions = (d_same_gpu_parameter_count['total'] / 1000000.0)
    num_params_milions = round(number=num_params_milions, ndigits=1)
    with io.StringIO() as buf, redirect_stdout(buf):
        print(f'Number of Model Parameters {num_params_milions}M')
        pprint(d_same_gpu_parameter_count)
        s_gpu_param_count = buf.getvalue()
    fwd_plus_backward_std = dict()
    if (n_partitions != num_real_stages):
        warnings.warn('calculating std is not implemented for multiple stages on same GPU')
    else:
        fwd_plus_backward_std['pipeline_no_comm'] = add_stds_dicts(nocomm_real_f_std, nocomm_real_b_std)
    # "1F1B" view: per-stage fwd+bwd step time, with and without communication.
    fwd_plus_backward = dict()
    fwd_plus_backward['pipeline_no_comm'] = add_dicts(nocomm_real_f_times, nocomm_real_b_times)
    fwd_plus_backward['pipeline_with_non_parallel_comm'] = add_dicts(real_f_times, real_b_times)
    for (i, v) in fwd_plus_backward.items():
        if (i == 'seq_no_comm_no_recomp'):
            continue
        worstcase = max(v.values())
        v['worstcase'] = worstcase
        if (i in fwd_plus_backward_std):
            # std of the stage that produced the worst-case time
            key_matching_top_val = max(v.items(), key=operator.itemgetter(1))[0]
            v['worstcase_std'] = fwd_plus_backward_std[i][key_matching_top_val]
    if seq_success:
        fwd_plus_backward['seq_no_comm_no_recomp'] = (add_dicts(seq_profile_result.nocommf_times_mean, seq_profile_result.nocommb_times_mean) if seq_success else dict())
        fwd_plus_backward['pipeline_vs_seq_no_comm'] = (sum(fwd_plus_backward['seq_no_comm_no_recomp'].values()) / fwd_plus_backward['pipeline_no_comm']['worstcase'])
    fwd_plus_backward['expected_compute_utilization'] = {i: (v / fwd_plus_backward['pipeline_no_comm']['worstcase']) for (i, v) in fwd_plus_backward['pipeline_no_comm'].items() if (i != 'worstcase')}
    for i in list(fwd_plus_backward.keys()):
        v = fwd_plus_backward[i]
        fwd_plus_backward[i] = (rounddict(v, 2) if isinstance(v, dict) else round(v, 2))
    with io.StringIO() as buf, redirect_stdout(buf):
        pprint(fwd_plus_backward)
        s_fwd_plus_backward = buf.getvalue()
    if verbose:
        # Assemble the textual report; multi-line f-strings keep the exact layout.
        s = '-I- Printing Report\n'
        if warnings_list:
            s += (('warnings:\n' + '\n'.join(warnings_list)) + '\n')
        if (graph is not None):
            s += f'''Number of nodes in Computation Graph: {graph.num_nodes}
'''
        s += f'''Number of stages: {num_real_stages}
'''
        if num_dummy_stages:
            s += f'''n_partitions:{n_partitions}, num_dummy_stages:{num_dummy_stages}
'''
            s += f'''unique_stages_on_same_gpu: {unique_stages_on_same_gpu}
'''
            s += f'''"stage_to_device_map": {pipeline_representation_stage_to_device_map},
'''
        s += f'''backward times {('do not ' if (not recomputation) else '')}include recomputation
'''
        if (async_pipeline and recomputation):
            s += f'''Analysis for async_pipeline=True: last partition will not do recomputation.
'''
        s += theoretical_string
        s += f'''
Stage parameter count:
{s_param_count}'''
        if s_gpu_param_count:
            s += f'''
GPU parameter count:
{s_gpu_param_count}'''
        with_comm_str = ('with' if add_comm_times_to_balance else 'without')
        s += f'''
real times are based on real measurements of execution time ({with_comm_str} communication) of generated partitions ms
'''
        s += f'''forward {rounddict(real_f_times)}
backward {rounddict(real_b_times)}
'''
        if PRINT_VAR_STD:
            s += f'''std of real execution times
'''
            s += f'''forward{rounddict(f_std)}
backward{rounddict(b_std)}
'''
        if UTILIZATION_SLOWDOWN_SPEEDUP:
            s += f'''
Analysis for T = (1-R)fwd + R*bwd:
'''
            s += f'''
Pipeline Slowdown: (compared to sequential execution with no communication, and same recompute policy)
'''
            s += f'''forward {real_f_slowdown:.3f}
backward {real_b_slowdown:.3f}
'''
            s += f'''
Expected utilization by partition
'''
            s += f'''forward {real_f_utilization}
backward {real_b_utilization}
'''
            s += f'''
worstcase: bwd: {max(real_b_times.values()):.3f} fwd: {max(real_f_times.values()):.3f}'''
        s += f'''
Expected speedup for {num_real_stages} partitions is: {expected_speedup:.3f}'''
        s += f'''
Assuming bandwidth of {bw_GBps} GBps between GPUs
'''
        s += f'''
communication volumes size of activations of each partition
'''
        for (idx, volume) in comm_volume_str.items():
            s += f'''{idx}: {volume}
'''
        s += f'''
Compuatation Communication ratio (comp/(comp+comm)):
'''
        s += f'''forward {comp_comm_ratio_f}
backward {comp_comm_ratio_b}
'''
        if PRINT_1F1B:
            s += f'''
Analysis for T = fwd + bwd:
{s_fwd_plus_backward}'''
            if seq_success:
                s += f'''
expected_speedup_compared_to_seq_no_recomp_no_comm: {expected_speedup_compared_to_seq_no_comm:.3f}'''
        # Data-parallel (SSGD/ASGD) analyses print directly to stdout.
        data_parallel_analysis(TRY_ASGD_ANALYSIS, TRY_SSGD_ANALYSIS, bw_GBps, expected_speedup, num_real_stages, sample_save, sequential_model, verbose, config)
        if torch.cuda.is_available():
            s += f'''
Analysis max cuda memory used {(max_memory_allocated / 1000000000.0):.2f}GB'''
        print(s)
    else:
        s = ''
    # Negated so callers can maximize it (smaller worst-case time is better).
    metric_to_maximize = (- fwd_plus_backward['pipeline_no_comm']['worstcase'])
    warnings.warn('ignoring communication in metric_to_maximize')
    return (metric_to_maximize, s)
|
def data_parallel_analysis(TRY_ASGD_ANALYSIS, TRY_SSGD_ANALYSIS, bw_GBps, expected_speedup, num_real_stages, sample, sequential_model, verbose, config: AnalysisPipelineConfig):
    """Run optional SSGD/ASGD data-parallel analyses and print their reports.

    Each analysis is skipped unless its flag is set, CUDA is available, and a
    *sequential_model* is given. Nothing is returned; results are printed.

    Parameters:
        TRY_ASGD_ANALYSIS / TRY_SSGD_ANALYSIS: enable the respective analysis.
        bw_GBps: assumed inter-GPU bandwidth in GB/s.
        expected_speedup: pipeline speedup, used for Pipeline/SSGD and
            Pipeline/ASGD ratios.
        num_real_stages: number of pipeline stages == data-parallel workers.
        sample: the profiling input batch (tensor, tuple, or dict).
        sequential_model: the unpartitioned model.
        verbose: print detailed statistics.
        config: unused here; kept for interface compatibility with callers.
    """
    if (TRY_SSGD_ANALYSIS and torch.cuda.is_available() and (sequential_model is not None)):
        n_workers = num_real_stages
        model = sequential_model
        try:
            (ssgd_expected_speedup, ssgd_stats) = ssgd_run_analysis(sample, model, n_workers, bw_GBps=bw_GBps, verbose=verbose)
            if verbose:
                ssgd_output = None
                with io.StringIO() as buf, redirect_stdout(buf):
                    print()
                    print('Printing SSGD analysis:')
                    print('(naive: assuming 0 concurency between communication and computation)')
                    pprint(rounddict(ssgd_stats))
                    print(f'ssgd_expected_speedup: {ssgd_expected_speedup:.3f}')
                    pipeline_to_ssgd_speedup = (expected_speedup / ssgd_expected_speedup)
                    print(f'Pipeline/SSGD: {pipeline_to_ssgd_speedup:.3f}')
                    ssgd_output = buf.getvalue()
                print(ssgd_output)
        except Exception as e:
            print(f'SSGD analysis failed: {sys.exc_info()[0]}', str(e))
    if (TRY_ASGD_ANALYSIS and torch.cuda.is_available() and (sequential_model is not None)):
        n_workers = num_real_stages
        model = sequential_model
        comm_comp_concurrency_ratio = 0.5
        DROP_BATCH_FOR_ASGD = False
        asgd_ok = False
        first_time = True
        asgd_div = 1
        asgd_sample = sample
        def extract_batch_size_(sample):
            # Infer batch size from the last tensor found in the sample.
            if isinstance(sample, torch.Tensor):
                sample = (sample,)
            if isinstance(sample, tuple):
                b = None
                for i in sample:
                    if isinstance(i, torch.Tensor):
                        b = i.shape[0]
                return b
            if isinstance(sample, dict):
                # FIX: b was unbound (NameError) when the dict held no tensors.
                b = None
                for i in sample.values():
                    if isinstance(i, torch.Tensor):
                        b = i.shape[0]
                return b
        def shrink_sample(sample, len_to_take):
            # Slice every tensor in the sample down to its first len_to_take items.
            if isinstance(sample, torch.Tensor):
                return sample[:len_to_take]
            if isinstance(sample, tuple):
                shrinked = []
                for i in sample:
                    if isinstance(i, torch.Tensor):
                        # FIX: was `shrinked += i[:len_to_take]`, which extended
                        # the list with the tensor's rows instead of appending
                        # the sliced tensor itself.
                        shrinked.append(i[:len_to_take])
                    else:
                        shrinked.append(i)
                return tuple(shrinked)
            if isinstance(sample, dict):
                return {i: shrink_sample(v, len_to_take) for (i, v) in sample.items()}
            return sample
        # Retry loop: on failure, optionally halve the batch and try again.
        while ((not asgd_ok) or first_time):
            if ((not first_time) and DROP_BATCH_FOR_ASGD):
                asgd_div *= 2
                bz = extract_batch_size_(asgd_sample)
                if (asgd_div > bz):
                    break
                len_to_take = (bz // 2)
                asgd_sample = shrink_sample(asgd_sample, len_to_take)
            elif ((not first_time) and (not DROP_BATCH_FOR_ASGD)):
                break
            else:
                first_time = False
            bz = extract_batch_size_(asgd_sample)
            print(f'Trying ASGD analysis with batch size {bz} per worker')
            try:
                # FIX: profile the (possibly shrunk) asgd_sample; the original
                # passed the full-size `sample`, defeating the batch shrinking.
                (asgd_expected_speedup, asgd_stats) = asgd_run_analysis(asgd_sample, model, n_workers, bw_GBps=bw_GBps, verbose=verbose, comm_comp_concurrency_ratio=comm_comp_concurrency_ratio)
                asgd_ok = True
                if verbose:
                    if (asgd_div > 1):
                        warnings.warn('ASGD STATS ARE FOR LOWER BATCH, please ignore it')
                    asgd_output = None
                    with io.StringIO() as buf, redirect_stdout(buf):
                        print()
                        print('Printing ASGD analysis:')
                        print(f'(assuming {comm_comp_concurrency_ratio} concurency between communication and computation)')
                        pprint(rounddict(asgd_stats))
                        print(f'asgd_expected_speedup: {asgd_expected_speedup:.3f}')
                        pipeline_to_asgd_speedup = (expected_speedup / asgd_expected_speedup)
                        print(f'Pipeline/ASGD: {pipeline_to_asgd_speedup:.3f}')
                        asgd_output = buf.getvalue()
                    print(asgd_output)
                break
            except Exception as e:
                print(f'ASGD analysis failed: {sys.exc_info()[0]}', str(e))
                asgd_ok = False
|
def first_arg_cache(function):
    """Memoize *function* keyed on the identity of its first argument.

    NOTE(review): the key is ``id(args[0])``; if a cached object is garbage
    collected its id may be reused by a new object, yielding a stale hit —
    confirm callers keep the first argument alive for the cache's lifetime.
    """
    memo = {}
    @wraps(function)
    def wrapper(*args):
        key = id(args[0])
        if key not in memo:
            memo[key] = function(*args)
        return memo[key]
    return wrapper
|
def computation_communication_ratio(comp_times, comm_times):
    """Per-key fraction of time spent computing: comp / (comp + comm)."""
    assert (len(comp_times) == len(comm_times))
    return {
        key: comp_times[key] / (comm_times[key] + comp_times[key])
        for key in comp_times
    }
|
def utilization(times, comp_fraction):
    """Per-key utilization: time relative to the slowest entry (rounded to 2
    decimals) scaled by that key's computation fraction."""
    slowest = max(times.values())
    relative_speed = {key: round(t / slowest, 2) for (key, t) in times.items()}
    return {key: relative_speed[key] * comp_fraction[key] for key in comp_fraction}
|
def slowdown(times, times_wo_comm):
    """Slowdown of running n stages at the bottleneck's pace versus the ideal
    sequential sum of the communication-free times."""
    bottleneck = max(times.values())
    stage_count = len(times)
    ideal_total = sum(times_wo_comm.values())
    return (stage_count * bottleneck) / ideal_total
|
def imbbalance_slowdown(times):
    """Slowdown caused purely by load imbalance: n * worst / total.

    (Note: the misspelled name is kept for caller compatibility.)
    """
    bottleneck = max(times.values())
    total_work = sum(times.values())
    return (len(times) * bottleneck) / total_work
|
def expected_speedup_after_partitioning(fwd_times, bwd_times, fwd_times_wo_comm, bwd_times_wo_comm):
    """Expected pipeline speedup: n stages divided by the communication-induced
    slowdown, weighting fwd/bwd slowdowns by their share of the worst-case
    step time."""
    assert (len(fwd_times) == len(bwd_times))
    stage_count = len(fwd_times)

    def _slowdown(times, times_wo_comm):
        # n * bottleneck vs. ideal sequential no-comm total (same as module-level slowdown)
        return (len(times) * max(times.values())) / sum(times_wo_comm.values())

    fwd_slow = _slowdown(fwd_times, fwd_times_wo_comm)
    bwd_slow = _slowdown(bwd_times, bwd_times_wo_comm)
    worst_fwd = max(fwd_times.values())
    worst_bwd = max(bwd_times.values())
    step_time = worst_fwd + worst_bwd
    weighted_slowdown = ((worst_bwd / step_time) * bwd_slow) + ((worst_fwd / step_time) * fwd_slow)
    return stage_count / weighted_slowdown
|
def expected_speedup_compared_to_seq(pipe_times, seq_times: ProfileResult):
    """Speedup of the pipeline's steady-state step (worst fwd + worst bwd)
    over sequential no-comm execution (sum of all stages' fwd + bwd means)."""
    (fwd_times, bwd_times, fwd_times_wo_comm, bwd_times_wo_comm) = pipe_times
    seq_fwd_total = sum(seq_times.nocommf_times_mean.values())
    seq_bwd_total = sum(seq_times.nocommb_times_mean.values())
    pipe_step_time = max(fwd_times.values()) + max(bwd_times.values())
    return (seq_fwd_total + seq_bwd_total) / pipe_step_time
|
def parameter_count(partition_config: AnalysisPipelineConfig):
    """Count model parameters per stage; adds a 'total' entry summing them all."""
    counts = {}
    for stage_id in range(partition_config.n_stages):
        stage_model = partition_config.stage_to_model[stage_id]
        counts[stage_id] = sum(p.numel() for p in stage_model.parameters())
    counts['total'] = sum(counts.values())
    return counts
|
def same_gpu_parameter_count(stage_param_count: Dict[(Union[(int, str)], int)], stages_on_same_gpu: Dict[(int, Set[int])]):
    """Aggregate per-stage parameter counts into per-GPU counts.

    Each GPU is identified by the smallest stage id in its co-located set;
    the overall 'total' is copied through from *stage_param_count*.
    """
    gpu_to_params = defaultdict(int)
    for (stage_id, colocated) in stages_on_same_gpu.items():
        representative = min(colocated)  # same as tuple(sorted(s))[0]
        gpu_to_params[representative] += stage_param_count[stage_id]
    gpu_to_params['total'] = stage_param_count['total']
    return dict(gpu_to_params)
|
@dataclass
class ProfileResult():
    """Aggregated per-stage profiling statistics returned by profile_execution.

    All time dicts are keyed by stage id; values are milliseconds. The
    'nocomm' variants exclude the communication-time adjustments.
    """
    # forward times including comm adjustment (mean/std per stage)
    f_times_mean: Dict[(int, float)]
    f_times_std: Dict[(int, float)]
    # backward times including comm adjustment (mean/std per stage)
    b_times_mean: Dict[(int, float)]
    b_times_std: Dict[(int, float)]
    # per-stage dicts: 'input size', 'recieve_time', 'out', 'send time'
    communication_stats: Dict[(int, Dict[(str, float)])]
    # pure computation times, no communication (mean/std per stage)
    nocommf_times_mean: Dict[(int, float)]
    nocommf_times_std: Dict[(int, float)]
    nocommb_times_mean: Dict[(int, float)]
    nocommb_times_std: Dict[(int, float)]
    # warnings gathered while profiling (also emitted via warnings.warn)
    warnings_list: List[str]
|
def profile_execution(model_inputs, partition_config: AnalysisPipelineConfig, n_iters: int, recomputation=True, bw_GBps=12, async_pipeline=False, add_comm_times_to_balance=True, stages_on_same_gpu: Optional[Dict[(int, Set[int])]]=None, parallel_comm_and_comp_ratio=0, different_links_between_accelerators=False) -> ProfileResult:
    """Perform forward/backward passes and measure execution times across n_iters batches.

    Stages are processed in dependency order: each stage runs once all of its
    input activations exist, otherwise it is pushed back onto the queue.

    # TODO: currently its just the same input sample n_iter times, this could be improved.
    """
    if (not stages_on_same_gpu):
        stages_on_same_gpu = dict()
    n_partitions = partition_config.n_stages
    # Per-stage lists of raw measurements; f/b include communication-time
    # adjustments, the nocomm* variants do not.
    f_times = {i: [] for i in range(n_partitions)}
    b_times = {i: [] for i in range(n_partitions)}
    nocommf_times = {i: [] for i in range(n_partitions)}
    nocommb_times = {i: [] for i in range(n_partitions)}
    communication_stats = {}
    # Names of activations that are actually nn.Parameters (need requires_grad_).
    is_parameter = set()
    if (not isinstance(model_inputs, (tuple, list))):
        model_inputs = (model_inputs,)
    warnings_list = []
    for current_iteration_num in tqdm(range(n_iters), 'Profile'):
        activations = {}
        assert (len(partition_config.model_inputs()) == len(model_inputs))
        for (name, t) in zip(partition_config.model_inputs(), model_inputs):
            activations[name] = move_tensors(t, 'cpu')
        parts = deque(range(n_partitions))
        while (len(parts) > 0):
            # Pops one stage; re-queues it when its inputs are not ready yet.
            run_and_profile_partitions(activations, add_comm_times_to_balance, async_pipeline, b_times, bw_GBps, communication_stats, current_iteration_num, different_links_between_accelerators, f_times, is_parameter, nocommb_times, nocommf_times, parallel_comm_and_comp_ratio, partition_config, parts, recomputation, stages_on_same_gpu, warnings_list)
    # Aggregate measurements to per-stage mean/std (mean_std drops the slowest sample).
    (fm, fs) = mean_std(f_times)
    (bm, bs) = mean_std(b_times)
    (ncfm, ncfs) = mean_std(nocommf_times)
    (ncbm, ncbs) = mean_std(nocommb_times)
    return ProfileResult(fm, fs, bm, bs, communication_stats, ncfm, ncfs, ncbm, ncbs, warnings_list)
|
def run_and_profile_partitions(activations, add_comm_times_to_balance, async_pipeline, b_times, bw_GBps, communication_stats, current_iteration_num, different_links_between_accelerators, f_times, is_parameter, nocommb_times, nocommf_times, parallel_comm_and_comp_ratio, partition_config, parts, recomputation, stages_on_same_gpu, warnings_list):
    """Profile a single stage popped from *parts*.

    Runs the stage's forward/backward on GPU (or CPU), estimates send/receive
    times from activation sizes and *bw_GBps*, records results into the given
    accumulator dicts, and stores outputs into *activations*. If the stage's
    inputs are not all available yet, the stage is re-queued instead.
    """
    stage_id = parts.popleft()
    if stages_on_same_gpu:
        my_gpu_set = stages_on_same_gpu[stage_id]
        if my_gpu_set:
            pass
    else:
        # NOTE(review): {} is an empty dict, not a set; it is only used for
        # membership tests below, so behavior is the same.
        my_gpu_set = {}
    is_last_partition = partition_config.is_last_forward_stage(stage_id)
    is_first_partition = partition_config.is_first_forward_stage(stage_id)
    partition_specific_recomputation = recomputation
    if (async_pipeline and is_last_partition):
        # Async pipeline: the last stage never recomputes.
        partition_specific_recomputation = False
    inputs_requires_grad = partition_config.get_inputs_req_grad_for_stage_tuple(stage_id)
    # Run only when every input activation has already been produced.
    if all(((tensor in activations) for tensor in partition_config.get_all_stage_inputs(stage_id))):
        inputs = []
        inputs_rcv_from_stage = []
        in_size_mb = 0
        for (tensor, tensor_input_info) in partition_config.get_all_stage_inputs(stage_id).items():
            t = activations[tensor]
            sender_stage_id = tensor_input_info['created_by']
            if (sender_stage_id == (- 1)):
                # -1 marks a model-level input (no sender stage).
                assert (tensor in partition_config.model_inputs())
            else:
                is_same_gpu = (sender_stage_id in my_gpu_set)
                if (not is_same_gpu):
                    # Only cross-GPU transfers count toward receive volume.
                    in_size_mb += tensor_sizes(t)
            if (tensor in is_parameter):
                t.requires_grad_()
            inputs.append(t)
            inputs_rcv_from_stage.append(sender_stage_id)
        in_size_mb /= 1000000.0
        if different_links_between_accelerators:
            # Model per-link bandwidth: receive time is the max over senders.
            recv_sizes_by_gpu = defaultdict(float)
            for (t, sender_stage_id) in zip(inputs, inputs_rcv_from_stage):
                if (sender_stage_id == (- 1)):
                    continue
                is_same_gpu = (sender_stage_id in my_gpu_set)
                if is_same_gpu:
                    continue
                for together in stages_on_same_gpu:
                    if (len(together) > 0):
                        raise NotImplementedError()
                sender_gpu_id = sender_stage_id
                recv_sizes_by_gpu[sender_gpu_id] += (tensor_sizes(t) / 1000000.0)
            max_recv_time = (max((list(recv_sizes_by_gpu.values()) + [0])) / bw_GBps)
            recv_time = max_recv_time
        else:
            recv_time = (in_size_mb / bw_GBps)
        model = partition_config.stage_to_model[stage_id]
        # Disable inplace ops so profiling does not corrupt shared activations.
        with force_out_of_place(model):
            if torch.cuda.is_available():
                (f_time, b_time, outputs) = cuda_time(model, inputs, recomputation=partition_specific_recomputation, inputs_requires_grad=inputs_requires_grad)
            else:
                (f_time, b_time, outputs) = cpu_time(model, inputs, recomputation=partition_specific_recomputation, inputs_requires_grad=inputs_requires_grad)
        if (len(partition_config.get_all_stage_outputs(stage_id)) != len(outputs)):
            raise RuntimeError()
        out_size_mb = 0
        for ((o, o_info), t) in zip(partition_config.get_all_stage_outputs(stage_id).items(), outputs):
            sent_to = o_info['used_by']
            if ((len(sent_to) > 1) and o_info['req_grad']):
                # Warn only once (first iteration) per profiling run.
                if (current_iteration_num == 0):
                    warning = f'tensor {o} sent to more than 1 target. Inaccurate (backward) communication time analysis'
                    warnings_list.append(warning)
                    warnings.warn(warning)
            if ((current_iteration_num == 0) and isinstance(t, torch.Tensor)):
                if (not t.is_contiguous()):
                    warning = f'Partition{stage_id} output:{o} is not contiguous!'
                    warnings.warn(warning)
                    warnings_list.append(warning)
            if isinstance(t, torch.nn.Parameter):
                is_parameter.add(o)
            activations[o] = move_and_detach(t, 'cpu')
            t_mb = (tensor_sizes(t) / 1000000.0)
            sent_to_same_gpu = ((ii in my_gpu_set) for ii in sent_to)
            for (target_stage_id, target_is_on_same_gpu) in zip(sent_to, sent_to_same_gpu):
                if (not target_is_on_same_gpu):
                    out_size_mb += t_mb
        if different_links_between_accelerators:
            # Per-link send: time is the max volume sent to any single GPU.
            volume_to_gpu = defaultdict(int)
            target_stage_ids = [info['used_by'] for info in partition_config.get_all_stage_outputs(stage_id).values()]
            for (target_stage_id, t) in zip(target_stage_ids, outputs):
                target_is_on_same_gpu = (target_stage_id in my_gpu_set)
                for together in stages_on_same_gpu:
                    if (len(together) > 0):
                        raise NotImplementedError()
                target_gpu_id = target_stage_id
                t_mb = (tensor_sizes(t) / 1000000.0)
                if (not target_is_on_same_gpu):
                    volume_to_gpu[target_gpu_id] += t_mb
            send_time = (max((list(volume_to_gpu.values()) + [0])) / bw_GBps)
        else:
            send_time = (out_size_mb / bw_GBps)
        del outputs
        if is_last_partition:
            # The last stage sends nothing forward.
            send_time = 0.0
        stats = {'input size': in_size_mb, 'recieve_time': recv_time, 'out': out_size_mb, 'send time': send_time}
        communication_stats[stage_id] = stats
        nocommf_times[stage_id].append(f_time)
        nocommb_times[stage_id].append(b_time)
        if different_links_between_accelerators:
            raise NotImplementedError()
        else:
            # Backward "send" is the gradient of the received inputs.
            bwd_send_time = (in_size_mb / bw_GBps)
        if add_comm_times_to_balance:
            if (not parallel_comm_and_comp_ratio):
                # Fully serial comm: add the whole send time to the balance.
                if (not is_last_partition):
                    f_time += send_time
                if (not is_first_partition):
                    b_time += bwd_send_time
            else:
                # Partially overlapped comm: interpolate between lower/upper bounds.
                PARALLEL_RATIO = parallel_comm_and_comp_ratio
                bwd_plus_fwd = (b_time + f_time)
                if (not is_last_partition):
                    lb = extra_communication_time_lower_bound(bwd_plus_fwd, send_time)
                    ub = extra_communication_time_upper_bound(bwd_plus_fwd, send_time)
                    extra_fwd_send_time = apply_ratio(ub, lb, PARALLEL_RATIO)
                    f_time += extra_fwd_send_time
                if (not is_first_partition):
                    lb = extra_communication_time_lower_bound(bwd_plus_fwd, bwd_send_time)
                    ub = extra_communication_time_upper_bound(bwd_plus_fwd, bwd_send_time)
                    extra_bwd_send_time = apply_ratio(ub, lb, PARALLEL_RATIO)
                    b_time += extra_bwd_send_time
        f_times[stage_id].append(f_time)
        b_times[stage_id].append(b_time)
    else:
        # Inputs not ready yet — push the stage back and try later.
        parts.append(stage_id)
|
@contextmanager
def force_out_of_place(model: torch.nn.Module):
    """Context manager that temporarily sets ``inplace = False`` on every
    submodule of *model* that has a boolean ``inplace`` attribute, restoring
    the original values on exit.

    The restore runs in a ``finally`` block, so the state is recovered even
    when the wrapped body raises (the original version leaked
    ``inplace=False`` on exceptions).
    """
    saved_state = dict()
    for m in model.modules():
        if (hasattr(m, 'inplace') and isinstance(m.inplace, bool)):
            saved_state[m] = m.inplace
            m.inplace = False
    try:
        yield
    finally:
        for (m, original_inplace) in saved_state.items():
            m.inplace = original_inplace
|
def mean_std(times, drop=1):
    """Per-key mean and std after discarding the *drop* largest measurement
    values (all ties with the maximum are removed together; stops early rather
    than emptying the list)."""
    means = dict()
    stds = dict()
    for (key, samples) in times.items():
        trimmed = samples
        for _ in range(drop):
            peak = max(trimmed)
            remaining = [t for t in trimmed if (t < peak)]
            if not remaining:
                break
            trimmed = remaining
        values = np.array(trimmed)
        means[key] = np.mean(values)
        stds[key] = np.std(values)
    return (means, stds)
|
def cuda_time(partition, inputs, recomputation=True, inputs_requires_grad=False):
    """Measure forward and backward execution times (ms) of *partition* on GPU.

    Moves the partition to CUDA, times backward first (via cuda_backward),
    clears the gradients it produced, then times forward (via cuda_forward),
    and finally moves the partition back to CPU.

    Returns:
        (f_time, b_time, outputs): times in ms and the forward outputs.
    """
    partition = partition.to('cuda')
    partition.device = 'cuda'
    b_time = cuda_backward(partition, inputs, recomputation=recomputation, inputs_requires_grad=inputs_requires_grad)
    # Drop gradients accumulated by the backward measurement.
    for p in partition.parameters():
        p.grad = None
    (f_time, outputs) = cuda_forward(partition, inputs, recomputation=recomputation)
    partition = partition.cpu()
    partition.device = 'cpu'
    return (f_time, b_time, outputs)
|
def move_and_detach(ts, device):
    """Detach every tensor nested in *ts* and move it to *device*; non-tensor
    items pass through unchanged."""
    def transfer(item):
        if isinstance(item, torch.Tensor):
            return item.detach().to(device)
        return item
    return nested_map(transfer, ts)
|
def tensor_sizes(ts):
    """Total byte size of all tensors nested in *ts*.

    Non-tensor leaves each contribute 1 (kept from the original behavior).
    """
    def leaf_size(item):
        if isinstance(item, torch.Tensor):
            return item.nelement() * item.element_size()
        return 1
    return sum(leaf_size(item) for item in flatten(ts))
|
def set_req_grad(ts, inputs_requires_grad):
    """Set requires_grad on every tensor nested in *ts*.

    *inputs_requires_grad* is either a single bool applied to all tensors, or
    a tuple/list of flags consumed one per tensor (in traversal order). A flag
    only takes effect on floating-point tensors.
    """
    if isinstance(inputs_requires_grad, bool):
        flags = itertools.cycle([inputs_requires_grad])
    elif isinstance(inputs_requires_grad, (tuple, list)):
        flags = iter(inputs_requires_grad)
    else:
        raise NotImplementedError()
    def apply_flag(item):
        if isinstance(item, torch.Tensor):
            # next(flags) is only consumed for tensors, matching flag order.
            return item.requires_grad_((next(flags) and item.is_floating_point()))
        return item
    return nested_map(apply_flag, ts)
|
def get_grad_tensors(flattened_outputs):
    """Infer grad_tensors to be used with:
    torch.autograd.backward(tensors=flattened_outputs, grad_tensors=grad_tensors)

    One random gradient (randn_like) per grad-requiring tensor output.
    """
    return [
        torch.randn_like(out)
        for out in flattened_outputs
        if (isinstance(out, torch.Tensor) and out.requires_grad)
    ]
|
def infer_grad_tensors_for_partition(partition, inputs):
    """Run one forward pass of *partition* and derive random grad_tensors
    matching its grad-requiring outputs."""
    forward_outputs = partition(*inputs)
    return get_grad_tensors(flatten(forward_outputs))
|
def cuda_backward(partition, inputs, recomputation=True, inputs_requires_grad=False):
    """Measure backward time (ms) of *partition* on the GPU.

    With recomputation=True the timer starts before the forward pass, so the
    measured span includes recomputing activations; otherwise timing starts
    only after the forward pass completes and the device is synchronized.
    Timing uses CUDA events (start/end.record + elapsed_time).
    """
    inputs = set_req_grad(move_and_detach(inputs, 'cuda'), inputs_requires_grad)
    # Extra, untimed forward pass just to build matching grad_tensors.
    grad_tensors = infer_grad_tensors_for_partition(partition, inputs)
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    if recomputation:
        torch.cuda.synchronize(device='cuda')
        start.record()
        outputs = partition(*inputs)
        flattened_outputs = flatten(outputs)
    else:
        outputs = partition(*inputs)
        flattened_outputs = flatten(outputs)
        torch.cuda.synchronize(device='cuda')
        start.record()
    # Backward only through grad-requiring tensor outputs.
    flattened_outputs = filter((lambda t: (isinstance(t, torch.Tensor) and t.requires_grad)), flattened_outputs)
    torch.autograd.backward(tensors=flattened_outputs, grad_tensors=grad_tensors)
    end.record()
    torch.cuda.synchronize(device='cuda')
    b_time = start.elapsed_time(end)
    return b_time
|
def cuda_forward(partition, inputs, recomputation=True):
    """Measure forward time (ms) of *partition* on the GPU using CUDA events.

    Grad recording is enabled only when recomputation=False — with
    recomputation the forward pass would be redone under grad during backward,
    so the plain forward is measured without autograd overhead.
    """
    inputs = move_tensors(inputs, 'cuda')
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    torch.cuda.synchronize(device='cuda')
    with torch.set_grad_enabled((not recomputation)):
        start.record()
        outputs = partition(*inputs)
        end.record()
        torch.cuda.synchronize(device='cuda')
        f_time = start.elapsed_time(end)
    return (f_time, outputs)
|
def cpu_time(partition, inputs, recomputation=True, inputs_requires_grad=False):
    """Measure forward/backward time (ms) of a partition on the CPU.

    Backward is measured first (cpu_backward), its gradients are cleared, then
    forward is measured (cpu_forward).
    """
    partition = partition.to('cpu')
    partition.device = 'cpu'
    b_time = cpu_backward(partition, inputs, recomputation=recomputation, inputs_requires_grad=inputs_requires_grad)
    # Drop gradients accumulated by the backward measurement.
    for param in partition.parameters():
        param.grad = None
    f_time, outputs = cpu_forward(partition, inputs, recomputation=recomputation)
    return (f_time, b_time, outputs)
|
def cpu_forward(partition, inputs, recomputation=True):
    """Time one forward pass on the CPU (ms); grad recording is enabled only
    when recomputation is off."""
    inputs = move_tensors(inputs, 'cpu')
    with torch.set_grad_enabled(not recomputation):
        begin = time.time()
        outputs = partition(*inputs)
        finish = time.time()
    f_time = 1000 * (finish - begin)
    return (f_time, outputs)
|
def cpu_backward(partition, inputs, recomputation=True, inputs_requires_grad=False):
    """Time one backward pass on the CPU (ms).

    With recomputation=True the measured span also covers the forward pass
    (activations are rebuilt before backward); otherwise the timer is reset
    right after the forward so only backward is measured.
    """
    inputs = set_req_grad(move_and_detach(inputs, 'cpu'), inputs_requires_grad)
    grad_tensors = infer_grad_tensors_for_partition(partition, inputs)
    begin = time.time()
    outputs = partition(*inputs)
    flattened_outputs = flatten(outputs)
    if not recomputation:
        # Exclude the forward pass from the measurement.
        begin = time.time()
    flattened_outputs = filter((lambda t: (isinstance(t, torch.Tensor) and t.requires_grad)), flattened_outputs)
    torch.autograd.backward(tensors=flattened_outputs, grad_tensors=grad_tensors)
    finish = time.time()
    return 1000 * (finish - begin)
|
def cuda_computation_times(model, inputs):
    """Measure combined forward+backward time (ms) of *model* on the GPU.

    A surrogate loss — the sum of norms over grad-requiring tensor outputs —
    drives the backward pass. Timing uses CUDA events around both passes.
    """
    if (not isinstance(inputs, (tuple, list, dict))):
        inputs = (inputs,)
    model.cuda()
    inputs = move_tensors(inputs, 'cuda')
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    torch.cuda.synchronize(device='cuda')
    start.record()
    if isinstance(inputs, (tuple, list)):
        outputs = model(*inputs)
    elif isinstance(inputs, dict):
        outputs = model(**inputs)
    else:
        raise NotImplementedError(str(type(inputs)))
    loss = sum((o.norm() for o in filter((lambda t: (isinstance(t, torch.Tensor) and t.requires_grad)), flatten(outputs))))
    loss.backward()
    end.record()
    torch.cuda.synchronize(device='cuda')
    fb_time = start.elapsed_time(end)
    return fb_time
|
def run_analysis(sample, model, n_workers, bw_GBps=12, verbose=True):
    """Estimate data-parallel speedup for *model* replicated on *n_workers*.

    Models an all-reduce-style gradient exchange (n * log2(n) sends of the
    full parameter payload) with no comm/comp overlap, and profiles one
    fwd+bwd step on the GPU via cuda_computation_times.

    NOTE(review): this shadows the pipeline ``run_analysis`` defined earlier
    in this module — confirm which definition callers actually import.

    Returns:
        (expected_speedup, stats_dict)
    """
    param_bytes = sum((p.nelement() * p.element_size()) for p in model.parameters())
    send_mb = param_bytes / 1000000.0
    single_send_time = send_mb / bw_GBps
    num_sends = n_workers * math.log2(n_workers)
    total_send_time = num_sends * single_send_time
    comp_time = cuda_computation_times(model, sample)
    utilization = comp_time / (comp_time + total_send_time)
    expected_speedup = utilization * n_workers
    d = dict(n_workers=n_workers, send_mb=send_mb, single_send_time=single_send_time, num_sends=num_sends, total_send_time=total_send_time, comp_time=comp_time, utilization=utilization, expected_speedup=expected_speedup)
    return (expected_speedup, d)
|
def pipe_model(model: nn.Module, batch_dim: int, model_args: tuple=(), model_kwargs: Optional[Dict]=None, n_iter: int=10, nparts: int=4, depth: int=1000, basic_blocks: Optional[Union[(List[nn.Module], Tuple[nn.Module])]]=None, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None, use_layers_only_graph: bool=True, output_file: Optional[str]=None, generate_explicit_del: bool=False, generate_activation_propagation: bool=True, recomputation: bool=False, partitioning_method: str='ACYCLIC', METIS_opt: Optional[Dict]=None, acyclic_opt: Optional[Dict]=None, binpack_opt: Optional[Dict]=None, mpipe_opt: Optional[Dict]=None, force_no_recomp_scopes: Optional[Callable[([str], bool)]]=None, save_memory_mode: bool=False, trace_on_gpu=False, use_graph_profiler: bool=True, use_network_profiler: bool=False, profile_ops: bool=True, graph: Optional[Graph]=None, async_pipe=False, trace_cache_name=None, profiles_cache_name=None, dont_use_async_meta_alg=False) -> Graph:
    """Partition ``model`` into ``nparts`` stages and emit the generated pipeline code.

    Convenience entry point: delegates profiling/partitioning to
    :func:`partition_model` and then writes the partitions + pipeline config
    to a python file via :func:`compile_partitioned_model`.

    Parameters
    ----------
    model: the network to partition.
    batch_dim: batch dimension of the sample input.
    model_args / model_kwargs: sample positional/keyword inputs used for tracing.
    n_iter: number of profiling iterations used to gather statistics.
    nparts: number of partitions to produce.
    depth: how far down the module tree to descend (graph detail level).
    basic_blocks: module types that are never broken down further.
    node_weight_function / edge_weight_function:
        optional weight functions for graph nodes/edges (default weight 1).
    use_layers_only_graph: partition a reduced, layers-only graph
        (useful for big models with many unprofiled ops).
    output_file: path of the generated file;
        defaults to ``generated_{modelClass}{actualNumberOfPartitions}``.
    generate_explicit_del: emit ``del`` statements for dead variables.
    generate_activation_propagation: when a stage feeds multiple stages
        (e.g. 0->[1,3,4]), forward the activation stage-to-stage
        (0->1->3->4) instead of sending directly from the source.
    recomputation: profile with activation recomputation.
    partitioning_method: which partitioning algorithm to use.
    METIS_opt / acyclic_opt / binpack_opt / mpipe_opt:
        extra kwargs for the corresponding partitioning algorithm.
    force_no_recomp_scopes: ``fn(scope) -> bool``; True forces no-recomputation
        for that scope (default: always False).
    save_memory_mode: minimize memory footprint during profiling
        (trades speed for memory).
    trace_on_gpu: trace on GPU instead of CPU (only relevant with
        ``save_memory_mode``).
    use_graph_profiler / use_network_profiler: select the graph-based profiler
        (default) or the older network profiler.
    profile_ops: also profile ops when using the GraphProfiler.
    graph: an existing graph to repartition instead of building a new one.
    async_pipe / dont_use_async_meta_alg: control the async-pipeline
        partitioning meta-algorithm.
    trace_cache_name / profiles_cache_name: optional cache files for the
        traced graph and the profiles.

    Returns the partitioned :class:`Graph`.
    """
    if (basic_blocks is None):
        basic_blocks = ()
    graph = partition_model(model, model_args=model_args, model_kwargs=model_kwargs, n_iter=n_iter, nparts=nparts, max_depth=depth, basic_blocks=basic_blocks, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, use_layers_only_graph=use_layers_only_graph, recomputation=recomputation, partitioning_method=partitioning_method, METIS_opt=METIS_opt, acyclic_opt=acyclic_opt, binpack_opt=binpack_opt, mpipe_opt=mpipe_opt, force_no_recomp_scopes=force_no_recomp_scopes, use_graph_profiler=use_graph_profiler, use_network_profiler=use_network_profiler, profile_ops=profile_ops, save_memory_mode=save_memory_mode, trace_on_gpu=trace_on_gpu, graph=graph, async_pipe=async_pipe, trace_cache_name=trace_cache_name, profiles_cache_name=profiles_cache_name, dont_use_async_meta_alg=dont_use_async_meta_alg)
    compile_partitioned_model(graph, model, batch_dim, output_file=output_file, generate_explicit_del=generate_explicit_del, generate_activation_propagation=generate_activation_propagation)
    print('-I- generated code')
    return graph
|
def partition_model(model: nn.Module, model_args: tuple=(), model_kwargs: Optional[Dict]=None, n_iter: int=10, nparts: int=4, max_depth: int=100, basic_blocks: Optional[Union[(List[nn.Module], Tuple[nn.Module])]]=None, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None, use_layers_only_graph: bool=True, recomputation: bool=True, partitioning_method: str='ACYCLIC', METIS_opt: Optional[Dict]=None, acyclic_opt: Optional[Dict]=None, binpack_opt: Optional[Dict]=None, mpipe_opt: Optional[Dict]=None, force_no_recomp_scopes: Optional[Callable[([str], bool)]]=None, use_graph_profiler: bool=True, use_network_profiler: bool=False, profile_ops: bool=True, save_memory_mode: bool=False, trace_on_gpu=False, graph: Optional[Graph]=None, use_virtual_stages: bool=True, async_pipe=False, trace_cache_name=None, profiles_cache_name=None, dont_use_async_meta_alg=False) -> Graph:
    """Profile the network and return a graph representing the partition.

    Parameters
    ----------
    model: the network to partition.
    model_args / model_kwargs: sample positional/keyword inputs used for tracing.
    n_iter: number of profiling iterations used to gather statistics.
    nparts: the number of partitions.
    max_depth: how far down the module tree to descend (graph detail level).
    basic_blocks: module types that are never broken down further.
    node_weight_function / edge_weight_function:
        optional weight functions for graph nodes/edges (default weight 1).
    use_layers_only_graph: partition a reduced, layers-only graph
        (useful for big models with many unprofiled ops).
    recomputation: profile with activation recomputation.
    partitioning_method: which partitioning algorithm to use (default ACYCLIC).
    METIS_opt / acyclic_opt / binpack_opt / mpipe_opt:
        extra kwargs for the corresponding partitioning algorithm.
    force_no_recomp_scopes: ``fn(scope) -> bool``; True forces no-recomputation.
    use_graph_profiler / use_network_profiler: select the graph-based profiler
        (default) or the older network profiler.
    profile_ops: also profile ops when using the GraphProfiler.
    save_memory_mode: minimize memory footprint during profiling.
    trace_on_gpu: trace on GPU instead of CPU (with ``save_memory_mode``).
    graph: an existing graph to repartition (default: build a new one).
    use_virtual_stages: use virtual stages if the method supports them.
    async_pipe / dont_use_async_meta_alg: when partitioning for an async
        pipeline with recomputation, use the meta-algorithm that matches
        weights until the last partition runs without recomputation.
    trace_cache_name / profiles_cache_name: optional cache files.
    """
    if (basic_blocks is None):
        basic_blocks = ()
    if (METIS_opt is None):
        METIS_opt = dict()
    if (acyclic_opt is None):
        acyclic_opt = dict()
    if (binpack_opt is None):
        binpack_opt = dict()
    if (mpipe_opt is None):
        mpipe_opt = dict()
    # Simple path: profile once (honoring `recomputation`) and partition.
    if ((not async_pipe) or (not recomputation) or dont_use_async_meta_alg):
        if (graph is None):
            graph = compute_and_maybe_cache(build_profiled_graph, profiles_cache_name, model, _cache_cls_to_use=GraphCache, model_args=model_args, model_kwargs=model_kwargs, use_network_profiler=use_network_profiler, use_graph_profiler=use_graph_profiler, save_memory_mode=save_memory_mode, trace_on_gpu=trace_on_gpu, profile_ops=profile_ops, recomputation=recomputation, n_iter=n_iter, max_depth=max_depth, basic_blocks=basic_blocks, force_no_recomp_scopes=force_no_recomp_scopes, trace_cache_name=trace_cache_name)
        if (nparts > 1):
            graph = partition_profiled_graph(graph, model, nparts, partitioning_method, node_weight_function, edge_weight_function, use_virtual_stages, use_layers_only_graph, METIS_opt, acyclic_opt, binpack_opt, mpipe_opt)
    else:
        # Async-pipe meta-algorithm: profile both with and without
        # recomputation, then repartition until the last stage can run
        # without recomputation.
        graph = build_graph_with_nparams_and_grad_reqs(model, model_args, model_kwargs, max_depth, basic_blocks, save_memory_mode, trace_on_gpu, res_cache_name=trace_cache_name)
        (weights, max_memory_usage_r, max_memory_usage_nr) = compute_and_maybe_cache(get_full_profiles, profiles_cache_name, graph, model, model_args, model_kwargs, n_iter, profile_ops, max_depth, basic_blocks, force_no_recomp_scopes, save_memory_mode, use_graph_profiler, use_network_profiler)
        partition_profiled_graph_fn = functools.partial(partition_profiled_graph, model=model, nparts=nparts, partitioning_method=partitioning_method, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, use_virtual_stages=use_virtual_stages, use_layers_only_graph=use_layers_only_graph, METIS_opt=METIS_opt, acyclic_opt=acyclic_opt, binpack_opt=binpack_opt, mpipe_opt=mpipe_opt)
        graph = partition_and_match_weights_until_last_partition_is_with_no_recomputation(graph, weights, partitioning_method, partition_profiled_graph_fn, max_memory_usage_r=max_memory_usage_r, max_memory_usage_nr=max_memory_usage_nr)
    return graph
|
def get_full_profiles(graph, model, model_args, model_kwargs, n_iter, profile_ops, max_depth, basic_blocks, force_no_recomp_scopes, save_memory_mode, use_graph_profiler, use_network_profiler):
    """Profile the graph twice — with and without recomputation — and merge results.

    Returns ``(weights, max_mem_usage_bytes_r, max_mem_usage_bytes_nr)`` where
    ``weights`` maps node id to a :class:`FullExecTimes` combining both runs.
    Node-level ``max_memory_bytes`` is reset between runs and finally restored
    from the recomputation run.
    """
    print('-I- profiling model (recomp)')
    recomputation_times, max_mem_usage_bytes_r = get_profiles(
        graph, model, model_args=model_args, model_kwargs=model_kwargs,
        use_network_profiler=use_network_profiler, use_graph_profiler=use_graph_profiler,
        save_memory_mode=save_memory_mode, profile_ops=profile_ops,
        recomputation=True, n_iter=n_iter, max_depth=max_depth,
        basic_blocks=basic_blocks, force_no_recomp_scopes=force_no_recomp_scopes)
    print('-I- profiling model (no recomp)')
    warnings.warn('Need to reset max mem usage!!')
    for node in graph.nodes:
        node.max_memory_bytes = 0
    no_recomputation_times, max_mem_usage_bytes_nr = get_profiles(
        graph, model, model_args=model_args, model_kwargs=model_kwargs,
        use_network_profiler=use_network_profiler, use_graph_profiler=use_graph_profiler,
        save_memory_mode=save_memory_mode, profile_ops=profile_ops,
        recomputation=False, n_iter=n_iter, max_depth=max_depth,
        basic_blocks=basic_blocks, force_no_recomp_scopes=force_no_recomp_scopes)
    warnings.warn('Need to reset max mem usage!!')
    for node in graph.nodes:
        node.max_memory_bytes = 0
    # nodes missing from either profile get a zero execution time
    for node in graph.nodes:
        if node.scope not in no_recomputation_times:
            no_recomputation_times[node.scope] = ExecTimes(0, 0)
        if node.scope not in recomputation_times:
            recomputation_times[node.scope] = ExecTimes(0, 0)
    weights = {}
    for node in graph.nodes:
        weights[node.id] = FullExecTimes(recomputation_times[node.scope],
                                         no_recomputation_times[node.scope])
    # restore per-node peak memory from the recomputation run
    for node in graph.nodes:
        recorded = max_mem_usage_bytes_r.get(node.scope, None)
        if recorded is not None:
            node.max_memory_bytes = recorded
    print('-I- model profiled')
    return (weights, max_mem_usage_bytes_r, max_mem_usage_bytes_nr)
|
def partition_profiled_graph(graph, model, nparts, partitioning_method, node_weight_function, edge_weight_function, use_virtual_stages, use_layers_only_graph, METIS_opt, acyclic_opt, binpack_opt, mpipe_opt):
    """Dispatch the profiled graph to the selected partitioning algorithm.

    Supported methods (case-insensitive): 'metis', 'acyclic', '2dbin',
    'mpipe', 'pipedream'. Raises NotImplementedError for anything else.

    Returns the partitioned graph. The weight-function objects are treated as
    shared mutable state by the 'pipedream' branch; their attributes are
    temporarily overridden and always restored (see below).
    """
    partitioning_method = partitioning_method.lower()
    if (partitioning_method == 'metis'):
        print('-I- using METIS partitioning algorithm')
        graph = metis_partition(graph, nparts, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, use_layers_only_graph=use_layers_only_graph, use_virtual_stages=use_virtual_stages, **METIS_opt)
    elif (partitioning_method == 'acyclic'):
        print('-I- using Acyclic Partitioning algorithm')
        acyclic_partition(model, graph, nparts, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, use_layers_graph=use_layers_only_graph, **acyclic_opt)
    elif (partitioning_method == '2dbin'):
        if ('n_clusters' not in binpack_opt):
            if ('analyze_n_clusters' not in binpack_opt):
                warnings.warn('expected --n_clusters or --analyze_n_clusters to be given to binpack_opt. will set n_clusters=2 as default')
                binpack_opt['n_clusters'] = 2
            else:
                warnings.warn('Will infer `n_clusters` with user assistance')
        (graph, stage_to_gpu_map) = partition_2dbin_pack(graph, num_gpus=nparts, node_weight_function=node_weight_function, **binpack_opt)
    elif (partitioning_method == 'mpipe'):
        (graph, stage_to_gpu_map) = partition_mpipe(model, graph, num_gpus=nparts, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, **mpipe_opt)
    elif (partitioning_method == 'pipedream'):
        # pipedream needs unit multipliers and no positivity clamp; the weight
        # functions are shared objects, so save their state, override, and
        # restore in a `finally` block — the original code restored only on
        # success, leaking the overrides if partition_pipedream raised.
        saved_node_mult = node_weight_function.MULT_FACTOR
        saved_edge_mult = edge_weight_function.MULT_FACTOR
        assert (saved_node_mult == saved_edge_mult)
        warnings.warn('forcing mult factor to 1')
        saved_ensure_positive = edge_weight_function.ensure_positive
        node_weight_function.MULT_FACTOR = 1.0
        edge_weight_function.MULT_FACTOR = 1.0
        edge_weight_function.ensure_positive = False
        try:
            graph = partition_pipedream(graph, num_gpus=nparts, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, num_machines_in_first_level=None)
        finally:
            node_weight_function.MULT_FACTOR = saved_node_mult
            edge_weight_function.MULT_FACTOR = saved_edge_mult
            edge_weight_function.ensure_positive = saved_ensure_positive
    else:
        raise NotImplementedError(partitioning_method)
    return graph
|
def build_profiled_graph(model: nn.Module, model_args: tuple=(), model_kwargs: Optional[Dict]=None, use_network_profiler: bool=False, use_graph_profiler: bool=True, save_memory_mode: bool=False, trace_on_gpu=False, profile_ops: bool=True, recomputation: bool=False, n_iter: int=10, max_depth: int=1000, basic_blocks: Optional[List[nn.Module]]=None, force_no_recomp_scopes: Optional[Callable[([str], bool)]]=None, trace_cache_name=None) -> Graph:
    """Build a graph representation of ``model`` with profiled node weights.

    Traces the model (semantically identical to the forward pass), infers
    gradient requirements, profiles each operation's execution time, and
    stores the measured :class:`ExecTimes` on each node's ``weight``
    (nodes without a profile get ``ExecTimes(0, 0)``).

    Parameters
    ----------
    model: the network to model.
    model_args / model_kwargs: sample inputs used for tracing.
    use_graph_profiler / use_network_profiler: select the graph-based profiler
        (default) or the older network profiler.
    save_memory_mode: minimize memory footprint during profiling
        (trades speed for memory).
    trace_on_gpu: trace on GPU instead of CPU (with ``save_memory_mode``).
    profile_ops: also profile ops when using the GraphProfiler.
    recomputation: profile with activation recomputation.
    n_iter: number of profiling iterations used to gather statistics.
    max_depth: how far down the module tree to descend.
    basic_blocks: module types that are never broken down further.
    force_no_recomp_scopes: ``fn(scope) -> bool``; True forces no-recomputation.
    trace_cache_name: optional cache file for the traced graph.
    """
    graph = build_graph_with_nparams_and_grad_reqs(model, model_args, model_kwargs, max_depth, basic_blocks, save_memory_mode, trace_on_gpu, res_cache_name=trace_cache_name)
    print('-I- profiling model')
    (weights, max_mem_usage_bytes) = get_profiles(graph, model, model_args=model_args, model_kwargs=model_kwargs, use_network_profiler=use_network_profiler, use_graph_profiler=use_graph_profiler, save_memory_mode=save_memory_mode, profile_ops=profile_ops, recomputation=recomputation, n_iter=n_iter, max_depth=max_depth, basic_blocks=basic_blocks, force_no_recomp_scopes=force_no_recomp_scopes)
    for n in graph.nodes:
        n.weight = weights.get(n.scope, ExecTimes(0, 0))
    print('-I- model profiled')
    return graph
|
def build_graph_with_nparams_and_grad_reqs(model, model_args, model_kwargs, max_depth, basic_blocks, save_memory_mode, trace_on_gpu, res_cache_name=None) -> Graph:
    """Trace ``model`` into a :class:`Graph` and annotate it.

    Annotations: gradient requirements, contiguity (``is_contiguous``), and
    parameter counts per node. When ``res_cache_name`` is given, the whole
    result is computed-or-loaded through :class:`GraphCache`.

    Fixes: the progress messages previously read "infering s_contiguous" /
    "inferred infer_is_contiguous" — corrected to "inferring is_contiguous" /
    "inferred is_contiguous".
    """
    if res_cache_name:
        # Delegate to the cached version; res_cache_name=None on the inner
        # call prevents infinite recursion.
        return compute_and_cache(build_graph_with_nparams_and_grad_reqs, res_cache_name, model, model_args, model_kwargs, max_depth, basic_blocks, save_memory_mode, trace_on_gpu, res_cache_name=None, _cache_cls_to_use=GraphCache)
    if save_memory_mode:
        if (not trace_on_gpu):
            print('-I- tracing on CPU')
            with torch.no_grad():
                (model, model_args, model_kwargs) = move_tensors((model, model_args, model_kwargs), 'cpu')
        else:
            print('-I- tracing on GPU')
            with torch.no_grad():
                (model, model_args, model_kwargs) = move_tensors((model, model_args, model_kwargs), 'cuda')
    # NOTE(review): raises StopIteration for a parameter-free model — confirm
    # callers always pass models with parameters.
    model_device = next(model.parameters()).device
    print(f'-I- tracing device: {model_device}')
    print('-I- tracing model')
    graph = trace_module(model, args=model_args, kwargs=model_kwargs, depth=max_depth, basic_blocks=basic_blocks)
    print('-I- graph built')
    print('-I- inferring gradient requirements')
    if save_memory_mode:
        # move everything back to CPU before the (memory-hungry) inference passes
        with torch.no_grad():
            (model, model_args, model_kwargs) = move_tensors((model, model_args, model_kwargs), 'cpu')
    infer_req_grad(graph, model, args=model_args, kwargs=model_kwargs)
    print('-I- inferred gradient requirements')
    print('-I- inferring is_contiguous')
    infer_is_contiguous(graph, model, args=model_args, kwargs=model_kwargs)
    print('-I- inferred is_contiguous')
    print('-I- inferring params per node')
    graph.calculate_params_per_node(model)
    return graph
|
def get_profiles(graph: Graph, model: nn.Module, model_args: tuple=(), model_kwargs: Optional[Dict]=None, use_network_profiler: bool=False, use_graph_profiler: bool=True, save_memory_mode: bool=False, profile_ops: bool=True, recomputation: bool=False, n_iter: int=10, max_depth: int=1000, basic_blocks: Optional[List[nn.Module]]=None, force_no_recomp_scopes: Optional[Callable[([str], bool)]]=None):
    """Run one profiling pass over the graph and return ``(weights, mem_usage_bytes)``.

    Uses the GraphProfiler when ``use_graph_profiler`` is set (default),
    otherwise falls back to the deprecated network profiler; raises
    ValueError if neither is selected. ``mem_usage_bytes`` is an empty dict
    when the chosen profiler does not report memory usage.
    """
    basic_blocks = () if basic_blocks is None else basic_blocks
    model_kwargs = dict() if model_kwargs is None else model_kwargs
    if use_graph_profiler:
        print(f'-I- using graph profiler with op profiling = {profile_ops} save_memory_mode = {save_memory_mode}')
        if save_memory_mode:
            model, model_args, model_kwargs = move_tensors((model, model_args, model_kwargs), 'cpu')
        profiler = GraphProfiler(recomputation=recomputation, n_iter=n_iter,
                                 profile_ops=profile_ops,
                                 force_no_recomp_scopes=force_no_recomp_scopes,
                                 save_memory_mode=save_memory_mode)
        execute_graph(model, graph,
                      model_args=model_args, model_kwargs=model_kwargs,
                      pre_hook=pre_hook_factory(profiler.time_forward),
                      post_hook=post_hook_factory(profiler.time_backward),
                      enforce_out_of_place=True)
        mem_usage_bytes = profiler.set_max_memory_usage(graph)
        weights = profiler.get_weights()
    elif use_network_profiler:
        warnings.warn('network profiler is deprecated, use graph profiler')
        print(f'-I- using network profiler with save_memory_mode = {save_memory_mode}')
        assert (not profile_ops), 'op profiling is not supported in the network profiler'
        weights = profile_network(model, model_args, kwargs=model_kwargs,
                                  basic_blocks=basic_blocks, max_depth=max_depth,
                                  n_iter=n_iter, recomputation=recomputation,
                                  save_memory_mode=save_memory_mode,
                                  force_no_recomp_scopes=force_no_recomp_scopes)
        mem_usage_bytes = None
    else:
        raise ValueError('missing profiling method')
    assert (weights is not None)
    if mem_usage_bytes is None:
        mem_usage_bytes = dict()
    return (weights, mem_usage_bytes)
|
class TorchCache():
    """Context-manager cache persisted with ``torch.save`` / ``torch.load``.

    Usage: enter the context, set ``self.v`` if ``self.exists`` is False,
    and the value is written to ``cache_name`` on a clean exit. On entry,
    an existing file is loaded into ``self.v``.
    """

    def __init__(self, cache_name, overwrite=False):
        self.cache_name = cache_name
        self.exists = os.path.exists(cache_name)
        self.overwrite = overwrite
        self.v = None

    def __enter__(self):
        if not self.exists:
            print(f'computing value for {self.cache_name}')
        else:
            print(f'loading from cache: {self.cache_name}')
            self.v = torch.load(self.cache_name)
        return self

    def __exit__(self, type, value, traceback):
        should_save = self.overwrite or (not self.exists)
        if not should_save:
            return
        print(f'saving to cache: {self.cache_name}')
        if type is not None:
            # an exception is propagating; do not persist a partial value
            print('exception_happened')
        else:
            assert (self.v is not None), 'You should enter a value'
            torch.save(self.v, self.cache_name)
|
class PickleCache():
    """Context-manager cache persisted with :mod:`pickle`.

    Same protocol as :class:`TorchCache`: enter, populate ``self.v`` when
    ``self.exists`` is False, and the value is pickled to ``cache_name`` on
    a clean exit. On entry, an existing file is unpickled into ``self.v``.
    """

    def __init__(self, cache_name, overwrite=False):
        self.cache_name = cache_name
        self.exists = os.path.exists(cache_name)
        self.overwrite = overwrite
        self.v = None

    def __enter__(self):
        if not self.exists:
            print(f'computing value for {self.cache_name}')
        else:
            print(f'loading from cache: {self.cache_name}')
            with open(self.cache_name, 'rb') as f:
                self.v = pickle.load(f)
        return self

    def __exit__(self, type, value, traceback):
        should_save = self.overwrite or (not self.exists)
        if not should_save:
            return
        print(f'saving to cache: {self.cache_name}')
        if type is not None:
            # an exception is propagating; do not persist a partial value
            print('exception_happened')
        else:
            assert (self.v is not None), 'You should enter a value'
            with open(self.cache_name, 'wb') as f:
                pickle.dump(self.v, f)
|
class GraphCache():
    """Context-manager cache persisted via ``Graph.serialize``/``Graph.deserialize``.

    Same protocol as :class:`TorchCache`/:class:`PickleCache`, plus a
    ``compute_anyway`` flag set when an existing cache file fails to load,
    which callers (see ``compute_and_cache``) use to recompute the value.

    Fix: the original ``__exit__`` ended with an unconditional
    ``assert isinstance(self.v, Graph)``, which raised ``AssertionError``
    whenever ``self.v`` was still ``None`` (i.e. when an exception was
    propagating out of the ``with`` body), replacing the real exception.
    The type check now only runs on the save path.
    """

    def __init__(self, cache_name, overwrite=False):
        self.cache_name = cache_name
        self.exists = os.path.exists(cache_name)
        self.overwrite = overwrite
        self.v = None
        self.compute_anyway = False

    def __enter__(self):
        if self.exists:
            try:
                print(f'loading from cache: {self.cache_name}')
                self.v = Graph.deserialize(self.cache_name)
                return self
            except Exception:
                # corrupt/incompatible cache file: fall through to recompute
                self.compute_anyway = True
                warnings.warn(f'loading from cache failed, (check its consistency!). Will compute value. overwrite={self.overwrite}')
        print(f'computing value for {self.cache_name}')
        return self

    def __exit__(self, type, value, traceback):
        if (type is not None):
            # let the original exception propagate untouched
            print('exception_happened')
            return
        if ((not self.exists) or self.overwrite):
            print(f'saving to cache: {self.cache_name}')
            assert (self.v is not None), 'You should enter a value'
            assert isinstance(self.v, Graph)
            self.v.serialize(self.cache_name)
|
def compute_and_cache(compute_function, cache_name, *args, _cache_cls_to_use=TorchCache, **kw):
    """Return a cached value, computing and persisting it on a miss.

    The cache class (``TorchCache``, ``PickleCache`` or ``GraphCache``)
    decides the storage format. A cache whose ``compute_anyway`` flag is set
    (corrupt file) is also recomputed.

    Examples:
        # compute big
        # compute_and_cache(lambda: torch.ones(10), "big")
        # compute big, then small
        # compute_and_cache(lambda: torch.randn(10) * compute_and_cache(lambda: torch.ones(10), "big"), "small")
    """
    with _cache_cls_to_use(cache_name, overwrite=False) as cache:
        cache_miss = (not cache.exists) or getattr(cache, 'compute_anyway', False)
        if cache_miss:
            cache.v = compute_function(*args, **kw)
        return cache.v
|
def compute_and_maybe_cache(compute_function, cache_name, *args, _cache_cls_to_use=TorchCache, **kw):
    """Like :func:`compute_and_cache`, but skip caching when ``cache_name`` is falsy."""
    if not cache_name:
        return compute_function(*args, **kw)
    return compute_and_cache(compute_function, cache_name, *args, _cache_cls_to_use=_cache_cls_to_use, **kw)
|
def compile_partitioned_model(graph: Graph, model: Module, batch_dim: int, generate_explicit_del: bool=False, generate_activation_propagation: bool=True, output_file: Optional[str]=None):
    """Generate the python source for the partitioned model.

    The emitted file contains one ``Partition{i}`` class per stage plus a
    ``create_pipeline_configuration`` function that consumers call to build
    the pipeline.

    Parameters
    ----------
    graph: the partitioned graph of the module.
    model: the module itself.
    batch_dim: the batch dimension of the input.
    generate_explicit_del: emit ``del`` statements for dead variables
        (default False).
    generate_activation_propagation: when a stage feeds multiple stages
        (e.g. 0->[1,3,4]), forward the activation stage-to-stage
        (0->1->3->4) instead of sending directly from the source.
    output_file: path for the generated code; defaults to
        ``generated_{model_name}{numberOfPartitions}.py``.

    Returns the path of the written file as a string.
    """
    re_assign = True
    try:
        # sanity checks; they may mutate the graph, invalidating stage indices
        ensure_inputs_are_used(graph, assert_same_stages=True)
        ensure_no_unnecessary_tuple_sends(graph, assert_same_stages=True)
    except AssertionError as e:
        if re_assign:
            warnings.warn('Re assigning partition indices after graph changed. ignore previous stage to GPU map.')
            re_assign_partition_indices(graph)
        else:
            raise e
    layer_classes = {scope: type(layer) for (layer, scope, _) in traverse_model(model, depth=graph.depth, basic_blocks=graph.basic_blocks)}
    is_param_dict = {scope: t.requires_grad for (t, scope) in traverse_params_buffs(model)}
    stages = group_nodes_by_stage_id(graph.nodes)
    stage_depth_from_end = get_stages_depth_from_end(graph)
    lines = generate_imports(layer_classes)
    lines.append(stage_connections_str(graph))
    partitions_code = []
    ios = dict()
    # emit one Partition{idx} class (init + forward + state methods) per stage
    for (idx, stage_nodes) in stages.items():
        class_name = f'Partition{idx}'
        layers = [n for n in stage_nodes if (n.type == NodeTypes.LAYER)]
        buffs_params = [n for n in stage_nodes if (n.type == NodeTypes.BUFF_PARAM)]
        (class_decl, scope_to_class_field) = generate_init_method(graph, stage_nodes, class_name, layers, is_param_dict, buffs_params)
        state_methods_functions = generate_partition_state_methods()
        (forward_function, io) = generate_forward_method(idx, graph, stage_nodes, graph.outputs, scope_to_class_field, stage_depth_from_end=stage_depth_from_end[idx], generate_explicit_del=generate_explicit_del, generate_activation_propagation=generate_activation_propagation, move_tensors=False)
        partitions_code.append(class_decl)
        partitions_code.extend(forward_function)
        partitions_code.append('')
        partitions_code.append(state_methods_functions)
        ios[idx] = io
    if (output_file is None):
        output_file = f'generated_{graph.model_name}{len(stages)}'
    elif output_file.endswith('.py'):
        # '.py' is re-appended below; strip it to avoid 'name.py.py'
        output_file = output_file[:(- 3)]
    (create_pipeline_configuration_str, config) = create_pipeline_configuration(graph, ios, layer_classes, batch_dim, generate_activation_propagation)
    lines.append(create_pipeline_configuration_str)
    lines += partitions_code
    lines.append(generate_help_functions())
    path = pathlib.Path((output_file + '.py'))
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        warnings.warn(f'Overriding previous path {path}')
        os.remove(path)
    with open(path, 'w') as f:
        f.write('\n'.join(lines))
    return str(path)
|
def group_nodes_by_stage_id(nodes: Iterable[Node]) -> Dict[(int, List[Node])]:
    """Group ``nodes`` by their ``stage_id``; keys appear in ascending order."""
    stage_ids = sorted({node.stage_id for node in nodes})
    grouped = OrderedDict((sid, []) for sid in stage_ids)
    for node in nodes:
        grouped[node.stage_id].append(node)
    return grouped
|
def generate_imports(layer_classes: Dict[(str, Module)]) -> List[str]:
    """Build the import section for the generated file.

    Covers torch / torch.nn / common typing helpers, one import line for each
    distinct layer class, and a trailing auto-generation disclaimer.
    """
    lines = [f'import {namespace}' for namespace in used_namespaces()]
    lines += ['from torch import Tensor',
              'import torch.nn as nn',
              'from itertools import chain',
              'from typing import Optional, Tuple, Iterator, Iterable, OrderedDict, Dict',
              'import collections',
              '']
    lines.append('from typing import Type')
    # one import per distinct layer class, resolved through its defining module
    for cls in set(layer_classes.values()):
        lines.append(f'from {inspect.getmodule(cls).__name__} import {cls.__name__}')
    lines.append('# this is an auto generated file do not edit unless you know what you are doing\n\n')
    return lines
|
def generate_help_functions() -> str:
    """Emit the source of the helper functions embedded in the generated file.

    Includes traverse_model, layerDict, traverse_params_buffs, tensorDict,
    tensor-moving/flattening utilities, and the partition state methods
    (parameters, buffers, state_dict, load_state_dict, cpu/cuda/to, ...).
    """
    helpers = [traverse_model, layerDict, traverse_params_buffs, tensorDict,
               move_tensors, nested_map, flatten, unflatten, _unflatten]
    helpers += get_state_methods()
    sources = []
    for helper in helpers:
        sources.append(inspect.getsource(helper))
    return '\n\n'.join(sources)
|
def stage_connections_str(graph: Graph) -> str:
    """Render a comment-diagram of stage connectivity for the generated file.

    Warns if the model's outputs are produced by more than one stage.
    """
    adj_matrix, num_partitions = stages_adj_lists(graph)
    diagram = ['# partition adjacency']
    diagram.append(f"# model inputs {adj_matrix[0]['outputs']}")
    for idx, row in enumerate(adj_matrix[1:(- 1)]):
        diagram.append(f'# partition {idx} {row}')
    output_feeders = adj_matrix[num_partitions + 1]['inputs']
    diagram.append(f'# model outputs {output_feeders}')
    if len(output_feeders) != 1:
        warnings.warn(f'Got {len(output_feeders)} output stages, expected 1')
    return '\n'.join(diagram) + '\n'
|
def stages_adj_lists(graph):
    """Build per-stage input/output adjacency sets.

    Returns ``(adj_matrix, num_partitions)`` where ``adj_matrix`` has
    ``num_partitions + 2`` entries: index 0 is a virtual "model inputs" row,
    index ``num_partitions + 1`` a virtual "model outputs" row, and entry
    ``stage_id + 1`` holds that stage's ``{'inputs', 'outputs'}`` sets.

    Fix: the ``'output'`` marker was written as ``f'output'`` — an f-string
    with no placeholders; replaced with a plain literal (same value).
    """
    num_partitions = graph.num_partitions
    adj_matrix = [{'inputs': set(), 'outputs': set()} for i in range((num_partitions + 2))]
    for node in graph.nodes:
        if (node.type is NodeTypes.IN):
            # graph inputs: record the input name on the consuming stage and
            # the consuming stage id on the virtual inputs row
            for n in node.out_edges:
                adj_matrix[(n.stage_id + 1)]['inputs'].add(graph.input_kw_ids.get(node.id, node.scope))
                adj_matrix[0]['outputs'].add(n.stage_id)
            continue
        if (node in graph.outputs):
            adj_matrix[(num_partitions + 1)]['inputs'].add(node.stage_id)
            adj_matrix[(node.stage_id + 1)]['outputs'].add('output')
        # cross-stage edges become stage-to-stage links
        for n in node.out_edges:
            if (n.stage_id != node.stage_id):
                adj_matrix[(node.stage_id + 1)]['outputs'].add(n.stage_id)
                adj_matrix[(n.stage_id + 1)]['inputs'].add(node.stage_id)
    return (adj_matrix, num_partitions)
|
def dict_stages_adj_lists(graph):
    """Keyed variant of :func:`stages_adj_lists`.

    Returns ``(dict_adj_matrix, num_partitions)`` with keys
    ``'model_inputs'``, ``0..num_partitions-1``, ``'model_outputs'``.
    """
    adj_matrix, num_partitions = stages_adj_lists(graph)
    keys = ['model_inputs', *range(num_partitions), 'model_outputs']
    dict_adj_matrix = dict(zip(keys, adj_matrix))
    return (dict_adj_matrix, num_partitions)
|
def get_stages_depth_from_end(graph) -> Dict[(int, int)]:
    """Compute each stage's longest simple-path distance from the model output.

    Builds a reversed stage DAG rooted at the virtual ``'output'`` node and
    measures the longest simple path to every stage. Warns about stages that
    do not reach the output and about parallel stages (equal depths), which
    naive pipelines cannot run.

    Fix: ``reduce(max, ...)`` had no initializer, so an unreachable stage
    (empty path iterator) raised ``TypeError: reduce() of empty sequence``
    instead of producing the negative depth the warning below checks for.
    An initializer of 0 yields -1 for unreachable stages and leaves reachable
    stages' depths unchanged (their path lengths are >= 1).
    """
    (dict_adj_matrix, num_partitions) = dict_stages_adj_lists(graph)
    edges = set()
    for (i, d) in dict_adj_matrix.items():
        if (i in {'model_inputs', 'model_outputs'}):
            continue
        # reverse orientation: edges point from consumers back toward producers
        for x in d['inputs']:
            edges.add((i, x))
        for x in d['outputs']:
            edges.add((x, i))
    G = nx.DiGraph(list(edges))

    def longest_depth_length(target):
        # longest simple path from the virtual 'output' node to this stage;
        # initializer 0 makes unreachable targets evaluate to -1
        return (reduce(max, map(len, nx.all_simple_edge_paths(G, source='output', target=target)), 0) - 1)
    distance_dict = {i: longest_depth_length(i) for i in range(num_partitions)}
    for (i, v) in distance_dict.items():
        if (v < 0):
            warnings.warn(f'Stage {i} was not used in output calculation. distance_dict={distance_dict}')
    if (len(set(distance_dict.values())) < num_partitions):
        # at least two stages share a depth -> parallel stages exist
        all_depths = sorted(set(distance_dict.values()))
        d = {}
        pard = {}
        for x in all_depths:
            d[x] = [s for (s, xx) in distance_dict.items() if (xx == x)]
            if (len(d[x]) > 1):
                pard[x] = d[x]
        warnings.warn(f"Detected parallel stages. Naive pipelines can't run this. parallel stages={pard}")
    return distance_dict
|
def create_pipeline_configuration(graph: Graph, ios: Dict[(int, Dict[(str, List[str])])], model_blocks: Dict[(str, Module)], batch_dim: int, generate_activation_propagation: bool) -> Tuple[(str, Dict)]:
    """Generate the ``create_pipeline_configuration`` function for the emitted file.

    Returns ``(source_string, config)`` — the source of the function to embed
    in the generated module, and the config dict it encodes.
    """
    # batch size is read off the lowest-id model input along batch_dim
    batch_size = sorted(graph.inputs, key=(lambda n: n.id))[0].tensor_shape[batch_dim]

    def is_batched(shape):
        # A (possibly nested) shape entry is considered batched when it has an
        # axis at batch_dim matching the inferred batch size.
        # NOTE(review): `len(s) > (batch_dim + 1)` excludes shapes with exactly
        # batch_dim+1 dims — confirm this is intended rather than `>=`.
        def f(s):
            return ((s is not None) and (len(s) > (batch_dim + 1)) and (s[batch_dim] == batch_size))
        return nested_map(f, shape)
    basic_blocks = ','.join(map((lambda block: block.__name__), set(model_blocks.values())))
    (model_inputs, model_outputs) = create_model_in_out_config(graph, is_batched)
    stages = create_stages_config(ios, is_batched)
    config = {'batch_dim': batch_dim, 'depth': graph.depth, 'basic_blocks': f'({basic_blocks})', 'model_inputs': model_inputs, 'model_outputs': model_outputs, 'stages': stages}
    if generate_activation_propagation:
        config = generate_config_with_input_propagation(config)
    config = generate_config_without_nested(config)
    # best-effort: the stage->device map is optional
    try:
        config['stage_to_device_map'] = gen_stage_to_device_map(graph)
    except Exception as e:
        warnings.warn(f'could not create stage to device map: {str(e)}')
    # The f-string literals below deliberately embed newlines and the `tab`
    # constant so the emitted function body is indented correctly.
    lines = [f'def create_pipeline_configuration({GET_STAGES_ON_CPU_NAME}=False, batch_size={batch_size}):']
    lines.extend([f'config = {pretty_format_obj(config)}', ''])
    lines.append(f'''
{tab}# switching batch size''')
    lines.append(generate_switch_batch_size())
    lines.append(f'''
{tab}return config''')
    return ((('\n' + f'''
{tab}'''.join(lines)) + '\n'), config)
|
def create_stages_config(ios: Dict, is_batched: Callable[([torch.Size], bool)], stage_to_device_map=None) -> Dict:
    """Build the per-stage portion of the pipeline configuration.

    For each stage id in `ios`, the entry records the stage class name, its
    input/output descriptors (shape, dtype, req_grad, is_batched, producing /
    consuming stages), a device expression and the stage depth.
    Note: `stage_to_device_map` is currently unused; kept for interface
    compatibility.
    """
    config = dict()
    for (idx, io) in ios.items():
        stage_inputs = dict()
        for (name, shape, req_grad, dtype, src) in zip(io['inputs'], io['input_shapes'], io['inputs_req_grad'], io['input_dtypes'], io['created_by']):
            stage_inputs[name] = {'shape': shape, 'dtype': dtype, 'req_grad': req_grad, 'is_batched': is_batched(shape), 'created_by': src}
        stage_outputs = dict()
        for (name, shape, req_grad, dtype, dsts) in zip(io['outputs'], io['output_shapes'], io['outputs_req_grad'], io['output_dtypes'], io['used_by']):
            stage_outputs[name] = {'shape': shape, 'dtype': dtype, 'req_grad': req_grad, 'is_batched': is_batched(shape), 'used_by': dsts}
        config[idx] = {'stage_cls': f'Partition{idx}', 'inputs': stage_inputs, 'outputs': stage_outputs, 'devices': f"['cpu' if {GET_STAGES_ON_CPU_NAME} else 'cuda:{idx}']", 'stage_depth': io['depth']}
    return config
|
def create_model_in_out_config(graph: Graph, is_batched: Callable[([torch.Size], bool)]) -> Tuple[(Dict, Dict)]:
    """Build the model-level input/output portion of the pipeline config.

    Inputs and outputs are processed in ascending node-id order. Each input
    entry records shape, dtype, is_batched and the consuming stage ids; each
    output entry records shape, dtype, is_batched and the producing stage id.
    """
    model_inputs = dict()
    for node in sorted(graph.inputs, key=(lambda n: n.id)):
        # keyword inputs are keyed by their kw name, positional ones by scope
        name = f'{graph.input_kw_ids.get(node.id, node.scope)}'
        shape = node.tensor_shape
        model_inputs[name] = {'shape': shape, 'dtype': node.tensor_dtype, 'is_batched': is_batched(shape), 'used_by': list({o.stage_id for o in node.out_edges})}
    model_outputs = dict()
    for node in sorted(graph.outputs, key=(lambda n: n.id)):
        shape = node.tensor_shape
        model_outputs[node.scope] = {'shape': shape, 'dtype': node.tensor_dtype, 'is_batched': is_batched(shape), 'created_by': node.stage_id}
    return (model_inputs, model_outputs)
|
def generate_switch_batch_size():
    """Return the source snippet that rescales every batched shape in the
    generated config to the requested batch_size (model IO and stage IO).
    """
    s = "batch_dim = config['batch_dim']\n    for d in chain(config['model_inputs'].values(),config['model_outputs'].values()):\n        if d['is_batched']:\n            shape = d['shape']\n            d['shape'] = torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim+1:])\n    \n    for s in config['stages'].values():\n        for d in chain(s['inputs'].values(),s['outputs'].values()):\n            if d['is_batched']:\n                shape = d['shape']\n                d['shape'] = torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim+1:])"
    return s
|
def _flatten_io_entries(entries: Dict, passthrough_key: str, include_req_grad: bool) -> Dict:
    """Expand entries whose 'is_batched' is nested (non-bool) into one flat
    entry per leaf, suffixing the entry name with `_<leaf index>`.

    `passthrough_key` ('used_by' or 'created_by') is copied unchanged to every
    leaf; when `include_req_grad` is True the per-leaf 'req_grad' flag is
    flattened too. Entries that are already flat are kept as-is.
    """
    flat = dict()
    for (name, cfg) in entries.items():
        if isinstance(cfg['is_batched'], bool):
            flat[name] = cfg
            continue
        columns = [flatten(cfg['is_batched']), flatten(cfg['shape']), flatten(cfg['dtype'])]
        if include_req_grad:
            columns.append(flatten(cfg['req_grad']))
        for (idx, leaf) in enumerate(zip(*columns)):
            # key order mirrors the original config entries
            leaf_cfg = {'shape': leaf[1], 'dtype': leaf[2]}
            if include_req_grad:
                leaf_cfg['req_grad'] = leaf[3]
            leaf_cfg['is_batched'] = leaf[0]
            leaf_cfg[passthrough_key] = cfg[passthrough_key]
            flat[(name + f'_{idx}')] = leaf_cfg
    return flat

def generate_config_without_nested(dict_config: Dict) -> Dict:
    """Return a deep copy of the config in which every nested IO entry
    (tuple/list valued) is expanded into flat per-leaf entries.

    The original inlined this flattening four times (model inputs, model
    outputs, stage inputs, stage outputs); the shared logic now lives in
    `_flatten_io_entries`.
    """
    config_without_nested = deepcopy(dict_config)
    config_without_nested['model_inputs'] = _flatten_io_entries(config_without_nested['model_inputs'], 'used_by', include_req_grad=False)
    config_without_nested['model_outputs'] = _flatten_io_entries(config_without_nested['model_outputs'], 'created_by', include_req_grad=False)
    for stage in config_without_nested['stages'].values():
        stage['outputs'] = _flatten_io_entries(stage['outputs'], 'used_by', include_req_grad=True)
        stage['inputs'] = _flatten_io_entries(stage['inputs'], 'created_by', include_req_grad=True)
    return config_without_nested
|
def generate_config_with_input_propagation(dict_config: Dict) -> Dict:
    """Rewrite the config so an activation consumed by several later stages is
    propagated stage-to-stage instead of being sent from the producer to every
    consumer directly.

    Entry names are renamed with a stage suffix where the route changes, and
    keys are popped and reinserted so insertion order stays consistent.
    """
    new_config = deepcopy(dict_config)
    new_model_outputs = new_config['model_outputs']
    for (name, cfg) in dict_config['model_outputs'].items():
        old_src = cfg['created_by']
        used_by = dict_config['stages'][old_src]['outputs'][name]['used_by']
        if (len(used_by) > 1):
            # activation is chained through its consumers; the model output
            # now arrives from the last stage in that chain
            new_src = max(used_by)
            new_model_outputs[name]['created_by'] = new_src
        else:
            # pop+reinsert keeps key order aligned with renamed siblings
            new_model_outputs[name] = new_model_outputs.pop(name)
    for (stage_id, stage_cfg) in dict_config['stages'].items():
        new_stage_cfg = new_config['stages'][stage_id]
        for (name, cfg) in stage_cfg['inputs'].items():
            old_src = cfg['created_by']
            if (old_src == (- 1)):
                # model input: route unchanged
                new_stage_cfg['inputs'][name] = new_stage_cfg['inputs'].pop(name)
                continue
            used_by = dict_config['stages'][old_src]['outputs'][name]['used_by']
            if (len(used_by) > 1):
                # receive from the closest earlier consumer instead of the
                # original producer; suffix the name with this stage's id
                new_src = max((dst for dst in used_by if (dst < stage_id)), default=old_src)
                new_stage_cfg['inputs'][name]['created_by'] = new_src
                new_stage_cfg['inputs'][(name + f'_{stage_id}')] = new_stage_cfg['inputs'].pop(name)
            else:
                new_stage_cfg['inputs'][name] = new_stage_cfg['inputs'].pop(name)
        for (name, cfg) in stage_cfg['outputs'].items():
            if (len(cfg['used_by']) > 1):
                # forward only to the nearest later consumer; it will relay onwards
                new_dst = min((dst for dst in cfg['used_by'] if (dst > stage_id)))
                new_stage_cfg['outputs'][name]['used_by'] = [new_dst]
                new_stage_cfg['outputs'][(name + f'_{new_dst}')] = new_stage_cfg['outputs'].pop(name)
            else:
                new_stage_cfg['outputs'][name] = new_stage_cfg['outputs'].pop(name)
    return new_config
|
def generate_forward_method(stage_id: int, graph: Graph, partition_nodes: List[Node], model_outputs: List[Node], partition_fields: Dict[(Node, str)], stage_depth_from_end: int, generate_explicit_del=False, generate_activation_propagation=True, move_tensors=False) -> Tuple[(List[str], Dict[(str, List)])]:
    """Generate the source of a partition's forward method.

    Returns:
        (lines, io): `lines` holds the generated source fragments
        (declaration + body); `io` describes the stage's inputs/outputs
        (scopes, shapes, dtypes, req_grad flags, producing/consuming stages
        and the stage depth) for the pipeline config.
        NOTE: buffer/param entries are removed from `partition_fields` in place.
    """
    inputs = get_sorted_partition_inputs(graph, partition_nodes)
    enforce_out_of_place_for_partition_inputs(partition_nodes, inputs)
    # positional inputs are named x0, x1, ...; keyword inputs keep their kw name
    i = 0
    input_ids = []
    for n in inputs:
        if (n.id in graph.input_kw_ids):
            input_ids.append(graph.input_kw_ids[n.id])
        else:
            input_ids.append(f'x{i}')
            i += 1
    input_sources = get_input_source_stages(inputs)
    ready_expressions = dict()
    # free-floating buffers/params already have self.b_/self.p_ fields: treat
    # them as ready expressions rather than forward-time inputs
    remove_buffs_params = []
    for (k, v) in partition_fields.items():
        if (('self.b_' in v) or ('self.p_' in v)):
            ready_expressions[k] = v
            remove_buffs_params.append(k)
    for k in remove_buffs_params:
        partition_fields.pop(k)
    input_scopes = [graph.input_kw_ids.get(node.id, node.scope) for node in inputs]
    ready_expressions.update(zip(inputs, input_ids))
    lines = [generate_declaration(input_ids, partition_fields, ready_expressions, move_tensors=move_tensors)]
    outputs = get_partition_outputs(partition_nodes, model_outputs)
    if generate_activation_propagation:
        # inputs still needed by later stages must also be forwarded
        outputs = apply_input_propagation(stage_id, outputs, inputs)
    outputs = sorted(outputs, key=(lambda node: node.id))
    output_destinations = get_output_destination_stages(graph, outputs)
    out_scopes = [graph.input_kw_ids.get(n.id, n.scope) for n in outputs]
    body = generate_body(outputs, partition_nodes, partition_fields, ready_expressions)
    if generate_explicit_del:
        # liveness pass: free temporaries as soon as they die
        body = add_del_statements(body)
    body = (dtab + f'''
{dtab}'''.join(body))
    lines.append(body)
    input_shapes = [n.tensor_shape for n in inputs]
    output_shapes = [n.tensor_shape for n in outputs]
    input_dtypes = [n.tensor_dtype for n in inputs]
    output_dtypes = [n.tensor_dtype for n in outputs]
    inputs_req_grad = [n.req_grad for n in inputs]
    outputs_req_grad = [n.req_grad for n in outputs]
    io = {'inputs': input_scopes, 'outputs': out_scopes, 'input_shapes': input_shapes, 'output_shapes': output_shapes, 'input_dtypes': input_dtypes, 'output_dtypes': output_dtypes, 'inputs_req_grad': inputs_req_grad, 'outputs_req_grad': outputs_req_grad, 'created_by': input_sources, 'used_by': output_destinations, 'depth': stage_depth_from_end}
    return (lines, io)
|
def get_output_destination_stages(graph, outputs):
    """For each output node, list the stage ids that consume it.

    -1 denotes the model output; the node's own stage is excluded.
    """
    output_destinations = []
    for node in outputs:
        consumers = {edge.stage_id for edge in node.out_edges}
        if (node.id in graph.output_ids):
            consumers.add(-1)
        consumers.discard(node.stage_id)
        output_destinations.append(list(consumers))
    return output_destinations
|
def get_input_source_stages(inputs):
    """Map each input node to the stage that produces it (-1 == model input)."""
    return [((- 1) if (n.type is NodeTypes.IN) else n.stage_id) for n in inputs]
|
def generate_declaration(input_ids: List[str], partition_fields: Dict[(Node, str)], input_args: Dict[(Node, str)], move_tensors=False) -> str:
    """Generate the forward declaration plus the scope<=>field comment map and
    the statement that unflattens *args into the named inputs.
    """
    parts = [f'{tab}def forward(self, *args):\n']
    for (node, field) in chain(partition_fields.items(), input_args.items()):
        parts.append(f'{dtab}# {node.scope} <=> {field}\n')
    if not input_ids:
        return ''.join(parts)
    joined_ids = ', '.join(input_ids)
    if move_tensors:
        parts.append(f'\n{dtab}# moving inputs to current device no op if already on the correct device\n')
        parts.append(f'{dtab}{joined_ids} = move_tensors(unflatten(args, self.input_structure), self.device)')
    else:
        parts.append(f'{dtab}{joined_ids} = unflatten(args, self.input_structure)')
    if (len(input_ids) == 1):
        # single target: unpack the 1-element structure
        parts[-1] += '[0]'
    return ''.join(parts)
|
def generate_body(outputs: List[Node], partition: List[Node], partition_layer_nodes_to_field_id: Dict[(Node, str)], ready_expressions: Dict[(Node, str)]) -> List[str]:
    """Generate the forward body statements followed by the return statement."""
    uses = node_uses(partition, set(outputs))
    # pre-computed expressions (inputs/buffers/params) are never released, so
    # give them an effectively infinite use count
    for expr_node in ready_expressions:
        uses[expr_node] = 100000
    body = generate_statements(partition, partition_layer_nodes_to_field_id, ready_expressions, uses)
    body.append(generate_return_statement(outputs, ready_expressions))
    return body
|
def generate_statements(partition_nodes: List[Node], partition_layer_nodes_to_field_id: Dict[(Node, str)], ready_expressions: Dict[(Node, str)], uses: Dict[(Node, int)]) -> List[str]:
    """Generate body statements following the topological order (node id) of
    the partition.

    Constants are inlined; temporary variable names are recycled once a
    value's use count drops to zero.
    """
    statements = []
    available_names = deque()
    variable_name_generator = variableNameGenerator()
    namespaces = used_namespaces()
    tensor_creation_ops_names = {f.__name__ for f in tensor_creation_ops.keys()}
    tensor_creation_ops_names_without_device_kw = {f.__name__ for f in tensor_creation_ops_without_device_kw.keys()}
    for node in sorted(partition_nodes, key=(lambda n: n.id)):
        if (node in ready_expressions):
            # already has an expression (input / buffer / param)
            continue
        scope = node.scope
        node_type = node.type
        if (node_type is NodeTypes.CONSTANT):
            # constants are inlined at use sites, no statement emitted
            ready_expressions[node] = generate_constant(node)
            continue
        variable_name = allocate_variable(node, ready_expressions, uses, available_names, variable_name_generator)
        if (node_type is NodeTypes.LAYER):
            parameter_list = generate_parameter_list(node.args, node.kwargs, ready_expressions)
            statements.append(f'{variable_name} = {partition_layer_nodes_to_field_id[node]}({parameter_list})')
        elif (node_type is NodeTypes.PRIMITIVE):
            # dict/list/tuple/set/slice construction has special syntax
            statement = generate_container_construct(ready_expressions, node, variable_name)
            statements.append(statement)
        else:
            # scope suffix looks like ".../namespace::func_name_<idx>"
            op_path = scope.rsplit('/', maxsplit=1)[1].rsplit('_', maxsplit=1)[0]
            (namespace, func_name) = op_path.split('::')
            if (namespace in namespaces):
                # inject an explicit device kwarg for torch tensor-creation ops
                # so tensors are created directly on the partition's device
                should_inject_device = ((namespace == 'torch') and (func_name in tensor_creation_ops_names))
                if (should_inject_device and (func_name in tensor_creation_ops_names_without_device_kw)):
                    warnings.warn(f"can't inject device for tensor_creation_op: {func_name}, may fail due device problem")
                    should_inject_device = False
                parameter_list = generate_parameter_list(node.args, node.kwargs, ready_expressions, should_inject_device=should_inject_device)
                statements.append(f'{variable_name} = {namespace}.{func_name}({parameter_list})')
            else:
                # method call on the first argument; dunder names go through
                # the magic-method translator
                param_list = generate_parameter_list(node.args, node.kwargs, ready_expressions, string=False)
                self_arg = param_list[0]
                if ('__' not in func_name):
                    statements.append(f"{variable_name} = {self_arg}.{func_name}({', '.join(param_list[1:])})")
                else:
                    statements.extend(generate_magic(variable_name, self_arg, func_name, param_list))
        ready_expressions[node] = variable_name
    return statements
|
def allocate_variable(node, ready_expressions, uses, available_names, variable_name_generator):
for i in node.in_edges:
uses[i] -= 1
if (uses[i] == 0):
available_names.append(ready_expressions[i])
if (len(available_names) > 0):
return available_names.pop()
else:
return next(variable_name_generator)
|
def generate_container_construct(ready_expressions, node, variable_name):
'generate a dict/list/tuple/set/etc. object which has special syntax\n '
if ('prim::DictConstruct' in node.scope):
kwargs = []
for (a, kws) in node.kwargs.items():
for k in kws:
kwargs.append(f"'{k}':{ready_expressions[a]}")
statement = f'{variable_name} = {{{kwargs}}}'
elif ('prim::SetConstruct' in node.scope):
parameter_list = generate_parameter_list(node.args, node.kwargs, ready_expressions)
statement = f'{variable_name} = {{{parameter_list}}}'
elif ('prim::ListConstruct' in node.scope):
parameter_list = generate_parameter_list(node.args, node.kwargs, ready_expressions)
statement = f'{variable_name} = [{parameter_list}]'
elif ('prim::TupleConstruct' in node.scope):
parameter_list = generate_parameter_list(node.args, node.kwargs, ready_expressions)
if (len(node.args) == 1):
parameter_list += ','
statement = f'{variable_name} = ({parameter_list})'
else:
assert ('prim::SliceConstruct' in node.scope)
parameter_list = generate_parameter_list(node.args, node.kwargs, ready_expressions)
statement = f'{variable_name} = slice({parameter_list})'
return statement
|
def generate_constant(node):
    """Return the inline source representation of a constant node's value."""
    assert (node.type is NodeTypes.CONSTANT)
    value = node.constant_value
    # device-like constants are replaced by the partition's own device
    is_device_like = (isinstance(value, torch.device) or (value == 'cpu') or (isinstance(value, str) and ('cuda' in value)))
    if is_device_like:
        return 'self.device'
    if (isinstance(value, str) and ('__getattribute__' not in list(node.out_edges)[0].scope)):
        # quote plain strings; attribute names are emitted unquoted elsewhere
        return f"'{value}'"
    if (isinstance(value, float) and (value in [float('inf'), float('-inf')])):
        # str(inf) == 'inf' is not a valid literal, wrap it in float()
        return f"float('{value}')"
    return str(value)
|
def generate_magic(variable_name, self_arg, func_name, param_list):
    """Translate a dunder method call into plain python source line(s).

    In-place forms (__setitem__, augmented arithmetic) may emit a second line
    aliasing the result variable to the mutated operand.
    """
    rest = ', '.join(param_list[1:])
    if (func_name == '__getattribute__'):
        return [f'{variable_name} = {self_arg}.{param_list[1]}']
    if (func_name == '__getitem__'):
        return [f'{variable_name} = {self_arg}[{param_list[1]}]']
    if (func_name == '__setitem__'):
        lines = [f'{self_arg}[{param_list[1]}] = {param_list[2]}']
        if (variable_name != self_arg):
            lines.append(f'{variable_name} = {self_arg}')
        return lines
    if (func_name == '__call__'):
        return [f'{variable_name} = {self_arg}({rest})']
    if (func_name in arithmetic_ops):
        return [f'{variable_name} = {self_arg} {arithmetic_ops[func_name]} {param_list[1]}']
    if (func_name in inplace_arithmetic_ops):
        lines = [f'{self_arg} {inplace_arithmetic_ops[func_name]} {param_list[1]}']
        if (variable_name != self_arg):
            lines.append(f'{variable_name} = {self_arg}')
        return lines
    if (func_name in r_arithmetic_ops):
        return [f'{variable_name} = {param_list[1]} {r_arithmetic_ops[func_name]} {self_arg}']
    if (func_name in logical_ops):
        return [f'{variable_name} = {self_arg} {logical_ops[func_name]} {param_list[1]}']
    if (func_name in conversion_ops):
        return [f'{variable_name} = {conversion_ops[func_name]}({self_arg})']
    if (func_name in magics):
        return [f'{variable_name} = {magics[func_name]}({self_arg})']
    if (func_name in unary_ops):
        return [f'{variable_name} = {unary_ops[func_name]}{self_arg}']
    # unknown dunder: fall back to an explicit method call
    return [f'{variable_name} = {self_arg}.{func_name}({rest})']
|
def generate_parameter_list(node_args, node_kwargs, ready_expressions, should_inject_device=False, string=True):
has_device_arg = any(((a.value_type is torch.device) for a in node_args))
has_device_arg |= any(((a.value_type is torch.device) for a in node_kwargs.keys()))
args = [ready_expressions[a] for a in node_args]
kwargs = []
for (a, kws) in node_kwargs.items():
for k in kws:
kwargs.append(f'{k}={ready_expressions[a]}')
if (should_inject_device and (not has_device_arg)):
kwargs.append('device=self.device')
if string:
return ', '.join((args + kwargs))
return (args + kwargs)
|
def generate_return_statement(output_nodes: List[Node], ready_expressions: Dict[(Node, str)]):
    """Build the return statement plus a comment listing the returned scopes."""
    scope_lines = f'\n{dtab}# '.join(n.scope for n in output_nodes)
    comment = f'# Returning:\n{dtab}# {scope_lines}'
    if (len(output_nodes) == 1):
        only = output_nodes[0]
        if (only.value_type in {list, tuple, set}):
            # container outputs are flattened into a flat list
            statement = f'return list(flatten({ready_expressions[only]}))'
        else:
            statement = f'return ({ready_expressions[only]},)'
    else:
        joined = ', '.join(ready_expressions[o] for o in output_nodes)
        statement = f'return list(flatten(({joined})))'
    return f'{comment}\n{dtab}{statement}'
|
def add_del_statements(statements: List[str]) -> Iterator[str]:
    """Backward liveness pass over generated statements.

    Inserts a `del <name>` immediately after a temporary's last use. Comments
    pass through untouched; in-place ops keep their assignment target alive.
    """
    name_pattern = re.compile('t_[0-9]+|x[0-9]+')
    inplace_pattern = re.compile('\\d \\S=')
    reversed_result = [statements[-1]]
    live = set(name_pattern.findall(statements[-1]))
    for stmt in statements[-2::-1]:
        if ('#' in stmt):
            reversed_result.append(stmt)
            continue
        names = name_pattern.findall(stmt)
        if not names:
            reversed_result.append(stmt)
            continue
        target, operands = names[0], names[1:]
        for name in operands:
            if (name not in live):
                # first sighting when walking backwards == last use forwards
                reversed_result.append(f'del {name}')
            live.add(name)
        if ((not inplace_pattern.findall(stmt)) and (target not in operands)):
            # plain assignment kills the target above this point
            live.discard(target)
        reversed_result.append(stmt)
    return reversed(reversed_result)
|
def node_uses(partition: List[Node], outputs: Set[Node]) -> Dict[(Node, int)]:
    """Count how many times each node's value is consumed.

    Counts same-stage successors plus one extra use for stage outputs;
    constants get an effectively infinite count since they are inlined.
    """
    uses = defaultdict(int)
    for node in partition:
        if (node in outputs):
            uses[node] += 1
        uses[node] += sum(1 for succ in node.out_edges if (succ.stage_id == node.stage_id))
        if (node.type is NodeTypes.CONSTANT):
            uses[node] = 100000
    return uses
|
def variableNameGenerator() -> Iterator[str]:
    """Return an infinite iterator of fresh temporary names: t_0, t_1, ..."""
    def _names():
        counter = 0
        while True:
            yield f't_{counter}'
            counter += 1
    return iter(_names())
|
def enforce_out_of_place_for_partition_inputs(partition: List[Node], partition_inputs: List[Node], warn=True):
    """Rewrite in-place tensor ops that would mutate a grad-requiring
    partition input into their out-of-place counterparts, by renaming the
    op's scope in place.
    """
    for n in partition:
        if ((n.type != NodeTypes.OP) or (n.value_type != torch.Tensor)):
            continue
        # scope suffix looks like ".../namespace::func_name_<idx>"
        (op_path, idx) = n.scope.rsplit('/', maxsplit=1)[1].rsplit('_', maxsplit=1)
        (namespace, func_name) = op_path.split('::')
        # trailing underscore marks torch/Tensor in-place variants (add_, mul_, ...)
        inplace_torch_function = (('torch' in namespace) and (func_name[(- 1)] == '_'))
        inplace_tensor_function = ((namespace == 'Tensor') and (func_name[(- 1)] == '_') and (not func_name.startswith('__')))
        inplace_tensor_magic = ((namespace == 'Tensor') and (func_name in inplace_arithmetic_ops))
        if (inplace_tensor_magic or inplace_tensor_function or inplace_torch_function):
            # only rewrite when the mutated operand (first input) is a
            # partition-input tensor that requires grad
            u = n.in_edges[0]
            if (not ((u.value_type is torch.Tensor) and u.req_grad and (u in partition_inputs))):
                continue
            if inplace_tensor_magic:
                # e.g. __iadd__ -> __add__ (drop the leading 'i' of the magic name)
                n.scope = (n.scope.rsplit('/', maxsplit=1)[0] + f'/{namespace}::__{func_name[3:]}_{idx}')
            elif (((namespace == 'Tensor') and hasattr(torch.Tensor, func_name[:(- 1)])) or ((namespace != 'Tensor') and hasattr(import_module(namespace), func_name[:(- 1)]))):
                # drop the trailing underscore if the out-of-place op exists
                n.scope = (n.scope.rsplit('/', maxsplit=1)[0] + f'/{namespace}::{func_name[:(- 1)]}_{idx}')
            if warn:
                warnings.warn(f'Enforcing out of place for {op_path}: changed to: {n.scope}')
|
def apply_input_propagation(stage_id: int, outputs: List[Node], inputs: List[Node]) -> Set[Node]:
    """Add to the outputs any non-model input that later stages still need,
    so the activation can be relayed stage-to-stage.
    """
    for inp in inputs:
        if (inp.type == NodeTypes.IN):
            # model inputs are delivered directly, never propagated
            continue
        if (stage_id < max(consumer.stage_id for consumer in inp.out_edges)):
            outputs.append(inp)
    return set(outputs)
|
def generate_init_method(graph: Graph, nodes: List[Node], class_name: str, layers: List[Node], is_param_dict: Dict[(str, bool)], buffs_and_params: List[Node]) -> Tuple[(str, Dict[(Node, str)])]:
    """Generate the partition class declaration and its __init__ source.

    Returns:
        (source, partition_fields) where partition_fields maps every
        layer/buffer/parameter node to its `self.*` field id.
    """
    # the default device index is taken from the trailing digits of the class name
    device_id = re.search('\\d+$', class_name).group()
    class_decl = f'class {class_name}(nn.Module):'
    (layer_scopes_field, tensor_scope_field) = generate_layer_and_tensor_scopes(layers, buffs_and_params)
    init_dec = f"{tab}def __init__(self, layers, tensors, device='cuda:{device_id}'):"
    super_init = f'{dtab}super().__init__()'
    (layers_init, partition_layers_to_fields) = generate__init__layer_statements(layers)
    # split free-floating tensors into parameters and buffers
    (params, buffs) = ([], [])
    for n in buffs_and_params:
        if is_param_dict[n.scope]:
            params.append(n)
        else:
            buffs.append(n)
    (tensor_init, partition_buffs_and_params_to_fields) = generate__init__buff_and_param_statements(buffs, params)
    lookup = generate_lookup(partition_layers_to_fields, partition_buffs_and_params_to_fields)
    partition_fields = partition_layers_to_fields
    partition_fields.update(partition_buffs_and_params_to_fields)
    device = f'{dtab}self.device = torch.device(device)'
    move = f'{dtab}self.to(self.device)'
    # record the nested structure of the inputs so forward() can unflatten
    # its flat *args back into the original nesting
    structure = nested_map((lambda x: 1), [n.req_grad for n in get_sorted_partition_inputs(graph, nodes)])
    cfg = f'{dtab}self.input_structure = {pretty_format_obj(structure)}'
    return (('\n'.join([class_decl, layer_scopes_field, tensor_scope_field, init_dec, super_init, layers_init, tensor_init, device, cfg, lookup, move]) + '\n'), partition_fields)
|
def generate_layer_and_tensor_scopes(layers: List[Node], buffs_and_params: List[Node]):
    """Generate the LAYER_SCOPES and TENSORS class-level scope lists.

    Both lists share the same textual layout, so a single helper builds them
    (the original duplicated the loop for each list).

    Returns:
        (layer_scopes_code, tensor_scopes_code) as class-body source strings.
    """
    def build_scope_list(header, nodes):
        # one quoted scope per line inside a list literal, indented for the class body
        rows = [header]
        rows.extend(f"{tab}'{n.scope}'," for n in nodes)
        rows.append(']')
        return (tab + f'\n{dtab}'.join(rows))
    scope_field = build_scope_list('LAYER_SCOPES = [', layers)
    tensor_field = build_scope_list('TENSORS = [', buffs_and_params)
    return (scope_field, tensor_field)
|
def generate__init__layer_statements(layers: List[Node]) -> Tuple[(str, Dict[(Node, str)])]:
    """Generate the __init__ statements registering the partition's layers.

    Returns the statements as a source string plus a map from layer node to
    its `self.l_<idx>` field id.
    """
    statements = ['# Initialize partition layers',
                  'for idx, layer_scope in enumerate(self.LAYER_SCOPES):',
                  f"{tab}self.add_module(f'l_{{idx}}' ,layers[layer_scope])"]
    field_ids = {node: f'self.l_{idx}' for (idx, node) in enumerate(layers)}
    sep = f'\n{dtab}'
    return ((sep + sep.join(statements)), field_ids)
|
def generate__init__buff_and_param_statements(buffers: List[Node], parameters: List[Node]) -> Tuple[(str, Dict[(Node, str)])]:
    """Generate the __init__ statements registering the partition's free
    floating buffers and parameters (tensors not owned by any layer).

    Returns the statements as a source string plus a map from node to its
    `self.b_<idx>` / `self.p_<idx>` field id.
    """
    statements = ['# Initialize partition tensors (params and buffs)',
                  'b = p = 0',
                  'for tensor_scope in self.TENSORS:',
                  f'{tab}tensor = tensors[tensor_scope]',
                  f'{tab}if isinstance(tensor, nn.Parameter):',
                  f"{dtab}self.register_parameter(f'p_{{p}}', tensor)",
                  f'{dtab}p += 1',
                  f'{tab}else:',
                  f"{dtab}self.register_buffer(f'b_{{b}}', tensor)",
                  f'{dtab}b += 1']
    tensor_ids = {node: f'self.b_{idx}' for (idx, node) in enumerate(buffers)}
    tensor_ids.update({node: f'self.p_{idx}' for (idx, node) in enumerate(parameters)})
    sep = f'\n{dtab}'
    return ((sep + sep.join(statements)) + '\n', tensor_ids)
|
def generate_lookup(layers_to_id: Dict[(Node, str)], tensors_to_id: Dict[(Node, str)]) -> str:
    """Generate the `self.lookup` assignment mapping partition field names
    (without the 'self.' prefix) to the dotted attribute path extracted from
    each node's scope.
    """
    entries = []
    for (node, field_id) in chain(layers_to_id.items(), tensors_to_id.items()):
        # scope segments look like "...[attr]..."; strip the brackets and join with dots
        attr_names = [m[1:(- 1)] for m in re.findall('\\[[a-zA-Z0-9_]*\\]', node.scope)]
        dotted = '.'.join(attr_names)
        # field_id[5:] drops the leading 'self.'
        entries.append(f"'{field_id[5:]}': '{dotted}'")
    joined = f',\n{dtab}{dtab}{dtab}'.join(entries)
    return f'{dtab}self.lookup = {{{joined}}}'
|
def generate_partition_state_methods() -> str:
    """Generate the partition overrides of state_dict/load_state_dict/
    named_buffers/named_parameters/cpu/cuda/to, which delegate to the
    module-level helpers so original-model names and device tracking are kept.
    """
    (cpu_code, cuda_code, to_code) = generate_cpu_cuda_to_methods()
    pieces = [generate_state_dict_method(),
              generate_load_state_dict_method(),
              generate_named_parameters_method(),
              generate_named_buffers_method(),
              cpu_code,
              cuda_code,
              to_code]
    return ('\n'.join(pieces) + '\n\n')
|
def generate_state_dict_method() -> str:
    """Emit the state_dict override that delegates to the module-level helper,
    keeping the original model's key names.
    """
    body = ['def state_dict(self, *args, **kwargs):',
            '# we return the state dict of this part as it should be in the original model',
            'return state_dict(self, *args, **kwargs)']
    return (tab + f'\n{dtab}'.join(body))
|
def generate_named_parameters_method() -> str:
    """Emit the named_parameters override that delegates to the module-level
    helper, keeping the original model's parameter names.
    """
    body = ['def named_parameters(self, *args, **kwargs):',
            '# we return the named parameters of this part as it should be in the original model',
            'return named_parameters(self, *args, **kwargs)']
    return (f'\n{tab}' + f'\n{dtab}'.join(body))
|
def generate_named_buffers_method() -> str:
    """Emit the named_buffers override that delegates to the module-level
    helper, keeping the original model's buffer names.
    """
    body = ['def named_buffers(self, *args, **kwargs):',
            '# we return the named buffers of this part as it should be in the original model',
            'return named_buffers(self, *args, **kwargs)']
    return (f'\n{tab}' + f'\n{dtab}'.join(body))
|
def generate_load_state_dict_method() -> str:
    """Emit the load_state_dict override that delegates to the module-level
    helper, so weights keyed by original names land on the right fields.
    """
    body = ['def load_state_dict(self, *args, **kwargs):',
            'return load_state_dict(self, *args, **kwargs)']
    return (f'\n{tab}' + f'\n{dtab}'.join(body))
|
def generate_cpu_cuda_to_methods() -> Tuple[(str, str, str)]:
    """Emit the cpu/cuda/to overrides that delegate to the module-level
    helpers, which keep self.device in sync with the partition's placement.

    Returns:
        Tuple[str, str, str]: the generated cpu, cuda and to method sources.
    """
    sep = f'\n{dtab}'
    cpu_code = f'\n{tab}def cpu(self):\n{dtab}return cpu(self)\n'
    cuda_code = sep.join([f'{tab}def cuda(self, device=None):',
                          'return cuda(self, device=device)\n'])
    to_code = sep.join([f'{tab}def to(self, *args, **kwargs):',
                        'return to(self, *args, **kwargs)'])
    return (cpu_code, cuda_code, to_code)
|
def get_state_methods():
    """Return the module-level state-management helpers that the generated
    partition methods delegate to."""
    return [state_dict, load_state_dict, named_buffers, named_parameters, cpu, cuda, to]
|
def state_dict(partition, *args, **kwargs):
    """Return the partition's state dict keyed by the ORIGINAL model's names,
    translated via partition.lookup (which maps field ids like 'l_0' to the
    original dotted prefix).
    """
    raw = nn.Module.state_dict(partition, *args, **kwargs)
    lookup = partition.lookup
    translated = dict()
    for (key, value) in raw.items():
        if (key in lookup):
            translated[lookup[key]] = value
        else:
            # nested key (e.g. 'l_0.weight'): translate only the first segment
            assert ('.' in key)
            (head, rest) = key.split('.', 1)
            translated[f'{lookup[head]}.{rest}'] = value
    return translated
|
def load_state_dict(partition, state_dict, strict=True):
    """Load a state dict keyed by the ORIGINAL model's names into this
    partition, translating keys via the reverse of partition.lookup and
    moving each tensor onto the partition's device.
    """
    reverse_lookup = {v: k for (k, v) in partition.lookup.items()}
    device = partition.device
    # NOTE(review): passes None positionally (as `destination`) to the
    # partition's state_dict -- presumably fine on the torch version targeted;
    # newer torch versions are stricter about positional state_dict args.
    keys = list(partition.state_dict(None).keys())
    new_state = dict()
    for k in keys:
        if (k in reverse_lookup):
            new_state[reverse_lookup[k]] = state_dict[k].to(device)
            continue
        # nested key: translate the prefix before the LAST dot.
        # NOTE(review): state_dict() above splits on the FIRST dot; for keys
        # with more than one dot these two translations disagree and such
        # keys are silently dropped here -- confirm intended.
        idx = k.rfind('.')
        to_replace = k[:idx]
        if (to_replace in reverse_lookup):
            key = (reverse_lookup[to_replace] + k[idx:])
            new_state[key] = state_dict[k].to(device)
    nn.Module.load_state_dict(partition, new_state, strict=strict)
|
def named_parameters(partition, prefix='', recurse=True):
    """Yield (name, parameter) pairs using the ORIGINAL model's names,
    translated via partition.lookup.
    """
    lookup = partition.lookup
    for (name, param) in nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse):
        if (name in lookup):
            yield (lookup[name], param)
        else:
            # nested name (e.g. 'l_0.weight'): translate only the first segment
            assert ('.' in name)
            (head, tail) = name.split('.', 1)
            yield (f'{lookup[head]}.{tail}', param)
|
def named_buffers(partition, prefix='', recurse=True):
    """Yield (name, buffer) pairs using the ORIGINAL model's names,
    translated via partition.lookup.
    """
    lookup = partition.lookup
    for (name, buf) in nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse):
        if (name in lookup):
            yield (lookup[name], buf)
        else:
            # nested name: translate only the first segment
            assert ('.' in name)
            (head, tail) = name.split('.', 1)
            yield (f'{lookup[head]}.{tail}', buf)
|
def cpu(partition):
    """Move the partition to CPU, keeping partition.device in sync."""
    partition.device = torch.device('cpu')
    return nn.Module.cpu(partition)
|
def cuda(partition, device=None):
    """Move the partition to a CUDA device (the current CUDA device when
    `device` is None), keeping partition.device in sync."""
    if (device is None):
        device = torch.cuda.current_device()
    partition.device = torch.device(device)
    return nn.Module.cuda(partition, partition.device)
|
def to(partition, *args, **kwargs):
    """Delegate to ``nn.Module.to`` while tracking the destination device.

    The device may arrive as the ``device`` keyword, via a ``tensor``
    keyword's device, or positionally as a device spec / reference tensor.
    When no device can be inferred, ``partition.device`` is left unchanged.
    """
    device = None
    if 'device' in kwargs:
        device = kwargs['device']
    elif 'tensor' in kwargs:
        device = kwargs['tensor'].device
    if args:
        first = args[0]
        if isinstance(first, (torch.device, int, str)):
            device = first
        if torch.is_tensor(first):
            device = first.device
    if device is not None:
        partition.device = torch.device(device)
    return nn.Module.to(partition, *args, **kwargs)
|
def pretty_format_obj(obj, dict_prefix=dtab) -> str:
    """Render *obj* (sizes, containers, types, ...) as python source text.

    Dicts are laid out one entry per line, indented by ``dict_prefix``;
    nested dicts indent one extra ``tab`` per level.
    """
    if isinstance(obj, torch.Size):
        return str(obj)
    if isinstance(obj, (list, tuple, set)):
        rendered = [pretty_format_obj(element) for element in obj]
        if len(rendered) == 1 and isinstance(obj, tuple):
            rendered[0] += ','  # keep single-element tuple syntax valid
        body = ', '.join(rendered)
        if isinstance(obj, tuple):
            left, right = '(', ')'
        elif isinstance(obj, list):
            left, right = '[', ']'
        else:
            left, right = '{', '}'
        return left + body + right
    if isinstance(obj, dict):
        entries = []
        for key, value in obj.items():
            if isinstance(key, str):
                key = f"'{key}'"
            else:
                assert isinstance(key, int)
            entries.append(f'{key}: {pretty_format_obj(value, dict_prefix + tab)}')
        if entries:
            entries[0] = f'\n{dict_prefix}' + entries[0]
        else:
            entries.append(f'\n{dict_prefix}')
            warnings.warn('empty dict in configuration')
        return '{' + f',\n{dict_prefix}'.join(entries) + '}'
    if obj is type(None):
        return 'None'
    if obj in [torch.Size, torch.device, torch.dtype]:
        return f'torch.{obj.__name__}'
    if isinstance(obj, type):
        return obj.__name__
    return str(obj)
|
def get_sorted_partition_inputs(graph: Graph, partition: List[Node]) -> List[Node]:
    """Return every node that feeds this partition.

    Collects the partition's own IN nodes plus any producer from another
    stage (or IN node). Sorted so keyword graph inputs come first, with
    ties broken by node id.
    """
    feeders = set()
    for node in partition:
        if node.type is NodeTypes.IN:
            feeders.add(node)
        feeders.update(src for src in node.in_edges
                       if src.stage_id != node.stage_id or src.type == NodeTypes.IN)

    def sort_key(node):
        kw_rank = 0 if node.id in graph.input_kw_ids else 1
        return (kw_rank, node.id)
    return sorted(feeders, key=sort_key)
|
def get_partition_outputs(partition: List[Node], model_outputs: List[Node]) -> List[Node]:
    """Return the partition's nodes that are either consumed by another
    stage or are model outputs."""
    def _is_output(node):
        crosses_stage = (node.type != NodeTypes.IN) and any(
            consumer.stage_id != node.stage_id for consumer in node.out_edges)
        return crosses_stage or (node in model_outputs)
    return [node for node in partition if _is_output(node)]
|
def ensure_inputs_are_used(graph: Graph, assert_same_stages=True):
    """Assign every graph input to the lowest-numbered stage that consumes it.

    When ``assert_same_stages`` is set, verifies afterwards that the move
    did not eliminate any stage.
    """
    if assert_same_stages:
        stages_before = graph.num_partitions
        stage_ids_before = {node.stage_id for node in graph.nodes}
    for node in graph.nodes:
        if node.type != NodeTypes.IN:
            continue
        assert len(node.out_edges) > 0, 'inputs must be used'
        consumer = min(node.out_edges, key=(lambda u: u.stage_id))
        node.stage_id = consumer.stage_id
        node.gpu_id = consumer.gpu_id
    if assert_same_stages:
        stage_ids_after = {node.stage_id for node in graph.nodes}
        stages_after = graph.num_partitions
        assert stages_before == stages_after, f'Accidentally killed a stage {(stages_before, stages_after)}, {(stage_ids_before - stage_ids_after)}'
|
def ensure_no_unnecessary_tuple_sends(graph: Graph, assert_same_stages=True):
    """Co-locate each constant-indexed ``tuple::__getitem__`` op (and its
    constant index node) with the tuple producer's stage and gpu, warning
    about every node that gets moved.

    When ``assert_same_stages`` is set, verifies afterwards that the moves
    did not eliminate any stage.
    """
    if assert_same_stages:
        stages_before = graph.num_partitions
        stage_ids_before = {node.stage_id for node in graph.nodes}
    for node in graph.nodes:
        if (node.type != NodeTypes.OP) or ('tuple::__getitem__' not in node.scope):
            continue
        getitem_node = node
        tuple_node = node.in_edges[0]
        index_node = node.in_edges[1]
        if index_node.type is not NodeTypes.CONSTANT:
            continue
        old_stage_set = {getitem_node.stage_id, index_node.stage_id, tuple_node.stage_id}
        old_stage_ids = [getitem_node.stage_id, index_node.stage_id, tuple_node.stage_id]
        old_gpu_ids = [getitem_node.gpu_id, index_node.gpu_id, tuple_node.gpu_id]
        # Move the getitem op and its index onto the tuple producer's placement.
        getitem_node.stage_id = index_node.stage_id = tuple_node.stage_id
        getitem_node.gpu_id = index_node.gpu_id = tuple_node.gpu_id
        if old_stage_set - {getitem_node.stage_id}:
            for moved, old_stage, old_gpu in zip([getitem_node, index_node, tuple_node], old_stage_ids, old_gpu_ids):
                if old_stage != getitem_node.stage_id:
                    warnings.warn(f'changed {moved.id}: stage:{old_stage}->{getitem_node.stage_id} gpu:{old_gpu}->{getitem_node.gpu_id}')
    if assert_same_stages:
        stage_ids_after = {node.stage_id for node in graph.nodes}
        stages_after = graph.num_partitions
        assert stages_before == stages_after, f'Accidentally killed a stage {(stages_before, stages_after)}, {(stage_ids_before - stage_ids_after)}'
|
class META_ALGORITH(enum.Enum):
    """Selects whether partitioning runs as a single pass or multi-level."""
    SINGLE_LEVEL = 1
    MULTI_LEVEL = 2

    def __repr__(self) -> str:
        # Show the bare member name instead of the default <...> form.
        return self.name
|
class ALGORITHM(enum.Enum):
    """The move strategy used by the partitioning algorithm."""
    SIMPLE_MOVES = 1
    ADVANCED_MOVES = 2
    GLOBAL_MOVES = 3
    FIDUCCIA_MATTHEYSES_MOVES = 4

    def __repr__(self) -> str:
        # Show the bare member name instead of the default <...> form.
        return self.name
|
class Objective(enum.Enum):
    """The quantity the partitioner tries to optimize."""
    EDGE_CUT = 1
    STAGE_TIME = 2

    def __repr__(self) -> str:
        # Show the bare member name instead of the default <...> form.
        return self.name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.