code stringlengths 17 6.64M |
|---|
def convert_handle_missing_print(bins, graph, verbose=False):
    """Build the stage -> gpu-ids mapping from GPU bins, repairing stage-id gaps.

    ``bins`` maps gpu_id -> iterable of nodes; each node carries ``stage_id``
    and ``id``. Returns a dict mapping stage_id -> sorted list of gpu ids.
    """
    node_to_stage_map = {}
    gpus_per_stage = defaultdict(set)
    for gpu_id, bin_nodes in bins.items():
        for node in bin_nodes:
            node: Node
            gpus_per_stage[node.stage_id].add(gpu_id)
            node_to_stage_map[node.id] = node.stage_id
    # Normalize the gpu sets into sorted lists before handing off.
    stage_to_gpu_map = {stage: sorted(gpus) for stage, gpus in gpus_per_stage.items()}
    # Re-number stages if any stage id vanished during binning.
    stage_to_gpu_map = handle_missing_stages(bins, graph, node_to_stage_map, stage_to_gpu_map)
    stage_to_nodes_map = defaultdict(list)
    for node_id, stage in node_to_stage_map.items():
        stage_to_nodes_map[stage].append(node_id)
    if verbose:
        print('stage_to_gpu_map:')
        pprint(stage_to_gpu_map)
        print('node_to_stage_map:')
        pprint(node_to_stage_map)
        print('stage_to_nodes_map:')
        pprint(stage_to_nodes_map)
    return stage_to_gpu_map
|
def handle_missing_stages(bins, graph, node_to_stage_map, stage_to_gpu_map):
    """Re-number stage ids so they form a contiguous range starting at 0.

    When a gap is detected, mutates ``node_to_stage_map`` and the nodes of
    ``graph`` in place, and rebuilds the stage -> sorted gpu-ids map from
    ``bins``. Otherwise returns ``stage_to_gpu_map`` unchanged.
    """
    present = sorted(stage_to_gpu_map.keys())
    contiguous = (present[0] == 0) and (present[-1] == (len(present) - 1))
    if not contiguous:
        print(f'-V- stages gone, stages_ids_before: {present} reassigning...')
        # old stage id -> new dense stage id
        remap = {old: new for new, old in enumerate(present)}
        for node_id, old_stage in list(node_to_stage_map.items()):
            if old_stage in remap:
                node_to_stage_map[node_id] = remap[old_stage]
        for node in graph.nodes:
            if node.stage_id in remap:
                node.stage_id = remap[node.stage_id]
        # Rebuild the stage -> gpus map with the renumbered stage ids.
        rebuilt = defaultdict(set)
        for gpu_id, bin_nodes in bins.items():
            for node in bin_nodes:
                rebuilt[node.stage_id].add(gpu_id)
        stage_to_gpu_map = {stage: sorted(gpus) for stage, gpus in rebuilt.items()}
    return stage_to_gpu_map
|
def _lworker(args):
    """Multiprocessing adapter for :func:`lworker`.

    Unpacks the argument tuple, runs ``lworker``, and converts the resulting
    graph to its picklable ``state()`` so the result can cross the process
    boundary.

    FIX: ``lworker`` returns a 4-tuple ``(times, work_graph, best_objective,
    refine_improvement)`` and the consumer in ``partition_mpipe`` unpacks four
    items per result; the original unpacked/returned only three, which raised
    ``ValueError`` whenever the parallel path ran.
    """
    (times, work_graph, best_objective, refine_improvement) = lworker(*args)
    return (times, work_graph.state(), best_objective, refine_improvement)
|
def lworker(model, L, P, edge_weight_function, node_weight_function, round_limit, saved_work_graph_without_par_edges, node_mem_estimator: NodeMemoryEstimator, basic_blocks, special_blocks, depth):
    """Single-L partitioning pipeline: coarsen to ~L nodes, greedily bin the
    coarse graph onto P gpus, derive stages, project the assignment back onto
    the fine graph, and refine it.

    :param saved_work_graph_without_par_edges: serialized graph state (as
        produced by ``Graph.state()``) with parallel edges already removed.
    :return: ``(times, work_graph, best_objective, refine_improvement)`` where
        ``times`` maps gpu id -> summed node weight of its bin.
    """
    work_graph = Graph.from_state(saved_work_graph_without_par_edges)
    hierarchy = coarsening(model, work_graph, edge_weight_function, node_weight_function, L, P, basic_blocks, special_blocks, depth)
    # presumably the coarsest graph of the hierarchy (its union-find sits at [-1][-1], used below)
    last_graph: Graph = hierarchy[(- 1)][(- 2)]
    print(f'After coarsening: got best effort graph with {len(last_graph)} nodes (required: L={L})')
    bins = greedy_best_fit(last_graph, P, node_weight_function, node_mem_estimator)
    # per-gpu total computation weight of the greedy assignment
    times = {i: sum((node_weight_function(x) for x in bins[i])) for i in bins}
    print('bin times greedy:')
    pprint(times)
    # record bin ownership on the coarse nodes
    for (i, bin_nodes) in bins.items():
        for n in bin_nodes:
            n.gpu_id = i
    last_graph.topo_sort(verbose=False, change_graph=False)
    id_to_node_worked_on = {n.topo_sort_id: n for n in last_graph.non_input_nodes}
    n_stages = stages_from_bins(last_graph, bins, id_to_node_worked_on=id_to_node_worked_on, assert_missing_in_bins=False, verbose=False)
    post_process_partition(last_graph, edge_weight_function=edge_weight_function, verbose_check_outputs=False)
    print(f'Got {n_stages} stages after initial assignment.')
    first_graph = hierarchy[0][0]
    work_graph = first_graph
    # union-find mapping each coarse node to the fine nodes it absorbed
    full_uf: UnionFind = hierarchy[(- 1)][(- 1)]
    component_mapping = full_uf.component_mapping()
    # project stage/gpu assignment from the coarse graph onto the fine graph
    for (topo_sort_id, node) in id_to_node_worked_on.items():
        for i in component_mapping[node.id]:
            a = first_graph[i]
            b = last_graph[node.id]
            a.stage_id = b.stage_id
            a.gpu_id = b.gpu_id
    (best_objective, refine_improvement) = refine(work_graph, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, round_limit=round_limit)
    return (times, work_graph, best_objective, refine_improvement)
|
def partition_mpipe(model, graph: Graph, num_gpus: int, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None, node_mem_estimator: NodeMemoryEstimator=NodeMemoryEstimator(), use_layers_graph: bool=True, round_limit=(- 1), nprocs=1, L_list=None, basic_blocks=(), special_blocks=(), depth=1000, **kwargs):
    """Partition ``graph`` across ``num_gpus`` GPUs (MPipe flow).

    Tries every coarsening target ``L`` in ``L_list`` (serially, or in a
    process pool when ``nprocs > 1`` and several Ls are given), keeps the
    result with the best refinement objective, and copies its stage/gpu
    assignment back onto ``graph``.

    NOTE(review): the ``node_mem_estimator`` default instance is shared
    across calls — presumed stateless; confirm before relying on it.

    :return: tuple ``(graph, stage_to_gpu_map)``.
    """
    print('mpipe got kwargs:', kwargs.keys())
    assert use_layers_graph
    graph.topo_sort()
    if use_layers_graph:
        (work_graph, lookup) = graph.new_graph_without_constants()
    else:
        (work_graph, lookup) = (graph, None)
    P = num_gpus
    saved_work_graph = work_graph
    L_to_res = dict()
    if (not L_list):
        L_list = [(2 * P)]
        warnings.warn(f'no L_list given. using mine {L_list}')
    print(f'mpipe L_list: {L_list}')
    if ((nprocs > 1) and (len(L_list) > 1)):
        warnings.warn('experimental: parallel run on L.')
        # FIX: get_copy_without_parallel_edges is a method and must be called
        # (the serial branch below already calls it); previously ``.state()``
        # was taken on the bound method object itself.
        worker_args = [(model, L, P, edge_weight_function, node_weight_function, round_limit, saved_work_graph.get_copy_without_parallel_edges().state(), node_mem_estimator, basic_blocks, special_blocks, depth) for L in L_list]
        with multiprocessing.Pool(min(nprocs, len(L_list))) as pool:
            results = pool.map(_lworker, worker_args)
        for (L, (times, work_graph_state, best_objective, refine_improvement)) in zip(L_list, results):
            work_graph = Graph.from_state(work_graph_state)
            L_to_res[L] = (work_graph, times, best_objective, refine_improvement)
    else:
        for L in L_list:
            try:
                (times, work_graph, best_objective, refine_improvement) = lworker(model, L, P, edge_weight_function, node_weight_function, round_limit, saved_work_graph.get_copy_without_parallel_edges().state(), node_mem_estimator, basic_blocks, special_blocks, depth)
                L_to_res[L] = (work_graph, times, best_objective, refine_improvement)
            except Exception as e:
                # FIX: the original compared the list itself to 1
                # (``L_list == 1`` — always False), so a single-L run swallowed
                # its only failure instead of re-raising it.
                if (len(L_list) == 1):
                    raise e
                else:
                    warnings.warn(f'partitioning failed to L={L}, trying others')
                    print(str(e))
                    continue
    # Select the best L. The primary criterion is the refinement objective;
    # the ``minmax`` (worst-case bin time) bookkeeping is kept for logging.
    best_Ls2 = None
    minmax = None
    L_to_minmax = dict()
    best_L = None
    best_objective_so_far = None
    L_to_best_objective = dict()
    L_to_refinement_improvement = dict()
    L_to_num_stages = dict()
    for (L, (work_graph, times, best_objective, refine_improvement)) in L_to_res.items():
        worstcase = max(times.values())
        if (best_Ls2 is None):
            minmax = worstcase
            best_Ls2 = L
        elif (worstcase < minmax):
            best_Ls2 = L
            minmax = worstcase
        L_to_minmax[L] = worstcase
        if (best_L is None):
            best_objective_so_far = best_objective
            best_L = L
        elif (best_objective_so_far > best_objective):
            best_L = L
            best_objective_so_far = best_objective
        L_to_best_objective[L] = best_objective
        L_to_refinement_improvement[L] = refine_improvement
        nstages = work_graph.num_partitions
        # a None partition id is a placeholder, not a real stage
        if (None in work_graph.unique_partitions_ids):
            nstages -= 1
        L_to_num_stages[L] = nstages
    L = best_L
    if (not L_to_res):
        raise RuntimeError('ERROR: MPIPE Could not find any legal assignment, see errors')
    work_graph = L_to_res[L][0]
    print(f'Best L is {L}')
    print('L_to_minmax (stage2):', L_to_minmax)
    print('L to refinement improvement', L_to_refinement_improvement)
    print('L_to_num_stages:', L_to_num_stages)
    print('L_to_best_objective', L_to_best_objective)
    # Copy the winning assignment back onto the pre-lworker work graph.
    for node in saved_work_graph.non_input_nodes:
        node.gpu_id = work_graph[node.id].gpu_id
        node.stage_id = work_graph[node.id].stage_id
    work_graph = saved_work_graph
    work_graph = post_process_partition(work_graph)
    if use_layers_graph:
        graph.induce_layer_partition(work_graph, lookup)
    bins = defaultdict(list)
    for node in graph.nodes:
        if (node.gpu_id is None):
            continue
        bins[node.gpu_id].append(node)
    stage_to_gpu_map = convert_handle_missing_print(bins, graph)
    return (graph, stage_to_gpu_map)
|
def main():
    """Build a small encoder/bottleneck/decoder Sequential, profile it on GPU,
    and run the MPipe partitioner over 2 gpus."""
    from autopipe.autopipe.api import build_profiled_graph
    import torch
    from torch.nn import Sequential, Linear
    IN_FEATURES = 320
    OUT_FEATURES = 8
    n_encoder_decoder = 12
    # encoder stack, one bottleneck projection, then a decoder stack
    layers = [Linear(IN_FEATURES, IN_FEATURES) for _ in range(n_encoder_decoder)]
    layers.append(Linear(IN_FEATURES, OUT_FEATURES))
    layers.extend(Linear(OUT_FEATURES, OUT_FEATURES) for _ in range(n_encoder_decoder))
    model = Sequential(*layers)
    inputs = torch.randn(IN_FEATURES, IN_FEATURES)
    model = model.cuda()
    inputs = inputs.cuda()
    graph = build_profiled_graph(model, model_args=(inputs,), n_iter=50)
    node_weight_function = NodeWeightFunction(bwd_to_fwd_ratio=1, MULT_FACTOR=100000)
    edge_weight_function = EdgeWeightFunction(bw_GBps=12, bwd_to_fwd_ratio=0, MULT_FACTOR=100000, penalty=100000)
    (graph, stage_to_gpu_map) = partition_mpipe(model, graph=graph, num_gpus=2, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, use_layers_graph=True)
|
def post_process_partition(graph: Graph, edge_weight_function=None, verbose_on_error=True, assert_output_types=False, verbose_check_outputs=False) -> Graph:
    """Process the partition and optimize it; called as part of the
    partition_graph flow.

    Re-numbers stage indices, fails loudly if stages form a mutual-dependency
    cycle (printing the offending stage pairs first), then inspects values
    sent across partition boundaries.

    Parameters
    ----------
    graph:
        the Graph object that was partitioned
    verbose_on_error:
        print extra info when cycle can't be solved
    """
    re_assign_partition_indices(graph)
    if has_stage_cycles(graph):
        # Dump a rendering of the broken partition when DEBUG is set.
        if os.environ.get('DEBUG', False):
            graph.save_as_pdf(f'{graph.model_name}_before_fix', '.')
        problems, info = get_problematic_partitions(graph)
        for problem, details in zip(problems, info):
            print('===Problem===')
            print(problem)
            for detail in details:
                print(detail)
        raise AssertionError('error cycle detected mutual dependency between partitions')
    check_partition_outputs(graph, assert_output_types=assert_output_types, edge_weight_function=edge_weight_function, verbose=verbose_check_outputs)
    return graph
|
def check_partition_outputs(graph, assert_output_types=False, edge_weight_function=None, verbose=True):
    """Check values crossing partition boundaries.

    With ``assert_output_types`` the first non-tensor boundary output raises;
    otherwise problems are merely printed (when ``verbose``).
    """
    is_valid, error = is_output_only_tensors(graph, edge_weight_function)
    if assert_output_types:
        assert is_valid, error
        return
    if is_valid or not verbose:
        return
    print('Output between partitions is tricky, but allowing this')
    print_all_problematic_outputs_between_partitions(graph, edge_weight_function)
|
def get_problematic_partitions(graph):
    ' For debug when cycle are detected '
    # Each edge whose destination stage precedes its source stage is a
    # candidate contributor to a stage cycle.
    problems = []
    info = []
    for src in graph.nodes:
        backward_edges = [dst for dst in src.out_edges if dst.stage_id < src.stage_id]
        for dst in backward_edges:
            problems.append([src.stage_id, dst.stage_id])
            info.append([(src.id, src.stage_id, src.scope), (dst.id, dst.stage_id, dst.scope)])
    return (problems, info)
|
def is_output_only_tensors(graph: Graph, edge_weight_function=None):
'\n check if we only send tensors between partitions\n '
for n in graph.nodes:
if (n.value_type in {type(None), list, tuple, dict, set, int, bool, float, str, slice, torch.Size, torch.dtype}):
for o in n.out_edges:
if (n.stage_id != o.stage_id):
msg = f'invalid output type at partition boundary {n.stage_id}=>{o.stage_id}'
msg += f'''
output is {n.scope} of type {n.value_type}'''
if (edge_weight_function is not None):
msg += f' weight {edge_weight_function(n, o)}'
return (False, msg)
return (True, '')
|
def print_all_problematic_outputs_between_partitions(graph: Graph, edge_weight_function=None):
'\n check if we only send tensors between partitions\n '
problems = []
valid_state = True
for n in graph.nodes:
if (n.value_type in {type(None), list, tuple, dict, set, int, bool, float, str, slice, torch.Size, torch.dtype}):
for o in n.out_edges:
if (n.stage_id != o.stage_id):
msg = f'invalid output type at partition boundary {n.stage_id}=>{o.stage_id}'
msg += f'''
output is {n.scope} of type {n.value_type}'''
if (edge_weight_function is not None):
msg += f' weight {edge_weight_function(n, o)}'
valid_state = False
problems.append(msg)
s = ((f'''Valid outputs states = {valid_state}
''' + 'problems:\n') + '\n'.join(problems))
print(s)
|
class Refiner():
    """Greedy stage-refinement state holder.

    Caches, per stage: its node set, owning GPU, split (fwd, bwd) computation
    cost, border edges, and total cost — and updates all of them incrementally
    when nodes are moved between stages via ``update_on_move``. The objective
    minimized is the cost of the most expensive stage.
    """

    def __init__(self, graph: Graph, node_weight_function: NodeWeightFunction, edge_weight_function: EdgeWeightFunction):
        """Build all caches from ``graph``.

        Requires stage ids to already form the contiguous range 0..n-1 with no
        ``None``, and gpu ids likewise contiguous from 0.
        """
        u = graph.unique_partitions_ids
        if (None in u):
            raise NotImplementedError('please remove None stage_id')
        n_stages = len(u)
        # stage ids must be the contiguous range 0..n_stages-1
        assert (min(u) == 0)
        assert (max(u) == (n_stages - 1))
        cwf = CoarsenedWeightFunction(edge_weight_function=edge_weight_function, node_weight_function=node_weight_function)
        self.nwf = node_weight_function
        self.ewf = edge_weight_function
        self.cwf = cwf
        self.graph = graph
        self.n_stages = n_stages
        stage_to_nodes = defaultdict(set)
        for n in graph.non_input_nodes:
            stage_to_nodes[n.stage_id].add(n)
        self.stage_to_nodes = stage_to_nodes
        # gpu of an arbitrary member node — assumes a whole stage shares one gpu
        stage_to_gpu = {stage_id: next(iter(nodes)).gpu_id for (stage_id, nodes) in stage_to_nodes.items()}
        self.stage_to_gpu = stage_to_gpu
        u = set(stage_to_gpu.values())
        # gpu ids must also be contiguous 0..num_gpus-1
        assert ((len(u) - 1) == max(u))
        assert (min(u) == 0)
        num_gpus = len(u)
        self.num_gpus = num_gpus
        # per-stage (fwd, bwd) computation cost, and its scalar sum
        stage_to_split_comp_cost = {stage_id: cwf.calculate_comp(nodes) for (stage_id, nodes) in stage_to_nodes.items()}
        stage_to_comp_cost = {stage_id: sum(v) for (stage_id, v) in stage_to_split_comp_cost.items()}
        self.stage_to_comp_cost = stage_to_comp_cost
        self.stage_to_split_comp_cost = stage_to_split_comp_cost
        gpu_to_comp_cost = {gpu_id: 0 for gpu_id in range(num_gpus)}
        for (stage_id, nodes) in stage_to_nodes.items():
            gpu_id = stage_to_gpu[stage_id]
            gpu_to_comp_cost[gpu_id] += stage_to_comp_cost[stage_id]
        self.gpu_to_comp_cost = gpu_to_comp_cost
        stage_borders = dict()
        for (stage_id, nodes) in stage_to_nodes.items():
            stage_borders[stage_id] = self.cwf.calculate_borders(nodes)
        self.stage_borders = stage_borders
        self.stage_to_cost = self.calc_stage_to_cost()
        self.best_objective = self.calc_objective()
        self.initial_objective = self.best_objective

    def calc_stage_to_cost(self):
        """Recompute the full stage -> cost map with the coarsened weight
        function, using the cached borders and comp-cost splits."""
        cwf = self.cwf
        gpu_to_comp_cost = self.gpu_to_comp_cost
        stage_borders = self.stage_borders
        stage_to_nodes = self.stage_to_nodes
        stage_to_gpu = self.stage_to_gpu
        stage_to_cost = {stage_id: cwf(nodes, boarders=stage_borders[stage_id], total_gpu_comp_cost=gpu_to_comp_cost[stage_to_gpu[stage_id]], total_stage_comp_cost_fwd=self.stage_to_split_comp_cost[stage_id][0], total_stage_comp_cost_bwd=self.stage_to_split_comp_cost[stage_id][1]) for (stage_id, nodes) in stage_to_nodes.items()}
        return stage_to_cost

    def calc_total_comm(self):
        """Sum forward + backward communication over all stage borders."""
        cwf = self.cwf
        gpu_to_comp_cost = self.gpu_to_comp_cost
        stage_borders = self.stage_borders
        stage_to_nodes = self.stage_to_nodes
        stage_to_gpu = self.stage_to_gpu
        total_comm = 0
        for (stage_id, nodes) in stage_to_nodes.items():
            boarders = stage_borders[stage_id]
            if boarders:
                (outgoing_edges, _, incomming_edges, _) = boarders
            else:
                # cache miss: derive borders directly from the node set
                (outgoing_edges, _, incomming_edges, _) = cwf.calculate_borders(nodes)
            (comm_bwd, comm_fwd) = cwf.calculate_comm_forward_and_backward(incomming_edges, outgoing_edges)
            total_comm += comm_bwd
            total_comm += comm_fwd
        return total_comm

    def calc_objective(self):
        """Bottleneck objective: cost of the most expensive stage."""
        return max(self.stage_to_cost.values())

    def percents_of_relative_objective_improvement(self):
        """Relative improvement of best over initial objective (0.05 == 5%)."""
        return ((self.initial_objective / self.best_objective) - 1)

    def update_on_move(self, nodes: Iterable[Node], new_stage_id: int, escape_minima=False):
        """Tentatively move ``nodes`` (all from one stage) to ``new_stage_id``.

        Keeps the move if it lowers the objective; on an exact tie, keeps it
        when it reduces total communication (or when ``escape_minima`` allows
        equal communication). Otherwise rolls the move back.
        Returns True iff the move was kept.
        """
        prev_stage_id = next(iter(nodes)).stage_id
        prev_comm = self.calc_total_comm()
        self._apply_move(nodes, new_stage_id)
        new_objective = self.calc_objective()
        if (new_objective < self.best_objective):
            self.best_objective = new_objective
            return True
        elif (new_objective == self.best_objective):
            self.best_objective = new_objective
            curr_comm = self.calc_total_comm()
            comm_sign = (prev_comm - curr_comm)
            if ((comm_sign > 0) or ((comm_sign == 0) and escape_minima)):
                print('objective is the same, reducing comm')
                return True
        # objective got worse (or tie without comm benefit): undo the move
        self._apply_move(nodes, prev_stage_id)
        return False

    def _apply_move(self, nodes: Iterable[Node], new_stage_id: int):
        """Move ``nodes`` to ``new_stage_id`` and incrementally update every
        cached cost structure (comp costs, gpu loads, borders, stage costs)."""
        prev_stage_id = next(iter(nodes)).stage_id
        prev_gpu_id = self.stage_to_gpu[prev_stage_id]
        new_gpu_id = self.stage_to_gpu[new_stage_id]
        nwf = self.nwf
        for node in nodes:
            node.stage_id = new_stage_id
            node.gpu_id = new_gpu_id
            self.stage_to_nodes[prev_stage_id].remove(node)
            self.stage_to_nodes[new_stage_id].add(node)
            if (len(self.stage_to_nodes[prev_stage_id]) == 0):
                warnings.warn(f'stage {prev_stage_id} eliminated in refinement')
            # temporarily flip nwf.ratio to obtain the separate fwd (ratio=0)
            # and bwd (ratio=-1) costs; ratio is restored to 1 afterwards
            tmp = nwf.ratio
            assert (tmp == 1)
            nwf.ratio = 0
            comp_cost_fwd = nwf(node)
            nwf.ratio = (- 1)
            comp_cost_bwd = nwf(node)
            nwf.ratio = 1
            comp_cost = (comp_cost_bwd + comp_cost_fwd)
            self.stage_to_comp_cost[prev_stage_id] -= comp_cost
            self.stage_to_comp_cost[new_stage_id] += comp_cost
            x = self.stage_to_split_comp_cost[prev_stage_id]
            self.stage_to_split_comp_cost[prev_stage_id] = ((x[0] - comp_cost_fwd), (x[1] - comp_cost_bwd))
            y = self.stage_to_split_comp_cost[new_stage_id]
            self.stage_to_split_comp_cost[new_stage_id] = ((y[0] + comp_cost_fwd), (y[1] + comp_cost_bwd))
            self.gpu_to_comp_cost[prev_gpu_id] -= comp_cost
            self.gpu_to_comp_cost[new_gpu_id] += comp_cost
        # borders of the two affected stages and all stage costs are recomputed
        # from scratch — only these two stages changed membership
        self.stage_borders[prev_stage_id] = self.cwf.calculate_borders(self.stage_to_nodes[prev_stage_id])
        self.stage_borders[new_stage_id] = self.cwf.calculate_borders(self.stage_to_nodes[new_stage_id])
        self.stage_to_cost = self.calc_stage_to_cost()

    @staticmethod
    def is_fwd_move_valid_local(node):
        """A forward move is locally invalid if any consumer shares the node's
        current stage (it would be left behind)."""
        cur_stage = node.stage_id
        others = [nn.stage_id for nn in node.out_edges]
        for i in others:
            if (i == cur_stage):
                return False
        return True

    @staticmethod
    def is_fwd_move_valid_topo(node, dst_stage_id):
        """Moving forward to ``dst_stage_id`` must not jump past any consumer's
        stage (would create a backward edge)."""
        for nn in node.out_edges:
            if (nn.stage_id < dst_stage_id):
                return False
        return True

    @staticmethod
    def is_bwd_move_valid_local(node):
        """A backward move is locally invalid if any producer (arg) shares the
        node's current stage."""
        assert (not node.kwargs), 'should be eliminated by parallel edges.'
        cur_stage = node.stage_id
        others = [nn.stage_id for nn in node.args]
        for i in others:
            if (i == cur_stage):
                return False
        return True

    @staticmethod
    def is_bwd_move_valid_topo(node, dst_stage_id):
        """Moving backward to ``dst_stage_id`` must not jump before any
        producer's stage."""
        assert (not node.kwargs), 'should be eliminated by parallel edges.'
        for nn in node.args:
            if (nn.stage_id > dst_stage_id):
                return False
        return True
|
def refine(graph: Graph, node_weight_function: NodeWeightFunction, edge_weight_function: EdgeWeightFunction, round_limit=(- 1)):
    """Iteratively move border nodes between stages to lower the bottleneck cost.

    Alternates a forward pass (candidates from outgoing border edges, visited
    in reverse topological order) and a backward pass (incoming border edges,
    forward order) until a full round makes no move or the round limit is hit.

    :param graph: partitioned graph; node ``stage_id``/``gpu_id`` mutated in place.
    :param node_weight_function: computation-cost estimator for a node.
    :param edge_weight_function: communication-cost estimator for an edge.
    :param round_limit: maximum number of rounds; negative means unlimited.
    :return: ``(best_objective, relative_improvement)``.
    """
    re_assign_partition_indices(graph)
    refiner = Refiner(graph, node_weight_function, edge_weight_function)
    rounds = 0
    num_moved = 1
    total_moves = 0
    # FIX: the original guard used `round_limit < rounds`, so any positive
    # round_limit stopped the loop before the first round ran; the intended
    # bound is `rounds < round_limit` (negative round_limit == unlimited).
    while (num_moved > 0) and ((round_limit < 0) or (rounds < round_limit)):
        rounds += 1
        num_moved = 0
        for (stage_id, borders) in reversed(refiner.stage_borders.items()):
            (outgoing_edges, outgoing_nodes, incoming_edges, incoming_nodes) = borders
            invalid_local_nodes = set()
            valid_local_nodes = set()
            for e in sorted(outgoing_edges, key=(lambda x: (x[0].topo_sort_id, (- x[1].topo_sort_id))), reverse=True):
                node = e[0]
                dst_stage = e[1].stage_id
                # NOTE(review): this cache is only consulted for nodes already
                # in invalid_local_nodes, which is never populated on first
                # sight, so the local-validity screen is effectively inert.
                # Preserved as-is (fixing it would change which moves happen);
                # confirm intent with the original author.
                if node not in valid_local_nodes:
                    if (node not in valid_local_nodes) and (node in invalid_local_nodes):
                        if refiner.is_fwd_move_valid_local(node):
                            valid_local_nodes.add(node)
                        else:
                            invalid_local_nodes.add(node)
                        continue
                if not refiner.is_fwd_move_valid_topo(node, dst_stage):
                    continue
                moved = refiner.update_on_move(nodes=[node], new_stage_id=dst_stage, escape_minima=False)
                if moved:
                    num_moved += 1
        num_moved_fwd = num_moved
        for (stage_id, borders) in reversed(refiner.stage_borders.items()):
            (outgoing_edges, outgoing_nodes, incoming_edges, incoming_nodes) = borders
            for e in sorted(incoming_edges, key=(lambda x: (x[0].topo_sort_id, (- x[1].topo_sort_id))), reverse=False):
                dst_stage = e[0].stage_id
                node = e[1]
                if not refiner.is_bwd_move_valid_topo(node, dst_stage):
                    continue
                moved = refiner.update_on_move(nodes=[node], new_stage_id=dst_stage, escape_minima=False)
                if moved:
                    num_moved += 1
        num_moved_bwd = (num_moved - num_moved_fwd)
        total_moves += num_moved
        print(f'Round {rounds}: num_moved {num_moved}, (fwd {num_moved_fwd}, bwd {num_moved_bwd})')
    pori = refiner.percents_of_relative_objective_improvement()
    print(f'Refinement ended after {rounds} rounds and {total_moves} moves. Relative improvement: {pori:.2%}')
    return (refiner.best_objective, pori)
|
class RatioBlockCreator():
    """Coarsens the graph by merging chains of nodes leftward until each merged
    block's computation cost covers its forward-cut communication cost.

    Merge history is tracked in the shared ``UnionFind``; protected edges and
    nodes are exempt from further merging.
    """

    def __init__(self, graph: Graph, edge_weight_function: EdgeWeightFunction, node_weight_function: NodeWeightFunction, uf: UnionFind):
        self.graph = graph
        self.ewf = edge_weight_function
        self.nwf = node_weight_function
        self.cwf = CoarsenedWeightFunction(edge_weight_function=edge_weight_function, node_weight_function=node_weight_function)
        # union-find recording which original nodes each merged node contains
        self.uf = uf
        # (src_id, dst_id) edges / node ids excluded from future merges
        self.protected_edges: Set[Tuple[int, int]] = set()
        self.protected_nodes: Set[int] = set()

    def change_protected_before_merge(self, a: Node, b: Node):
        """Before merging ``b`` into ``a``, re-point protected edges incident to
        ``b`` so they reference ``a`` instead.

        Returns True iff any protected edge was rewritten.
        """
        changes_list_out = []
        changes_list_in = []
        for x in b.out_edges:
            if ((b.id, x.id) in self.protected_edges):
                changes_list_out.append((b.id, x.id))
        for x in b.in_edges:
            if ((x.id, b.id) in self.protected_edges):
                changes_list_in.append((x.id, b.id))
        for edge in changes_list_out:
            if (edge in self.protected_edges):
                self.protected_edges.remove(edge)
                self.protected_edges.add((a.id, edge[1]))
        for edge in changes_list_in:
            if (edge in self.protected_edges):
                self.protected_edges.remove(edge)
                self.protected_edges.add((edge[0], a.id))
        protected_changed = ((len(changes_list_in) > 0) or (len(changes_list_out) > 0))
        return protected_changed

    def apply(self, L, verbose=False):
        """Merge candidate blocks (cheapest forward cut first) until at most
        ``L`` nodes remain or every candidate has been handled.

        Failed attempts restore the graph from a saved state; successful ones
        protect the resulting node and refresh the ok-fwd/bwd bookkeeping.
        """
        uf = self.uf
        # candidate blocks sorted ascending by total forward cut communication
        node_to_cuts = self.sorted_block_to_cuts(forward=True, descending=False)
        # "ok" == the node's computation already dominates its communication
        node_to_ok_fwd = {node.id: (not self.cwf.is_comm_bounded_forward(node)) for node in self.graph.non_input_nodes}
        node_to_ok_bwd = {node.id: (not self.cwf.is_comm_bounded_backward(node)) for node in self.graph.non_input_nodes}
        node_to_ok_both = {x: (a and b) for (x, a, b) in zip(node_to_ok_fwd.keys(), node_to_ok_fwd.values(), node_to_ok_bwd.values())}
        n_merges = 0
        n_iter = 0
        failed: Set[int] = set()
        failed_then_merged = set()
        n_ok_fwd = sum((1 for x in node_to_ok_fwd.values() if x))
        n_ok_bwd = sum((1 for x in node_to_ok_bwd.values() if x))
        n_ok_both = sum((1 for x in node_to_ok_both.values() if x))

        def print_state():
            # progress snapshot printed between iterations
            n_nodes = len(self.graph)
            n_failed = len(failed)
            d = dict(iter=n_iter, nodes=n_nodes, merges=n_merges, failed=n_failed, ok_fwd=n_ok_fwd, ok_bwd=n_ok_bwd, ok_both=n_ok_both, remaining=len(node_to_cuts))
            print(d)
        print_state()
        print('Handling nodes without merges nodes')
        # nodes that are already compute-bound need no merging: drop & protect
        for (n, is_ok) in node_to_ok_fwd.items():
            if is_ok:
                node_to_cuts.pop(n, default=None)
                self.protected_nodes.add(n)
        print_state()
        while (node_to_cuts and (len(self.graph) > L)):
            n_iter += 1
            (node_id, node_fwd_cut) = node_to_cuts.popitem()
            root_left = self.graph[node_id]
            # snapshot so a failed search can be rolled back wholesale
            saved_state = self.graph.state()
            (is_success, graph_changed, merges_left, protected_node) = self.search_left(root_left, node_fwd_cut, uf=uf)
            if (not is_success):
                if graph_changed:
                    self.graph.load_state(graph_state=saved_state)
                failed.add(root_left.id)
                print_state()
                continue
            self.protected_nodes.add(protected_node.id)
            merges = merges_left
            n_merges += len(merges)
            for (i, (a, b)) in enumerate(merges_left):
                # b was merged away: purge it from all bookkeeping maps
                node_to_cuts.pop(b, default=None)
                if node_to_ok_fwd.pop(b, None):
                    n_ok_fwd -= 1
                if node_to_ok_bwd.pop(b, None):
                    n_ok_bwd -= 1
                if node_to_ok_both.pop(b, None):
                    n_ok_both -= 1
                if (b in failed):
                    failed.remove(b)
                    failed_then_merged.add(b)
                uf.union(a, b, smallest_new_root=False)
                if (i == (len(merges_left) - 1)):
                    # last merge: `a` is the surviving, now compute-bound node
                    assert (a == protected_node.id)
                    node = self.graph[a]
                    was_ok_bwd_b4_merge = node_to_ok_bwd[a]
                    is_ok_bwd_after_merge = (not self.cwf.is_comm_bounded_backward(node))
                    node_to_ok_bwd[a] = is_ok_bwd_after_merge
                    node_to_ok_both[a] = is_ok_bwd_after_merge
                    if (is_ok_bwd_after_merge and (not was_ok_bwd_b4_merge)):
                        n_ok_bwd += 1
                    if is_ok_bwd_after_merge:
                        n_ok_both += 1
                    assert (not node_to_ok_fwd[a])
                    node_to_ok_fwd[a] = True
                    n_ok_fwd += 1
            if verbose:
                print_state()

    def update_sorted_edges_on_merges(self, edges_to_value, merges: List[Tuple[(int, int)]], allow_poped_outside=True):
        """Apply ``merges`` to the union-find and rewrite the keys of the
        ``ValueSortedDict`` ``edges_to_value`` to use post-merge node ids.

        ``uf_bwd`` is a throwaway union-find used to recover the *pre-merge*
        representative ids that the dict keys were created with.
        ``allow_poped_outside`` tolerates keys the caller already removed.
        """
        assert isinstance(edges_to_value, ValueSortedDict)
        uf = self.uf
        uf_bwd = UnionFind()
        for edge in merges:
            self.uf.union(*edge, smallest_new_root=False)
            uf_bwd.add(edge[0])
            uf_bwd.add(edge[1])
        for (a, b) in merges:
            a_old = uf_bwd[uf_bwd.find(a)]
            b_old = uf_bwd[uf_bwd.find(b)]
            uf_bwd.union(a, b)
            edge_to_remove = (a_old, b_old)
            try:
                del edges_to_value[edge_to_remove]
            except KeyError as e:
                if (not allow_poped_outside):
                    raise e
            a_new = uf[uf.find(a)]
            # NOTE(review): uses find(a); after the unions above
            # find(a) == find(b), so this is presumably equivalent to
            # find(b) — confirm intent.
            b_new = uf[uf.find(a)]
            cur_b_node = self.graph[b_new]
            cur_a_node = self.graph[a_new]
            # re-key every edge incident to the merged node's successors
            for cur_x_node in cur_b_node.out_edges:
                x_new = cur_x_node.id
                x_old = (uf_bwd[uf_bwd.find(x_new)] if (x_new in uf_bwd) else x_new)
                edge_to_remove = (b_old, x_old)
                try:
                    del edges_to_value[edge_to_remove]
                except KeyError as e:
                    if (not allow_poped_outside):
                        raise e
                edge_to_remove = (a_old, x_old)
                if (edge_to_remove in edges_to_value):
                    del edges_to_value[edge_to_remove]
                edge_to_add = (a_new, x_new)
                cur_x_node.update_compound_weights_from_uf(uf)
                value_of_edge_to_add = self.ewf(cur_a_node, cur_x_node)
                edges_to_value[edge_to_add] = value_of_edge_to_add
            # ...and to its predecessors
            for cur_x_node in cur_b_node.in_edges:
                x_new = cur_x_node.id
                x_old = (uf_bwd[uf_bwd.find(x_new)] if (x_new in uf_bwd) else x_new)
                edge_to_remove = (x_old, b_old)
                try:
                    del edges_to_value[edge_to_remove]
                except KeyError as e:
                    if (not allow_poped_outside):
                        raise e
                edge_to_remove = (x_old, a_old)
                if (edge_to_remove in edges_to_value):
                    del edges_to_value[edge_to_remove]
                edge_to_add = (x_new, a_new)
                cur_x_node.update_compound_weights_from_uf(uf)
                value_of_edge_to_add = self.ewf(cur_x_node, cur_a_node)
                edges_to_value[edge_to_add] = value_of_edge_to_add

    def search_left(self, root_left: Node, comm_objective, uf) -> Tuple[(bool, bool, List[Tuple[(int, int)]], Union[(Node, None)])]:
        """Walk left from ``root_left`` through ``args``, merging ancestors into
        it until the merged block's computation reaches its (recomputed)
        forward communication objective.

        Returns ``(success, graph_changed, merges, protected_node)``. On
        failure ``merges`` is emptied; ``graph_changed`` tells the caller
        whether it must restore the saved graph state.
        """
        partial_uf = UnionFind()
        merges = []
        graph_changed = False
        # weight functions must be in pure-comp / pure-comm configuration here
        assert (self.nwf.ratio == 1)
        assert (self.ewf.ratio == 0)
        current_comp = self.nwf(root_left)
        if (current_comp >= comm_objective):
            warnings.warn("early exit without merges shouldn't happen")
            return (True, graph_changed, merges, root_left)
        cur_merged = root_left
        source = root_left
        visited = {source}
        # DFS over ancestors; each queue entry is (node, iterator over its args)
        queue = deque([(source, reversed(source.args))])
        while queue:
            (parent, children) = queue[0]
            try:
                child = next(children)
                if (child not in visited):
                    visited.add(child)
                    if ((child.id in self.protected_nodes) or (child.type == NodeTypes.IN)):
                        continue
                    can_merge_without_cycles = (not check_cycle2(self.graph, child, cur_merged))
                    if can_merge_without_cycles:
                        queue.append((child, reversed(child.args)))
                        assert (child in cur_merged.args)
                        self.graph.merge(child.id, cur_merged.id, edge_weight_function=self.ewf, uf=uf, partial_uf=partial_uf)
                        self.graph.topo_sort(change_graph=False, resort_edges=False)
                        graph_changed = True
                        partial_uf.add(child.id)
                        partial_uf.add(cur_merged.id)
                        partial_uf.union(child.id, cur_merged.id)
                        merges.append((child.id, cur_merged.id))
                        cur_merged = child
                        current_comp = self.nwf(cur_merged)
                        # objective shrinks as outgoing edges get absorbed
                        comm_objective = sum((self.ewf(cur_merged, nn) for nn in cur_merged.out_edges))
                        if (current_comp >= comm_objective):
                            return (True, graph_changed, merges, cur_merged)
            except StopIteration:
                queue.popleft()
        assert (current_comp < comm_objective)
        merges = []
        return (False, graph_changed, merges, None)

    def sorted_graph_forward_edges(self, descending=False) -> Dict[(Tuple[(Node, Node)], float)]:
        """All (src_id, dst_id) edges keyed to their forward communication
        weight, in a ValueSortedDict (ascending unless ``descending``)."""
        edges = list()
        for node in self.graph.non_input_nodes:
            edges.extend([(node, e) for e in node.out_edges])
        if (not descending):
            d = ValueSortedDict({(e[0].id, e[1].id): self.ewf(*e) for e in edges})
        else:
            d = ValueSortedDict((lambda x: (- x)), {(e[0].id, e[1].id): self.ewf(*e) for e in edges})
        return d

    def sorted_block_to_cuts(self, forward=True, descending=False) -> Dict[(Tuple[(Node, Node)], float)]:
        """Node id -> total forward (or backward) communication of its cut,
        in a ValueSortedDict (ascending unless ``descending``)."""
        if forward:
            t = {node.id: sum((self.ewf(node, nn) for nn in node.out_edges)) for node in self.graph.non_input_nodes}
        else:
            t = {node.id: self.cwf.calculate_comm_backward([(nn, node) for nn in node.in_edges]) for node in self.graph.non_input_nodes}
        if (not descending):
            d = ValueSortedDict(t)
        else:
            d = ValueSortedDict((lambda x: (- x)), t)
        return d
|
class C1(nn.Module):
    """Tiny test module: Linear(10,10) -> Dropout(0.5) -> Linear(10,10)."""

    def __init__(self):
        super().__init__()
        self.layer1 = torch.nn.Linear(10, 10)
        self.dropout = nn.Dropout(0.5, inplace=False)
        self.layer2 = torch.nn.Linear(10, 10)

    def forward(self, x):
        """Run the input through layer1, dropout, then layer2."""
        out = self.layer1(x)
        out = self.dropout(out)
        return self.layer2(out)
|
class MyTestCase(unittest.TestCase):
    """Unit test for prefix coarsening driven by ``special_blocks``."""

    def test_something(self):
        """Coarsening a Sequential of three ``C1`` blocks marked as special
        should leave exactly 3 non-input nodes (one per block)."""
        depth = 1000
        # one dummy input batch of shape (1, 10) for profiling
        model_args = (torch.randn((1, 10)),)
        model = nn.Sequential(C1(), C1(), C1())
        graph: Graph = build_profiled_graph(model, model_args=model_args, n_iter=1, max_depth=depth)
        args = SimpleNamespace(bwd_to_fwd_ratio=1, bw=12, weight_mult_factor=10000.0, auto_infer_node_bwd_to_fwd_ratio=False, penalize_non_tensors=False, edge_penalty=10000.0)
        (node_weight_function, edge_weight_function) = get_weight_functions(args)
        uf = UnionFind(elements=[n.id for n in graph.non_input_nodes])
        basic_blocks = ()
        # mark C1 instances as special so each one is coarsened as a prefix
        args.special_blocks = ['C1']
        special_blocks = choose_blocks(model, args, blocks_arg_name='special_blocks')
        (prev_graph, matching, graph, uf, uf2, sb_names) = coarsen_prefixes(model, graph, node_weight_function, edge_weight_function, uf, basic_blocks=basic_blocks, special_blocks=special_blocks, depth=depth)
        print(sb_names)
        self.assertEqual((graph.num_nodes - graph.num_inputs), 3)
|
def calc_data_parall_comm_time_orig(num_machines, total_parameter_size, network_bandwidth):
    """Original data-parallel all-reduce time estimate:
    4*(m-1)*params / (bandwidth*m) for m machines."""
    bytes_on_wire = (4 * (num_machines - 1)) * total_parameter_size
    return bytes_on_wire / (network_bandwidth * num_machines)
|
def calc_data_parall_comm_time_fixed(num_machines, total_parameter_size, network_bandwidth):
    """Adjusted data-parallel all-reduce time estimate:
    2 * 4*(m-1)*params / bandwidth for m machines."""
    one_direction = ((4 * (num_machines - 1)) * total_parameter_size) / network_bandwidth
    return 2 * one_direction
|
def partition_pipedream(graph: Graph, num_gpus: int, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None, use_layers_graph: bool=True, num_machines_in_first_level=None, memory_size=10000000000.0, verbose=True, force_stright_pipeline=True, node_mem_estimator: NodeMemoryEstimator=NodeMemoryEstimator()):
    """Partition ``graph`` across ``num_gpus`` with PipeDream's dynamic program.

    ``A[i][j]`` is a ``(best_time, optimal_split)`` pair: the minimal
    bottleneck per-stage time for placing the first ``i + 1`` non-input
    nodes on ``j + 1`` machines, plus the back-pointer used to reconstruct
    the split points.  Stage ids are written into the nodes and, with
    ``use_layers_graph``, induced back onto ``graph``.

    Args:
        graph: profiled computation graph (topo-sorted here).
        num_gpus: total number of machines to partition over.
        node_weight_function / edge_weight_function: profiled weights.
        use_layers_graph: must be True; works on a constants-free copy.
        num_machines_in_first_level: if set, pins the first level's
            replication factor.
        memory_size: per-machine activation budget; spans whose stashed
            activations exceed it are marked infeasible.
        force_stright_pipeline: restrict every stage to one machine.
        node_mem_estimator: NOTE(review) mutable default — evaluated once at
            definition time and shared across calls; confirm intentional.

    Returns:
        ``graph`` with ``stage_id`` assigned on its nodes.

    Raises:
        NotImplementedError: if the resulting pipeline is not straight
            (number of stages != num_gpus).

    NOTE(review): a later definition of ``partition_pipedream`` in this
    module shadows this one at import time — confirm which is intended.
    """
    print('PipeDream Partitioning')
    num_machines = num_gpus
    # bw is scaled by 1e6; units presumably match the node-time units used by
    # calc_data_parall_comm_time — TODO confirm.
    network_bandwidth = (edge_weight_function.bw * 1000000.0)
    assert use_layers_graph
    graph.topo_sort()
    if use_layers_graph:
        (work_graph, lookup) = graph.new_graph_without_constants()
    else:
        (work_graph, lookup) = (graph, None)
    saved_work_graph = work_graph
    # The DP assumes simple edges; work on a copy without parallel edges.
    work_graph = work_graph.get_copy_without_parallel_edges()
    non_input_nodes = list(work_graph.non_input_nodes)
    num_non_input_nodes = len(non_input_nodes)
    # A[i][j] = (best stage time for nodes[0..i] on j+1 machines, back-pointer).
    A: List[List[Tuple[(Any, Any)]]] = []
    for i in range(len(non_input_nodes)):
        row_A = []
        for j in range(num_machines):
            row_A.append((None, None))
        A.append(row_A)
    print('-I- Initializing data-parallel stage T_i_j')
    # Base case: run the whole prefix [0..i] as one (possibly replicated)
    # data-parallel stage on j+1 machines.
    cum_sum = 0.0
    cum_activation_size = 0.0
    cum_parameter_size = 0.0
    something_works = False
    for (i, node) in enumerate(work_graph.non_input_nodes):
        cum_sum += node_weight_function(node)
        cum_activation_size += node_mem_estimator(node)
        cum_parameter_size += node.num_parameters
        max_m = (1 if force_stright_pipeline else num_machines)
        for j in range(max_m):
            stashed_data_size = cum_activation_size
            if (stashed_data_size > memory_size):
                # Prefix does not fit the per-machine memory budget.
                A[i][j] = (None, None)
                continue
            # NOTE(review): calc_data_parall_comm_time is presumably bound to
            # one of the _orig/_fixed variants elsewhere in the module.
            data_parallel_communication_time = calc_data_parall_comm_time(num_machines=(j + 1), total_parameter_size=cum_parameter_size, network_bandwidth=network_bandwidth)
            if ((num_machines_in_first_level is not None) and (j != (num_machines_in_first_level - 1))):
                A[i][j] = (None, None)
            else:
                # Compute and all-reduce are overlapped (max), then divided
                # by the replication factor.
                A[i][j] = ((max(cum_sum, data_parallel_communication_time) / (j + 1)), None)
                something_works = True
    print('-I- Done')
    if (not something_works):
        warnings.warn("can't run any node without extra memory reduction - need to combine with other memory reduction methods")
    min_machines = (1 if (num_machines_in_first_level is None) else num_machines_in_first_level)
    # Prefix sums so any contiguous span's cost is a single subtraction.
    cum_times = []
    cum_activation_sizes = []
    cum_parameter_sizes = []
    for (i, node) in enumerate(work_graph.non_input_nodes):
        if (i == 0):
            cum_times.append(node_weight_function(node))
            cum_activation_sizes.append(node_mem_estimator(node))
            cum_parameter_sizes.append(node.num_parameters)
        else:
            cum_times.append((cum_times[(- 1)] + node_weight_function(node)))
            cum_activation_sizes.append((cum_activation_sizes[(- 1)] + node_mem_estimator(node)))
            cum_parameter_sizes.append((cum_parameter_sizes[(- 1)] + node.num_parameters))
    # Edge weights must be pure communication (no compute ratio folded in).
    assert (edge_weight_function.ratio == 0)
    print('starting the optimization vs pipeline')
    # Main DP: try every last-stage span (j, i] replicated on m_prime machines.
    for m in tqdm(range(min_machines, num_machines), desc='machines'):
        for i in tqdm(range(1, len(non_input_nodes)), desc='i'):
            node = non_input_nodes[i]
            (min_pipeline_time, optimal_split) = A[i][m]
            for j in range(i):
                max_m = (2 if force_stright_pipeline else (m + 1))
                for m_prime in range(1, max_m):
                    # Factor 2: activations forward plus gradients backward.
                    input_transfer_time = ((2.0 * sum((edge_weight_function(nn, node) for nn in node.in_edges))) / m_prime)
                    output_transfer_time = ((2.0 * sum((edge_weight_function(node, nn) for nn in node.out_edges))) / m_prime)
                    last_stage_time = (cum_times[i] - cum_times[j])
                    last_stage_parameter_size = (cum_parameter_sizes[i] - cum_parameter_sizes[j])
                    last_stage_activation = (cum_activation_sizes[i] - cum_activation_sizes[j])
                    stashed_data_size = last_stage_activation
                    if (stashed_data_size > memory_size):
                        continue
                    last_stage_time = max(last_stage_time, calc_data_parall_comm_time(num_machines=m_prime, total_parameter_size=last_stage_parameter_size, network_bandwidth=network_bandwidth))
                    last_stage_time /= m_prime
                    if (A[j][(m - m_prime)][0] is None):
                        # Sub-problem infeasible (memory / first-level pin).
                        continue
                    pipeline_time = max(A[j][(m - m_prime)][0], last_stage_time, input_transfer_time)
                    if (output_transfer_time is not None):
                        # NOTE(review): always a float here, so this check is
                        # vestigial (carried over from upstream PipeDream).
                        pipeline_time = max(pipeline_time, output_transfer_time)
                    if ((min_pipeline_time is None) or (min_pipeline_time > pipeline_time)):
                        optimal_split = (j, (m - m_prime))
                        min_pipeline_time = pipeline_time
            A[i][m] = (min_pipeline_time, optimal_split)
    # Walk the back-pointers from the full problem to recover the splits.
    metadata = A[(len(non_input_nodes) - 1)][(num_machines - 1)]
    next_split = metadata[1]
    remaining_machines_left = num_machines
    splits = []
    replication_factors = []
    prev_split = len(non_input_nodes)
    while (next_split is not None):
        num_machines_used = ((remaining_machines_left - next_split[1]) - 1)
        if verbose:
            print(('Number of machines used: %d...' % num_machines_used))
            print(('Split between layers %d and %d...' % (next_split[0], (next_split[0] + 1))))
        splits.append((next_split[0] + 1))
        compute_time = 0.0
        parameter_size = 0.0
        for i in range((next_split[0] + 1), prev_split):
            node = non_input_nodes[i]
            compute_time += node_weight_function(node)
            parameter_size += node.num_parameters
        dp_communication_time = calc_data_parall_comm_time(num_machines=num_machines_used, total_parameter_size=parameter_size, network_bandwidth=network_bandwidth)
        node = non_input_nodes[next_split[0]]
        pp_communication_time_input = (sum((edge_weight_function(nn, node) for nn in node.in_edges)) / num_machines_used)
        node = non_input_nodes[(prev_split - 1)]
        pp_communication_time_output = (sum((edge_weight_function(node, nn) for nn in node.out_edges)) / num_machines_used)
        compute_time /= num_machines_used
        dp_communication_time /= num_machines_used
        if verbose:
            print(('Compute time = %f, Data-parallel communication time = %f, Pipeline-parallel communication time = %f...' % (compute_time, dp_communication_time, max(pp_communication_time_input, pp_communication_time_output))))
        prev_split = splits[(- 1)]
        metadata = A[next_split[0]][next_split[1]]
        next_split = metadata[1]
        replication_factors.append(num_machines_used)
        remaining_machines_left -= num_machines_used
    # The first (remaining) stage covers nodes [0, prev_split).
    if verbose:
        print(('Number of machines used: %d...' % remaining_machines_left))
    num_machines_used = remaining_machines_left
    compute_time = 0.0
    parameter_size = 0.0
    for i in range(prev_split):
        node = non_input_nodes[i]
        compute_time += node_weight_function(non_input_nodes[i])
        parameter_size += node.num_parameters
    dp_communication_time = calc_data_parall_comm_time(num_machines=num_machines_used, total_parameter_size=parameter_size, network_bandwidth=network_bandwidth)
    compute_time /= num_machines_used
    dp_communication_time /= num_machines_used
    if verbose:
        print(('Compute time = %f, Data-parallel communication time = %f...' % (compute_time, dp_communication_time)))
    print()
    print('(Split start, split end) / time taken per stage / replication factor per stage:')
    prev_split = 0
    # Splits were collected back-to-front; restore forward order and close
    # the last stage at the end of the node list.
    splits.reverse()
    splits.append(len(non_input_nodes))
    replication_factors.append(remaining_machines_left)
    replication_factors.reverse()
    for i in range(len(splits)):
        time = 0.0
        if verbose:
            print((prev_split, splits[i]))
        for j in range(prev_split, splits[i]):
            time += node_weight_function(non_input_nodes[j])
        if verbose:
            print(time, replication_factors[i])
        prev_split = splits[i]
    # Summary: single machine vs full data parallelism vs the pipeline.
    total_time = 0.0
    total_parameter_size = 0.0
    for i in range(len(non_input_nodes)):
        node = non_input_nodes[i]
        total_time += node_weight_function(node)
        total_parameter_size += node.num_parameters
    data_parallel_communication_time = (calc_data_parall_comm_time(num_machines=num_machines, total_parameter_size=total_parameter_size, network_bandwidth=network_bandwidth) / num_machines)
    data_parallel_total_time = ((total_time / num_machines) + data_parallel_communication_time)
    pipeline_parallel_total_time = A[(len(non_input_nodes) - 1)][(num_machines - 1)][0]
    if verbose:
        print()
        print('Time taken by single-stage pipeline:', total_time)
        print('Time per stage in pipeline:', pipeline_parallel_total_time)
        print('Throughput increase (compared to single machine):', (total_time / pipeline_parallel_total_time))
        print('[Note that single-machine and %d-machine DP might not fit given memory constraints]')
        print(('Throughput increase of %d-machine DP compared to single machine:' % num_machines), (total_time / data_parallel_total_time))
        print(('Throughput increase (compared to %d-machine DP):' % num_machines), (data_parallel_total_time / pipeline_parallel_total_time))
        print('Number of images that need to be admitted:', int((math.ceil((float(num_machines) / replication_factors[0])) * replication_factors[0])))
    print('parameters', total_parameter_size)
    print('splits', splits)
    print('replication_factors', replication_factors)
    print('data_parallel_communication_time', data_parallel_communication_time)
    print(f'PipeDream returned {len(splits)} stages')
    # Assign consecutive stage ids according to the split boundaries.
    stage_id = 0
    start = 0
    for stop in splits:
        for n in non_input_nodes[start:stop]:
            n.stage_id = stage_id
        start = stop
        stage_id += 1
    start = 0
    params_per_stage = {i: 0 for i in range(len(splits))}
    for n in non_input_nodes:
        params_per_stage[n.stage_id] += n.num_parameters
    print('params per stage', params_per_stage)
    work_graph = post_process_partition(work_graph)
    if use_layers_graph:
        # Copy stage ids from the constants-free copy back to the full graph.
        graph.induce_layer_partition(work_graph, lookup)
    if (len(splits) != num_gpus):
        graph.serialize('saved_pipedream_non_stright_pipeline_graph')
        raise NotImplementedError('PipeDream returned non-straight pipeline')
    return graph
|
def calc_data_parall_comm_time_orig(num_machines, total_parameter_size, network_bandwidth):
    """Time for a ring all-reduce of ``total_parameter_size`` over ``num_machines``.

    NOTE(review): this re-definition duplicates an identical function earlier
    in the module; Python keeps this later binding.
    """
    bytes_moved = (4 * (num_machines - 1)) * total_parameter_size
    effective_bandwidth = network_bandwidth * num_machines
    return bytes_moved / effective_bandwidth
|
def calc_data_parall_comm_time_fixed(num_machines, total_parameter_size, network_bandwidth):
    """Doubled all-reduce estimate that does not divide by the machine count.

    NOTE(review): duplicates an identical definition earlier in the module;
    this later binding is the one that survives import.
    """
    single_pass = ((4 * (num_machines - 1)) * total_parameter_size) / network_bandwidth
    return 2 * single_pass
|
def partition_pipedream(graph: Graph, num_gpus: int, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None, use_layers_graph: bool=True, num_machines_in_first_level=None, verbose=True, force_stright_pipeline=True):
    """PipeDream dynamic-programming partitioner (variant without memory checks).

    NOTE(review): this re-definition shadows the earlier
    ``partition_pipedream`` in this module.  It differs from the earlier one
    in that (a) it has no activation-memory feasibility checks, and (b) the
    data-parallel all-reduce cost is *added* with a 0.3 weighting instead of
    overlapped via ``max`` — confirm which variant is the intended export.

    ``A[i][j]`` holds ``(best_time, optimal_split)`` for running the first
    ``i + 1`` non-input nodes on ``j + 1`` machines.  Stage ids are written
    into the nodes and induced back onto ``graph``.

    Raises:
        NotImplementedError: if the result is not a straight pipeline
            (number of stages != num_gpus).
    """
    print('PipeDream Partitioning')
    num_machines = num_gpus
    # bw scaled by 1e6; units presumably match node-time units — TODO confirm.
    network_bandwidth = (edge_weight_function.bw * 1000000.0)
    assert use_layers_graph
    graph.topo_sort()
    if use_layers_graph:
        (work_graph, lookup) = graph.new_graph_without_constants()
    else:
        (work_graph, lookup) = (graph, None)
    non_input_nodes = list(work_graph.non_input_nodes)
    num_non_input_nodes = len(non_input_nodes)
    # A[i][j] = (best stage time for nodes[0..i] on j+1 machines, back-pointer).
    A: List[List[Tuple[(Any, Any)]]] = []
    for i in range(len(non_input_nodes)):
        row_A = []
        for j in range(num_machines):
            row_A.append((None, None))
        A.append(row_A)
    print('-I- Initializing data-parallel stage T_i_j')
    # Base case: whole prefix [0..i] as one data-parallel stage on j+1 machines.
    cum_sum = 0.0
    cum_parameter_size = 0.0
    for (i, node) in enumerate(work_graph.non_input_nodes):
        cum_sum += node_weight_function(node)
        cum_parameter_size += node.num_parameters
        max_m = (1 if force_stright_pipeline else num_machines)
        for j in range(max_m):
            # NOTE(review): calc_data_parall_comm_time is presumably an alias
            # for one of the _orig/_fixed variants defined elsewhere.
            data_parallel_communication_time = calc_data_parall_comm_time(num_machines=(j + 1), total_parameter_size=cum_parameter_size, network_bandwidth=network_bandwidth)
            if ((num_machines_in_first_level is not None) and (j != (num_machines_in_first_level - 1))):
                A[i][j] = (None, None)
            else:
                # 0.3-weighted additive all-reduce cost (no max/overlap here).
                A[i][j] = (((cum_sum + (0.3 * data_parallel_communication_time)) / (j + 1)), None)
    print('-I- Done')
    min_machines = (1 if (num_machines_in_first_level is None) else num_machines_in_first_level)
    # Prefix sums so a contiguous span's cost is a single subtraction.
    cum_times = []
    # NOTE(review): cum_activation_sizes is never filled or used in this
    # variant — leftover from the memory-aware version.
    cum_activation_sizes = []
    cum_parameter_sizes = []
    for (i, node) in enumerate(work_graph.non_input_nodes):
        if (i == 0):
            cum_times.append(node_weight_function(node))
            cum_parameter_sizes.append(node.num_parameters)
        else:
            cum_times.append((cum_times[(- 1)] + node_weight_function(node)))
            cum_parameter_sizes.append((cum_parameter_sizes[(- 1)] + node.num_parameters))
    # Edge weights must be pure communication (no compute ratio folded in).
    assert (edge_weight_function.ratio == 0)
    print('starting the optimization vs pipeline')
    # Main DP: try every last-stage span (j, i] replicated on m_prime machines.
    for m in tqdm(range(min_machines, num_machines), desc='machines'):
        for i in tqdm(range(1, len(non_input_nodes)), desc='i'):
            node = non_input_nodes[i]
            (min_pipeline_time, optimal_split) = A[i][m]
            for j in range(i):
                max_m = (2 if force_stright_pipeline else (m + 1))
                for m_prime in range(1, max_m):
                    # Factor 2: activations forward plus gradients backward.
                    input_transfer_time = ((2.0 * sum((edge_weight_function(nn, node) for nn in node.in_edges))) / m_prime)
                    output_transfer_time = ((2.0 * sum((edge_weight_function(node, nn) for nn in node.out_edges))) / m_prime)
                    last_stage_time = (cum_times[i] - cum_times[j])
                    last_stage_parameter_size = (cum_parameter_sizes[i] - cum_parameter_sizes[j])
                    last_stage_time += (0.3 * calc_data_parall_comm_time(num_machines=m_prime, total_parameter_size=last_stage_parameter_size, network_bandwidth=network_bandwidth))
                    last_stage_time /= m_prime
                    if (A[j][(m - m_prime)][0] is None):
                        # Sub-problem infeasible (first-level pin).
                        continue
                    pipeline_time = max(A[j][(m - m_prime)][0], last_stage_time, input_transfer_time)
                    if (output_transfer_time is not None):
                        # NOTE(review): always a float here; vestigial check.
                        pipeline_time = max(pipeline_time, output_transfer_time)
                    if ((min_pipeline_time is None) or (min_pipeline_time > pipeline_time)):
                        optimal_split = (j, (m - m_prime))
                        min_pipeline_time = pipeline_time
            A[i][m] = (min_pipeline_time, optimal_split)
    # Backtrack from the full problem to recover the split points.
    metadata = A[(len(non_input_nodes) - 1)][(num_machines - 1)]
    next_split = metadata[1]
    remaining_machines_left = num_machines
    splits = []
    replication_factors = []
    prev_split = len(non_input_nodes)
    while (next_split is not None):
        num_machines_used = ((remaining_machines_left - next_split[1]) - 1)
        if verbose:
            print(('Number of machines used: %d...' % num_machines_used))
            print(('Split between layers %d and %d...' % (next_split[0], (next_split[0] + 1))))
        splits.append((next_split[0] + 1))
        compute_time = 0.0
        parameter_size = 0.0
        for i in range((next_split[0] + 1), prev_split):
            node = non_input_nodes[i]
            compute_time += node_weight_function(node)
            parameter_size += node.num_parameters
        dp_communication_time = calc_data_parall_comm_time(num_machines=num_machines_used, total_parameter_size=parameter_size, network_bandwidth=network_bandwidth)
        node = non_input_nodes[next_split[0]]
        pp_communication_time_input = (sum((edge_weight_function(nn, node) for nn in node.in_edges)) / num_machines_used)
        node = non_input_nodes[(prev_split - 1)]
        pp_communication_time_output = (sum((edge_weight_function(node, nn) for nn in node.out_edges)) / num_machines_used)
        compute_time /= num_machines_used
        dp_communication_time /= num_machines_used
        if verbose:
            print(('Compute time = %f, Data-parallel communication time = %f, Pipeline-parallel communication time = %f...' % (compute_time, dp_communication_time, max(pp_communication_time_input, pp_communication_time_output))))
        prev_split = splits[(- 1)]
        metadata = A[next_split[0]][next_split[1]]
        next_split = metadata[1]
        replication_factors.append(num_machines_used)
        remaining_machines_left -= num_machines_used
    # The first (remaining) stage covers nodes [0, prev_split).
    if verbose:
        print(('Number of machines used: %d...' % remaining_machines_left))
    num_machines_used = remaining_machines_left
    compute_time = 0.0
    parameter_size = 0.0
    for i in range(prev_split):
        node = non_input_nodes[i]
        compute_time += node_weight_function(non_input_nodes[i])
        parameter_size += node.num_parameters
    dp_communication_time = calc_data_parall_comm_time(num_machines=num_machines_used, total_parameter_size=parameter_size, network_bandwidth=network_bandwidth)
    compute_time /= num_machines_used
    dp_communication_time /= num_machines_used
    if verbose:
        print(('Compute time = %f, Data-parallel communication time = %f...' % (compute_time, dp_communication_time)))
    print()
    print('(Split start, split end) / time taken per stage / replication factor per stage:')
    prev_split = 0
    # Splits were gathered back-to-front; restore forward order and close the
    # last stage at the end of the node list.
    splits.reverse()
    splits.append(len(non_input_nodes))
    replication_factors.append(remaining_machines_left)
    replication_factors.reverse()
    for i in range(len(splits)):
        time = 0.0
        if verbose:
            print((prev_split, splits[i]))
        for j in range(prev_split, splits[i]):
            time += node_weight_function(non_input_nodes[j])
        if verbose:
            print(time, replication_factors[i])
        prev_split = splits[i]
    # Summary: single machine vs full data parallelism vs the pipeline.
    total_time = 0.0
    total_parameter_size = 0.0
    for i in range(len(non_input_nodes)):
        node = non_input_nodes[i]
        total_time += node_weight_function(node)
        total_parameter_size += node.num_parameters
    data_parallel_communication_time = (calc_data_parall_comm_time(num_machines=num_machines, total_parameter_size=total_parameter_size, network_bandwidth=network_bandwidth) / num_machines)
    data_parallel_total_time = ((total_time / num_machines) + data_parallel_communication_time)
    pipeline_parallel_total_time = A[(len(non_input_nodes) - 1)][(num_machines - 1)][0]
    if verbose:
        print()
        print('Time taken by single-stage pipeline:', total_time)
        print('Time per stage in pipeline:', pipeline_parallel_total_time)
        print('Throughput increase (compared to single machine):', (total_time / pipeline_parallel_total_time))
        print('[Note that single-machine and %d-machine DP might not fit given memory constraints]')
        print(('Throughput increase of %d-machine DP compared to single machine:' % num_machines), (total_time / data_parallel_total_time))
        print(('Throughput increase (compared to %d-machine DP):' % num_machines), (data_parallel_total_time / pipeline_parallel_total_time))
        print('Number of images that need to be admitted:', int((math.ceil((float(num_machines) / replication_factors[0])) * replication_factors[0])))
    print('parameters', total_parameter_size)
    print('splits', splits)
    print('replication_factors', replication_factors)
    print('data_parallel_communication_time', data_parallel_communication_time)
    print(f'PipeDream returned {len(splits)} stages')
    # Write consecutive stage ids according to the split boundaries.
    stage_id = 0
    start = 0
    for stop in splits:
        for n in non_input_nodes[start:stop]:
            n.stage_id = stage_id
        start = stop
        stage_id += 1
    start = 0
    params_per_stage = {i: 0 for i in range(len(splits))}
    for n in non_input_nodes:
        params_per_stage[n.stage_id] += n.num_parameters
    print('params per stage', params_per_stage)
    work_graph = post_process_partition(work_graph)
    if use_layers_graph:
        # Copy stage ids from the constants-free copy back onto the full graph.
        graph.induce_layer_partition(work_graph, lookup)
    if (len(splits) != num_gpus):
        graph.serialize('saved_pipedream_non_stright_pipeline_graph')
        raise NotImplementedError('PipeDream returned non-straight pipeline')
    return graph
|
def gen_stage_to_device_map(graph) -> List[int]:
    """Build the pipeline-representation stage-to-device list.

    Args:
        graph: partitioned graph whose non-input nodes carry ``stage_id``
            and ``gpu_id``.

    Returns:
        l such that l[i] = k => stage i is on device k.

    # HACKY lazy code copy pasta
    # TODO: can also calculate number of dummy stages per GPU.
    """
    analysis_kwargs = {}
    gpu_to_stages = defaultdict(set)
    stage_to_gpu = dict()
    for n in graph.non_input_nodes:
        # Nodes without a GPU assignment (and constants) are ignored.
        if ((n.gpu_id is None) or (n.type == NodeTypes.CONSTANT)):
            continue
        gpu_to_stages[n.gpu_id].add(n.stage_id)
        if (n.stage_id in stage_to_gpu):
            # Each stage must live entirely on a single GPU.
            assert (stage_to_gpu[n.stage_id] == n.gpu_id), (stage_to_gpu[n.stage_id], n.gpu_id)
        else:
            assert (n.gpu_id is not None)
            stage_to_gpu[n.stage_id] = n.gpu_id
    if gpu_to_stages:
        analysis_kwargs['stages_on_same_gpu'] = list(gpu_to_stages.values())
    else:
        raise RuntimeError('no GPUs.')
    # NOTE(review): this sorted list is computed but never read afterwards.
    stage_to_gpu = [stage_to_gpu[i] for i in sorted(stage_to_gpu.keys())]
    stages_on_same_gpu = analysis_kwargs['stages_on_same_gpu']
    unique_stages_on_same_gpu = stages_on_same_gpu
    # Re-index: map every stage id to the full set of stages sharing its GPU.
    stages_on_same_gpu = defaultdict(set)
    for i in unique_stages_on_same_gpu:
        for j in i:
            stages_on_same_gpu[j] = i
    for i in unique_stages_on_same_gpu:
        assert (len(i) >= 1)
    n_partitions = graph.num_partitions
    pipeline_representation_stage_to_device_map = sorted_stage_to_device_map(n_partitions, stages_on_same_gpu)
    return pipeline_representation_stage_to_device_map
|
def sorted_stage_to_device_map(n_partitions, stages_on_same_gpu):
    """Assign each stage id in [0, n_partitions) a device id, then compress
    the device ids to a dense 0..k range preserving their order.

    Args:
        n_partitions: number of stages.
        stages_on_same_gpu: mapping stage_id -> set of stage ids sharing that
            stage's GPU; stages in the map all get the group's minimal id.

    Returns:
        List l with l[i] = device index of stage i, device indices dense
        starting from 0.

    Bug fix: ``seen_devices`` used to be re-created inside the loop, so
    ``len(seen_devices)`` was always 0 and every unmapped stage collapsed
    onto device 0 (the subsequent ``add`` was dead code).  The set now
    accumulates across iterations so each unmapped stage gets a fresh id.
    """
    pipeline_representation_stage_to_device_map = list()
    seen_devices = set()  # accumulates across stages (was wrongly reset per-iteration)
    for stage_id in range(n_partitions):
        if (stage_id in stages_on_same_gpu):
            # Stages grouped on one GPU all take the group's minimal stage id.
            device_id = min(stages_on_same_gpu[stage_id])
        else:
            # Unmapped stage: pick a device id not yet handed out this way.
            device_id = len(seen_devices)
        seen_devices.add(device_id)
        pipeline_representation_stage_to_device_map.append(device_id)
    # Re-number devices densely (0..k) while keeping their relative order.
    tmp = sorted(set(pipeline_representation_stage_to_device_map))
    tmp = {v: i for (i, v) in enumerate(tmp)}
    pipeline_representation_stage_to_device_map = [tmp[i] for i in pipeline_representation_stage_to_device_map]
    return pipeline_representation_stage_to_device_map
|
def re_assign_partition_indices(graph: Graph):
    """Renumber stage ids so they follow a topological order of the stage DAG."""
    stage_successors = defaultdict(set)
    for node in graph.non_input_nodes:
        assert (node.stage_id is not None)
        for succ in node.out_edges:
            assert (succ.stage_id is not None)
            stage_successors[node.stage_id].add(succ.stage_id)
    # Drop self-loops so the stage graph forms a DAG.
    for stage, succs in stage_successors.items():
        succs.discard(stage)
    # Freeze the defaultdict: unknown keys must now raise, not auto-create.
    stage_successors.default_factory = None
    translation = {old: new for new, old in enumerate(topological_sort(stage_successors))}
    for node in graph.nodes:
        try:
            node.stage_id = translation[node.stage_id]
        except KeyError:
            # Only graph inputs may carry a stage id missing from the DAG.
            assert (node in graph.inputs)
            warnings.warn(f'putting {node.scope} with stage_id {node.stage_id} to stage {0}')
            node.stage_id = 0
|
def topological_sort(out_edges: Dict[(int, Set[int])]) -> List[int]:
    """Return a topological ordering of the stage ids in ``out_edges``.

    Ids that appear only as edge targets (sinks) are first registered in
    ``out_edges`` with an empty successor set — so ``out_edges`` is mutated.
    """
    visited = dict.fromkeys(out_edges, False)
    # Register target-only vertices so the DFS can start from / reach them.
    for targets in list(out_edges.values()):
        for t in targets:
            if (t in visited):
                continue
            print(f'stage {t} is probably a sink')
            visited[t] = False
            out_edges[t] = set()
    order: List[int] = []
    for vertex in out_edges.keys():
        if (not visited[vertex]):
            _topological_sort(out_edges, vertex, visited, order)
    return order
|
def _topological_sort(out_edges: Dict[(int, Set[int])], v: int, visited: Dict[(int, bool)], stack: List[int]):
visited[v] = True
for i in out_edges[v]:
if (not visited[i]):
_topological_sort(out_edges, i, visited, stack)
stack.insert(0, v)
|
def has_stage_cycles(graph: Graph) -> bool:
for u in graph.non_input_nodes:
for v in u.out_edges:
if (v.stage_id < u.stage_id):
return True
return False
|
class NodeTypes(IntEnum):
    '''
    Enum representing the possible types of Nodes in the Graph

    IntEnum so members compare equal to their integer values (callers use
    both ``n.type is NodeTypes.IN`` and ``n.type == NodeTypes.CONSTANT``).
    '''
    IN = 1
    BUFF_PARAM = 2
    LAYER = 3
    OP = 4
    CONSTANT = 5
    PRIMITIVE = 6

    def __repr__(self):
        # Show just the member name (e.g. 'LAYER') instead of the full path.
        return self.name
|
class Node():
    """A single vertex of the computation graph.

    Carries identity (``id``, ``scope``, ``type``), partition placement
    (``stage_id``, ``gpu_id``), profiling results (``weight``,
    ``num_parameters``, ``max_memory_bytes``), tensor metadata, and the
    connectivity: inputs split into positional ``args`` and keyword
    ``kwargs`` (source node -> keyword names), outputs in ``out_edges``.
    """

    def __init__(self, node_type: NodeTypes, idx, scope: str):
        assert (scope is not None)
        self.type = node_type
        self.id = idx
        self.scope = scope
        # Scope name to keep after merges (set by coarsening passes).
        self.scope_to_hold_to: Union[(str, None)] = None
        # Starts equal to the creation index; topo sorting may rewrite it.
        self.topo_sort_id = idx
        self.stage_id = 0
        self.gpu_id = None
        # Profiled forward/backward execution times.
        self.weight: Optional[ExecTimes] = None
        self.max_memory_bytes: float = 0
        self.num_parameters: Optional[int] = 0
        # Accumulated edge weight per destination node id; filled lazily by
        # maybe_create_compound_edge_weights.
        self.compound_edge_weights = defaultdict(float)
        self.out_edges: List[Node] = []
        self.args: List[Node] = []
        # Keyword inputs: source node -> list of keyword names it feeds.
        self.kwargs = defaultdict(list)
        self.value_type = None
        self.tensor_dtype = None
        self.tensor_shape = None
        self.req_grad = False
        self.is_contiguous = None
        self.constant_value = None

    def is_graph_input(self):
        """True iff this node is a graph input placeholder."""
        return (self.type is NodeTypes.IN)

    def maybe_create_compound_edge_weights(self, edge_weight_function):
        """Populate ``compound_edge_weights`` from out-edges if still empty.

        Returns True only when weights were created by this call (there were
        out-edges and no previously recorded weights).
        """
        if (not self.compound_edge_weights):
            for nn in self.out_edges:
                self.compound_edge_weights[nn.id] = edge_weight_function(self, nn)
            return (True if self.out_edges else False)
        return False

    def update_compound_weights_from_uf(self, uf: UnionFind, allow_outside_uf=False):
        """Re-key compound edge weights onto each destination's union-find root.

        With ``allow_outside_uf`` set, ids unknown to ``uf`` are skipped
        instead of being looked up.
        """
        items_to_handle = list(self.compound_edge_weights.items())
        if allow_outside_uf:
            items_to_handle = filter((lambda x: (x[0] in uf)), items_to_handle)
        for (id, weight) in items_to_handle:
            if (not uf.is_root(id)):
                new_id = uf[uf.find(id)]
                if (new_id != id):
                    # NOTE(review): the root's entry is overwritten, not
                    # accumulated — confirm a root never already holds weight.
                    del self.compound_edge_weights[id]
                    self.compound_edge_weights[new_id] = weight

    def add_kwarg(self, kwarg, kwarg_node):
        """Record that ``kwarg_node`` feeds keyword argument ``kwarg``."""
        self.kwargs[kwarg_node].append(kwarg)

    def add_arg(self, arg_node):
        """Append a positional input node."""
        self.args.append(arg_node)

    def add_out_edge(self, dest_node):
        """Append an outgoing edge to ``dest_node``."""
        self.out_edges.append(dest_node)

    def remove_output(self, out_node):
        """Remove one occurrence of ``out_node`` from the out-edges."""
        self.out_edges.remove(out_node)

    def replace_out_edge_on_merge(self, dest_node, new_dest_node):
        """Redirect the out-edge toward ``dest_node`` to ``new_dest_node``."""
        self.remove_output(dest_node)
        self.out_edges.append(new_dest_node)

    @property
    def in_edges(self) -> List['Node']:
        """All input nodes: positional args followed by kwarg sources."""
        return list(chain(self.args, self.kwargs.keys()))

    def replace_input(self, original, new):
        """Replace input ``original`` with ``new`` in args and/or kwargs.

        Raises:
            KeyError: if ``original`` is neither a positional nor a keyword
                input of this node.
        """
        must_be_in_kwargs = False
        try:
            self.args[self.args.index(original)] = new
        except:
            # Not a positional arg; it must then appear in kwargs.
            must_be_in_kwargs = True
        if (original in self.kwargs):
            self.kwargs[new] = self.kwargs.pop(original)
        elif must_be_in_kwargs:
            raise KeyError(f'''original is not in args and not in kwargs, original={original}
self={self}''')

    def remove_input(self, original):
        """Delete input ``original`` from args and/or kwargs.

        Raises:
            KeyError: if ``original`` is not an input of this node.
        """
        must_be_in_kwargs = False
        try:
            del self.args[self.args.index(original)]
        except:
            # Not a positional arg; it must then appear in kwargs.
            must_be_in_kwargs = True
        if (original in self.kwargs):
            self.kwargs.pop(original)
        elif must_be_in_kwargs:
            raise KeyError(f'original is not in args and not in kwargs, original={original}')

    @classmethod
    def from_other(cls, other: 'Node'):
        """Alternate constructor: shallow copy (edge lists copied, the Node
        objects inside them shared).

        NOTE(review): ``compound_edge_weights`` is not copied, and ``kwargs``
        becomes a plain dict rather than a defaultdict — confirm intended.
        """
        node = cls(other.type, other.id, other.scope)
        node.stage_id = other.stage_id
        node.gpu_id = other.gpu_id
        node.weight = other.weight
        node.num_parameters = other.num_parameters
        node.topo_sort_id = other.topo_sort_id
        node.out_edges = list(other.out_edges)
        node.args = list(other.args)
        node.kwargs = dict(other.kwargs)
        node.value_type = other.value_type
        node.tensor_dtype = other.tensor_dtype
        node.tensor_shape = other.tensor_shape
        node.req_grad = other.req_grad
        node.constant_value = other.constant_value
        node.max_memory_bytes = other.max_memory_bytes
        node.scope_to_hold_to = other.scope_to_hold_to
        node.is_contiguous = other.is_contiguous
        return node

    def load_state(self, state: dict, out_edges, args, kwargs):
        """Restore fields from a ``state_dict``; edge lists are passed in
        already resolved from ids to Node objects by the caller."""
        node = self
        node.args = args
        node.kwargs = kwargs
        node.out_edges = out_edges
        node.topo_sort_id = state['topo_sort_id']
        node.stage_id = state['stage_id']
        node.gpu_id = state['gpu_id']
        node.weight = state['weight']
        node.constant_value = state['constant_value']
        node.value_type = state['value_type']
        node.tensor_dtype = state['tensor_dtype']
        node.tensor_shape = state['tensor_shape']
        node.req_grad = state['req_grad']
        node.compound_edge_weights = state['compound_edge_weights']
        node.num_parameters = state['num_parameters']
        node.max_memory_bytes = state['max_memory_bytes']
        node.scope_to_hold_to = state['scope_to_hold_to']
        node.is_contiguous = state['is_contiguous']
        return node

    def state_dict(self):
        """Serialize to a picklable dict; edges are stored as node ids."""
        node = self
        state = dict(id=node.id, scope=node.scope, type=node.type, topo_sort_id=node.topo_sort_id, stage_id=node.stage_id, gpu_id=node.gpu_id, weight=node.weight, num_parameters=node.num_parameters, out_edges=[n.id for n in node.out_edges], args=[n.id for n in node.args], kwargs={n.id: kw for (n, kw) in node.kwargs.items()}, value_type=node.value_type, constant_value=node.constant_value, tensor_dtype=node.tensor_dtype, tensor_shape=node.tensor_shape, req_grad=node.req_grad, compound_edge_weights=deepcopy(node.compound_edge_weights), max_memory_bytes=node.max_memory_bytes, scope_to_hold_to=node.scope_to_hold_to, is_contiguous=node.is_contiguous)
        return state

    def __repr__(self):
        return str(f'id: {self.id}, scope:{self.scope}, type:{self.type}')
|
class Graph():
def __init__(self, nodes: Optional[GraphNodes], input_kw_ids: Optional[Dict[(int, str)]], output_ids: Optional[List[int]], depth: Optional[int], basic_blocks: Optional[Tuple[(Type[nn.Module], ...)]]):
self._nodes: GraphNodes = nodes
self.input_kw_ids = input_kw_ids
self.output_ids = output_ids
self.depth = depth
self.basic_blocks = basic_blocks
def merge(self, uid: int, vid: int, edge_weight_function: EdgeWeightFunction, dynamic_topo_sort=False, uf: Optional[UnionFind]=None, partial_uf: Optional[UnionFind]=None):
"merges u<-v\n # TODO: check if its legal merge. currently its user's responsibility.\n user has to check if does not exits a path from the set {n : u < n < v} to v.\n according to topo-sort.\n "
assert (uid != vid)
u = self._nodes[uid]
v = self._nodes[vid]
if (vid in self.output_ids):
warnings.warn(f'merging u<-v s.t v is an output node: v: {v} u:{u}')
self.output_ids[self.output_ids.index(vid)] = uid
u.remove_output(v)
v.remove_input(u)
self._update_compound_weights_on_merge(u, v, edge_weight_function, uf, partial_uf)
for a in v.in_edges:
a.maybe_create_compound_edge_weights(edge_weight_function=edge_weight_function)
weight = ExecTimes((u.weight.forward_time + v.weight.forward_time), (u.weight.backward_time + v.weight.backward_time))
u.weight = weight
for nn in v.out_edges:
u.out_edges.append(nn)
nn.replace_input(v, u)
nn.args = remove_dups(nn.args, nn)
for nn in v.args:
nn.remove_output(v)
nn.add_out_edge(u)
u.args.append(nn)
nn.out_edges = remove_dups(nn.out_edges, nn)
for (nn, nnv) in v.kwargs.values():
nn.remove_output(v)
nn.add_out_edge(u)
nn.out_edges = remove_dups(nn.out_edges, nn)
u.kwargs[nn] = nnv
u.args = remove_dups(u.args, u)
u.out_edges = remove_dups(u.out_edges, u)
del self._nodes[vid]
u.num_parameters += v.num_parameters
u.max_memory_bytes += v.max_memory_bytes
if (v.scope_to_hold_to is not None):
u.scope_to_hold_to = v.scope_to_hold_to
if dynamic_topo_sort:
raise NotImplementedError()
def _update_compound_weights_on_merge(self, u, v, edge_weight_function, uf, partial_uf: Optional[UnionFind]=None):
is_new_u_weights = u.maybe_create_compound_edge_weights(edge_weight_function)
is_new_v_weights = v.maybe_create_compound_edge_weights(edge_weight_function)
if (uf is None):
warnings.warn('merge without union find may miss compound edge weights')
else:
if (not is_new_v_weights):
v.update_compound_weights_from_uf(uf)
if partial_uf:
v.update_compound_weights_from_uf(partial_uf, allow_outside_uf=True)
if (not is_new_u_weights):
u.update_compound_weights_from_uf(uf)
if partial_uf:
u.update_compound_weights_from_uf(partial_uf, allow_outside_uf=True)
for (id, weight) in v.compound_edge_weights.items():
u.compound_edge_weights[id] += weight
for a in v.in_edges:
is_new_a_weights = a.maybe_create_compound_edge_weights(edge_weight_function)
if (not is_new_a_weights):
a.update_compound_weights_from_uf(uf)
if partial_uf:
a.update_compound_weights_from_uf(partial_uf, allow_outside_uf=True)
if (u in a.out_edges):
w = a.compound_edge_weights[u.id]
del a.compound_edge_weights[u.id]
a.compound_edge_weights[v.id] += w
def __len__(self) -> int:
return len(self._nodes)
@property
def n_stages(self) -> int:
return len({n.stage_id for n in self.nodes})
@property
def nodes(self) -> Iterable[Node]:
return self._nodes.values()
@property
def non_input_nodes(self) -> Iterable[Node]:
for n in self._nodes.values():
if (n.type is NodeTypes.IN):
continue
(yield n)
@property
def num_nodes(self) -> int:
return len(self._nodes)
@property
def inputs(self):
return (n for n in self.nodes if (n.type is NodeTypes.IN))
@property
def num_inputs(self) -> int:
return len(list(self.inputs))
@property
def num_partitions(self) -> int:
return len({n.stage_id for n in self.nodes})
@property
def unique_partitions_ids(self):
return {n.stage_id for n in self.nodes}
@property
def output_scopes(self):
return [n.scope for n in self.outputs]
@property
def outputs(self):
return [self._nodes[id] for id in self.output_ids]
@property
def model_name(self):
return self._nodes[self.output_ids[0]].scope.split('/', maxsplit=1)[0]
def asNetworkx(self, directed: bool=False, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None) -> nx.Graph:
'\n convert the graph into a weighted networkx graph.\n\n each node will have a scope,partition idx and weight associated with it.\n\n each weight will be weighted\n\n graph can be directed or undirected for a directed graph weighting functions can be given\n parallel edges will be discarded\n\n if not then weight will be set to 1.\n\n\n Parameters:\n ------------\n directed:\n whether to return a directed graph default is undirected\n node_weight_function:\n an optional weight function for the nodes should be a function from Node to int\n if not given a default weight of 1 will be given to all nodes\n edge_weight_function:\n an optional weight function for the edges should be a function (Node,Node) to int\n if not given a default value of 1 will be given to all edges\n '
try:
import networkx as nx
except ImportError as e:
print('networkx package not found')
raise e
if directed:
G = nx.DiGraph()
else:
G = nx.Graph()
for u in self.nodes:
dsts = set()
for v in u.out_edges:
if (v.id in dsts):
continue
dsts.add(v.id)
if (edge_weight_function is None):
w = 1
else:
w = int(max(1, edge_weight_function(u, v)))
G.add_edge(u.id, v.id, weight=w)
for n in self.nodes:
if (node_weight_function is None):
w = 1
else:
w = node_weight_function(n)
G.nodes[n.id]['weight'] = int(w)
return G
    def build_dot(self, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None):
        """Build a graphviz Digraph representation of this graph.

        Each node's label shows its scope, id, value type, optional model
        input/output markers, constant value, tensor dtype/shape, profiled
        weights and (if given) the node_weight_function value. Node fill
        color encodes the stage_id. Edges are labelled with their arg/kwarg
        position and (if given) the edge_weight_function value.

        Parameters
        ----------
        node_weight_function:
            optional function to get node weights
        edge_weight_function:
            optional function to get edge weights
        """
        theme = {'background_color': '#FFFFFF', 'fill_color': '#E8E8E8', 'outline_color': '#000000', 'font_color': '#000000', 'font_name': 'Times', 'font_size': '10', 'margin': '0,0', 'padding': '1.0,0.5'}
        from graphviz import Digraph
        dot = Digraph()
        dot.attr('graph', concentrate='true', bgcolor=theme['background_color'], color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'], margin=theme['margin'], rankdir='TB', pad=theme['padding'])
        dot.attr('node', shape='box', style='filled', margin='0,0', fillcolor=theme['fill_color'], color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'])
        dot.attr('edge', style='solid', color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'])
        # one color per stage_id; a stage_id above 16 would raise KeyError here
        colors = {0: 'grey', 1: 'green', 2: 'red', 3: 'yellow', 4: 'orange', 5: 'brown', 6: 'purple', 7: 'pink', 8: 'cyan', 9: 'gold', 10: 'darkolivegreen', 11: 'seagreen', 12: 'thistle', 13: 'plum', 14: 'deeppink', 15: 'lightyellow', 16: 'tan'}
        for node in self.nodes:
            node_id = node.id
            scope = node.scope
            value_type = node.value_type
            node_label = f'''{scope}
idx: {node_id}
value type: {value_type}'''
            if (node in self.outputs):
                node_label += '\nmodel output'
            if (node.type is NodeTypes.IN):
                node_label += '\nmodel input'
            if (node.id in self.input_kw_ids):
                node_label += f'''
kwarg: {self.input_kw_ids[node.id]}'''
            if (node.type is NodeTypes.CONSTANT):
                node_label += f'''
value: {node.constant_value}'''
            if issubclass(node.value_type, Tensor):
                node_label += f'''
tensor of type: {node.tensor_dtype}
shape: {node.tensor_shape}'''
            if node.weight:
                # append each profiled metric; name suffix decides the unit
                node_label = f'''{node_label}
Profile:'''
                for (k, v) in node.weight._asdict().items():
                    node_label += f'''
{k}:{v}'''
                    if ('time' in k):
                        node_label += ' ms'
                    elif (('memory' in k) or ('size' in k)):
                        node_label += ' MB'
            if node_weight_function:
                node_label += f'''
weight: {node_weight_function(node)}'''
            dot.node(str(node_id), label=node_label, fillcolor=colors[node.stage_id])
            # incoming edges: positional args first, then keyword args
            (args, kwargs) = (node.args, node.kwargs)
            for (idx, i) in enumerate(args):
                edge_label = f'arg: {idx}'
                if edge_weight_function:
                    edge_label += f'''
weight: {edge_weight_function(i, node)}'''
                dot.edge(str(i.id), str(node_id), label=edge_label)
            for (i, kws) in kwargs.items():
                for kw in kws:
                    edge_label = f'kwarg: {kw}'
                    if edge_weight_function:
                        edge_label += f'''
weight: {edge_weight_function(i, node)}'''
                    dot.edge(str(i.id), str(node_id), label=edge_label)
        return dot
    def display(self, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None):
        """Render the graph inline in a Jupyter/IPython notebook.

        Parameters
        ----------
        node_weight_function:
            optional node weight function, forwarded to build_dot
        edge_weight_function:
            optional edge weight function, forwarded to build_dot
        """
        try:
            from IPython.core.display import display_svg
            # raw=False lets IPython render the graphviz Digraph object itself
            display_svg(self.build_dot(node_weight_function=node_weight_function, edge_weight_function=edge_weight_function), raw=False)
        except ImportError:
            print('only works in ipython notebooks')
def save_as_pdf(self, file_name: str, directory: str, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None):
'\n save the rendered graph to a pdf file\n\n Parameters\n ----------\n file_name:\n the name of the saved file\n directory:\n directory to store the file in\n '
dot = self.build_dot(edge_weight_function=edge_weight_function, node_weight_function=node_weight_function)
dot.format = 'pdf'
import os
if os.path.exists(f'{directory}/{file_name}.pdf'):
os.remove(f'{directory}/{file_name}.pdf')
dot.render(file_name, directory=directory, cleanup=True)
return self
def serialize(self, path: str):
'\n serializes the graph to the given path\n can later be restored using Graph.deserialize(path)\n\n Parameters:\n -----------\n path:\n the path to store the graph object file will be called path.graph\n '
with open(path, 'wb') as f:
pickle.dump(self.state(), f)
def state(self):
'\n returns a dictionary containing the graphs state\n '
node_states = dict()
for node in self.nodes:
state = node.state_dict()
node_states[node.id] = state
return {'node_data': node_states, 'input_kw_ids': self.input_kw_ids, 'output_ids': self.output_ids, 'depth': self.depth, 'basic_blocks': self.basic_blocks}
    def load_state(self, graph_state):
        """Populate this graph in place from a dict produced by state().

        Nodes are rebuilt in two passes: first bare Node shells are created
        (type, id, scope) so every id has an object, then edges/args/kwargs —
        stored as id references — are resolved to the actual Node objects.
        Returns self to allow chaining.
        """
        output_ids = graph_state['output_ids']
        depth = graph_state['depth']
        basic_blocks = graph_state['basic_blocks']
        input_kw_ids = graph_state['input_kw_ids']
        node_states = graph_state['node_data']
        nodes = dict()
        # pass 1: create node shells in ascending id order
        for state in sorted(node_states.values(), key=(lambda s: s['id'])):
            node = Node(state['type'], state['id'], state['scope'])
            nodes[node.id] = node
        # pass 2: wire up connectivity now that all node objects exist
        for state in sorted(node_states.values(), key=(lambda s: s['id'])):
            node = nodes[state['id']]
            args = [nodes[n] for n in state['args']]
            kwargs = {nodes[n]: kw for (n, kw) in state['kwargs'].items()}
            # de-duplicate out edges and keep them sorted by id
            out_edges = sorted({nodes[n] for n in state['out_edges']}, key=(lambda x: x.id))
            node.load_state(state, out_edges, args, kwargs)
        self._nodes = nodes
        self.basic_blocks = basic_blocks
        self.depth = depth
        self.output_ids = output_ids
        self.input_kw_ids = input_kw_ids
        return self
    @classmethod
    def deserialize(cls, path: str) -> 'Graph':
        """Load and return a Graph previously stored with serialize().

        Parameters
        ----------
        path:
            the path the graph state was pickled to
        """
        with open(path, 'rb') as f:
            graph_data = pickle.load(f)
        # construct an empty Graph and populate it from the pickled state
        return cls(None, None, None, None, None).load_state(graph_data)
    @classmethod
    def from_state(cls, state) -> 'Graph':
        """Build a new Graph from a state dict produced by Graph.state()."""
        return cls(None, None, None, None, None).load_state(state)
    @classmethod
    def from_other(cls, graph: 'Graph') -> 'Graph':
        """Deep-copy *graph* by round-tripping through its state dict."""
        return cls(None, None, None, None, None).load_state(graph.state())
    def new_graph_without_constants(self) -> Tuple[('Graph', Dict[(int, int)])]:
        """Create a copy of this graph with constant-like nodes removed.

        Removes nodes of type CONSTANT, ops with no inputs, and
        inputs/buffers/params whose single use is far down the graph
        (a heuristic: the sole consumer's id is at least len(self)/2 past
        the node's id). Remaining node ids are compacted to stay contiguous.

        Returns
        -------
        (new_graph, lookup):
            the reduced graph and a map from new ids back to self's ids
        """
        new_nodes = dict()
        output_ids = []
        new_graph = Graph.from_other(self)
        num_removed = 0
        lookup = dict()
        for node in new_graph._nodes.values():
            is_constant = (node.type is NodeTypes.CONSTANT)
            op_without_inputs = ((node.type in [NodeTypes.PRIMITIVE, NodeTypes.OP]) and (len(node.in_edges) == 0))
            input_or_buff_param_with_one_use_at_end = ((node.type in [NodeTypes.IN, NodeTypes.BUFF_PARAM]) and (len(node.out_edges) == 1))
            if input_or_buff_param_with_one_use_at_end:
                # only prune when the single consumer sits far beyond the node
                input_or_buff_param_with_one_use_at_end &= ((list(node.out_edges)[0].id - node.id) >= (len(self) / 2))
            if (is_constant or op_without_inputs or input_or_buff_param_with_one_use_at_end):
                if (input_or_buff_param_with_one_use_at_end and (not (is_constant or op_without_inputs))):
                    warnings.warn(f'HACK: input_or_buff_param_with_one_use_at_end=True, for {node.scope},list(node.out_edges)[0].id={list(node.out_edges)[0].id} node.id={node.id}, len(self)={len(self)}')
                # detach the removed node from its consumers
                for o in node.out_edges:
                    o.kwargs.pop(node, None)
                    o.args = [n for n in o.args if (n is not node)]
                num_removed += 1
            else:
                # compact ids: shift each kept node down by the removals so far
                old_id = node.id
                new_id = (old_id - num_removed)
                if (node.id in new_graph.output_ids):
                    output_ids.append(new_id)
                node.id = new_id
                new_nodes[new_id] = node
                lookup[new_id] = old_id
        new_graph._nodes = new_nodes
        new_graph.output_ids = output_ids
        return (new_graph, lookup)
    def induce_layer_partition(self, layers_graph, layers_to_original: Dict[(int, int)]) -> 'Graph':
        """Copy stage/gpu assignments from a partitioned layers-only graph.

        Nodes present in *layers_graph* (via the id map) take their assignment
        directly; removed nodes inherit from the out-edge neighbor with the
        smallest stage_id. Iterating in descending id order guarantees that a
        node's out-edges are already assigned when it is processed.
        Returns self.
        """
        assert (len(self) >= len(layers_graph))
        # invert new_id -> old_id into old_id -> new_id
        old_to_new = {v: k for (k, v) in layers_to_original.items()}
        for node in sorted(self.nodes, key=(lambda n: n.id), reverse=True):
            if (node.id in old_to_new):
                take_from = layers_graph[old_to_new[node.id]]
                node.stage_id = take_from.stage_id
                node.gpu_id = take_from.gpu_id
            else:
                # inherit from the successor with the smallest stage_id
                first = sorted(node.out_edges, key=(lambda n: n.stage_id))[0]
                node.stage_id = first.stage_id
                node.gpu_id = first.gpu_id
            assert (node.stage_id >= 0)
        return self
    def __getitem__(self, idx):
        """Return the node with id *idx* (raises KeyError if absent)."""
        return self._nodes[idx]
    def __contains__(self, node_id):
        """Return True if a node with id *node_id* exists in the graph."""
        return (node_id in self._nodes)
    def selfcheck(self):
        """Validate graph invariants; dump a debug pdf and re-raise on failure.

        Checks that edges are topologically consistent (in-edge ids smaller,
        out-edge ids larger), that in/out edge lists mirror each other, that
        no node has a self loop, and that every node is reachable through some
        edge (visited count equals num_nodes). Returns self on success.
        """
        visited = set()
        try:
            for n in self.nodes:
                for u in n.in_edges:
                    assert (u.id < n.id)
                    # in/out adjacency must be symmetric
                    assert (n in u.out_edges), (n.scope, u.scope)
                    visited.add(u)
                assert (n not in n.in_edges)
                for o in n.out_edges:
                    assert (o.id > n.id)
                    assert (n in o.in_edges), (n.scope, o.scope)
                    visited.add(o)
                visited.add(n)
                assert (n not in n.out_edges)
            assert (len(visited) == self.num_nodes)
        except AssertionError as e:
            # persist a rendering of the broken graph for post-mortem debugging
            self.save_as_pdf('selfcheck_error', '.')
            raise e
        return self
    def split_to_stages(self) -> Dict[(int, 'Graph')]:
        """Return a sub graph for each stage in the graph.

        Works on a deep copy so self is untouched. For every stage, nodes of
        that stage are collected; producers from other stages (and model
        inputs) are replaced by synthetic IN nodes, and nodes feeding other
        stages (or model outputs) become stage outputs.

        Returns:
            Dict[int, Graph] mapping stage_id to its sub graph
        """
        stages = dict()
        # mutate a copy, never the original graph
        tmp = Graph(None, None, None, None, None).load_state(self.state())
        groups = defaultdict(list)
        for n in tmp.nodes:
            if (n.type != NodeTypes.IN):
                groups[n.stage_id].append(n)
        for (stage_id, group) in groups.items():
            stage_nodes = dict()
            stage_inputs = dict()
            stage_output_ids = []
            stage_input_kws = dict()
            for n in sorted(group, key=(lambda w: w.id)):
                stage_nodes[n.id] = n
                # a node is a stage output if the model outputs it or a
                # consumer lives in a different stage
                if ((n.id in self.output_ids) or any(((o.stage_id != stage_id) for o in n.out_edges))):
                    stage_output_ids.append(n.id)
                # keep only intra-stage out edges
                n.out_edges = [o for o in n.out_edges if (o.stage_id == stage_id)]
                to_replace = dict()
                for u in n.in_edges:
                    if ((u.stage_id != stage_id) or (u.type is NodeTypes.IN)):
                        # cross-stage producer: substitute a synthetic IN node
                        if (u.id in stage_inputs):
                            stage_input = stage_inputs[u.id]
                        else:
                            stage_input = Node.from_other(u)
                            stage_input.type = NodeTypes.IN
                            stage_input.args = []
                            stage_input.kwargs = dict()
                            stage_input.stage_id = stage_id
                            stage_input.out_edges = [o for o in u.out_edges if (o.stage_id == stage_id)]
                            stage_inputs[u.id] = stage_input
                            stage_nodes[u.id] = stage_input
                        to_replace[u] = stage_input
                        # preserve keyword-input names of original model inputs
                        if (u.id in self.input_kw_ids):
                            stage_input_kws[u.id] = self.input_kw_ids[u.id]
                for (old, new) in to_replace.items():
                    n.replace_input(old, new)
                    new.add_out_edge(n)
            stages[stage_id] = Graph(stage_nodes, stage_input_kws, stage_output_ids, self.depth, self.basic_blocks)
        return stages
    def get_copy_without_parallel_edges(self) -> 'Graph':
        """Return a deep copy of the graph with parallel and self edges removed.

        The control flow graph can contain parallel in/out edges; those edges
        are important for control flow but are detrimental for partitioning.
        In the copy, each node's args become its de-duplicated in-edge set
        (kwargs are cleared), self loops are dropped with a warning, and both
        args and out_edges are re-sorted by node id.
        """
        copy = Graph(None, None, None, None, None).load_state(self.state())
        for n in copy.nodes:
            # de-duplicate via sets, then restore deterministic id order below
            n.out_edges = set(n.out_edges)
            in_edges = n.in_edges
            n.args = set(in_edges)
            n.kwargs.clear()
            if (n in n.out_edges):
                warnings.warn(f'Node with self loop {n}')
                n.out_edges.remove(n)
            if (n in n.args):
                warnings.warn(f'Node with self loop {n}')
                n.args.remove(n)
            n.args = sorted(n.args, key=(lambda x: x.id))
            n.out_edges = sorted(n.out_edges, key=(lambda x: x.id))
        return copy
def topo_sort(self, verbose=False, change_graph=True, resort_edges=False):
def print_if_verbose(*args, **kw):
if verbose:
print(*args, **kw)
G = nx.DiGraph()
for u in self.nodes:
dsts = set()
for v in u.out_edges:
if (v.id in dsts):
continue
dsts.add(v.id)
G.add_edge(u.id, v.id)
nx_graph = self.asNetworkx(directed=True)
def key(node):
return None
topo_sorted = nx.dag.lexicographical_topological_sort(nx_graph, key=key)
topo_sorted = list(topo_sorted)
for (topo_sort_id, node_id) in enumerate(topo_sorted):
self[node_id].topo_sort_id = topo_sort_id
if (not change_graph):
return
if ((ids_sort := sorted(topo_sorted)) != topo_sorted):
print('-W- topo_sort: node_ids are not topo sorted!')
print_if_verbose('topo_sorted:', topo_sorted)
print_if_verbose('node_ids', ids_sort)
replace_ids_with_topo = True
else:
print('-I- topo_sort: node_ids are topo sorted')
replace_ids_with_topo = False
if replace_ids_with_topo:
print('-I- replacing node_ids by topo_sort_ids for graph')
for node in self.nodes:
node.id = node.topo_sort_id
input_kw_ids = {}
for (i, v) in self.input_kw_ids.items():
topo_sort_id = self[i].topo_sort_id
if (topo_sort_id != i):
print_if_verbose(f'-I- topo_sort: changed id to input {v}')
input_kw_ids[topo_sort_id] = v
self.input_kw_ids = input_kw_ids
output_ids = []
for i in self.output_ids:
topo_sort_id = self[i].topo_sort_id
if (topo_sort_id != i):
print_if_verbose(f'-I- topo_sort: changed id to output {i}')
output_ids.append(topo_sort_id)
self.output_ids = output_ids
_nodes = {n.topo_sort_id: n for n in self.nodes}
self._nodes = _nodes
def forward_dfs_and_check_if_in_set(self, source: Node, set_to_check: Set[Node], depth_limit: Optional[int]=None):
if (depth_limit is None):
depth_limit = len(self)
nodes = [source]
visited = set()
for start in nodes:
if (start in visited):
continue
if (start in set_to_check):
return True
visited.add(start)
stack = [(start, depth_limit, iter(start.out_edges))]
while stack:
(parent, depth_now, children) = stack[(- 1)]
try:
child = next(children)
if (child in visited):
continue
if (child in set_to_check):
return True
visited.add(child)
if (depth_now > 1):
stack.append((child, (depth_now - 1), iter(child.out_edges)))
except StopIteration:
stack.pop()
return False
def calculate_params_per_node(self, model: Module):
layers = layerDict(model, self.depth, self.basic_blocks)
tensors = tensorDict(model)
for n in self.nodes:
if (n.scope in layers):
x = sum((t.numel() for t in layers[n.scope].parameters()))
elif ((n.value_type is Parameter) and (n.scope in tensors)):
x = tensors[n.scope].numel()
else:
x = 0
n.num_parameters = x
|
def remove_dups(lnodes: List[Node], myself):
    """Return *lnodes* de-duplicated, without *myself*, sorted by topo_sort_id."""
    unique = set(lnodes)
    unique.discard(myself)
    return sorted(unique, key=(lambda node: node.topo_sort_id))
|
class PreHook(abc.ABC):
    """Callback invoked right before a node executes.

    Signature:
        __call__(node, function, args, kwargs) -> (args_or_None, kwargs_or_None)

    The hook may return modified args/kwargs; returning None for either
    element keeps the original value (that convention is applied by
    apply_pre_hook).
    """
    @abc.abstractmethod
    def __call__(self, node: Node, function: Callable, args: tuple, kwargs: dict) -> Tuple[(Optional[Tuple], Optional[Dict])]:
        pass
|
class PostHook(abc.ABC):
    """Callback invoked right after a node executes.

    Signature:
        __call__(node, function, args, kwargs, outputs) -> outputs_or_None

    The hook may return modified outputs; returning None keeps the original
    outputs (that convention is applied by apply_post_hook).
    """
    @abc.abstractmethod
    def __call__(self, node: Node, function: Callable, args: tuple, kwargs: Dict, outputs: Any) -> Optional:
        pass
|
def pre_hook_factory(fn) -> PreHook:
    """Adapt a plain callable into a PreHook instance."""
    class _FunctionalPreHook(PreHook):
        # delegate every call straight to the wrapped callable
        def __call__(self, *args, **kwargs):
            return fn(*args, **kwargs)
    return _FunctionalPreHook()
|
def post_hook_factory(fn) -> PostHook:
    """Adapt a plain callable into a PostHook instance."""
    class _FunctionalPostHook(PostHook):
        # delegate every call straight to the wrapped callable
        def __call__(self, *args, **kwargs):
            return fn(*args, **kwargs)
    return _FunctionalPostHook()
|
def execute_graph(model: nn.Module, graph: Graph, model_args=(), model_kwargs=None, pre_hook: Optional[PreHook]=None, post_hook: Optional[PostHook]=None, enforce_out_of_place=True):
    """Execute *model* by interpreting *graph* node by node.

    Nodes run in ascending id order; each node's value is cached in
    ready_expressions and freed as soon as its last consumer has run
    (reference counting via `uses`). pre_hook/post_hook wrap every layer and
    op call. When enforce_out_of_place is True, in-place ops are executed
    out of place. Returns the list of values for graph.outputs.
    """
    if (model_kwargs is None):
        model_kwargs = dict()
    if (not isinstance(model_args, tuple)):
        model_args = (model_args,)
    if (pre_hook is None):
        pre_hook = IdentityPreHook()
    if (post_hook is None):
        post_hook = IdentityPostHook()
    # normalize hooks so a None return keeps the original args/kwargs/outputs
    pre_hook = apply_pre_hook(pre_hook)
    post_hook = apply_post_hook(post_hook)
    nodes: List[Node] = sorted(graph.nodes, key=(lambda n: n.id))
    # per-node consumer count; outputs get +1 so they survive until return
    uses = {n: len(n.out_edges) for n in nodes}
    for n in graph.outputs:
        uses[n] += 1
    # positional model inputs map onto the first nodes (ascending id order)
    ready_expressions = dict(zip(nodes, model_args))
    for node in graph.inputs:
        if (node.id in graph.input_kw_ids):
            ready_expressions[node] = model_kwargs[graph.input_kw_ids[node.id]]
    del model_args
    del model_kwargs
    tensors = tensorDict(model)
    # buffers/parameters are looked up from the model by scope
    ready_expressions.update({n: tensors[n.scope] for n in nodes if (n.type is NodeTypes.BUFF_PARAM)})
    del tensors
    layers = layerDict(model, graph.depth, graph.basic_blocks)
    namespaces = used_namespaces()
    for node in nodes:
        if (node in ready_expressions):
            continue
        if (node.type is NodeTypes.CONSTANT):
            v = node.constant_value
            ready_expressions[node] = v
            continue
        (args, kwargs) = fetch_args_kwargs(node, ready_expressions)
        if (node.type is NodeTypes.LAYER):
            l = layers[node.scope]
            with (force_out_of_place(l) if enforce_out_of_place else nullcontext()):
                (args, kwargs) = pre_hook(node, l, args, kwargs)
                outputs = l(*args, **kwargs)
                outputs = post_hook(node, l, args, kwargs, outputs)
                ready_expressions[node] = outputs
        elif (node.type is NodeTypes.PRIMITIVE):
            # container construction: list/tuple/dict/set/slice
            ready_expressions[node] = create_container_construct(node, args, kwargs)
        else:
            assert (node.type is NodeTypes.OP)
            outputs = call_function(namespaces, node, args, kwargs, pre_hook, post_hook, enforce_out_of_place=enforce_out_of_place)
            ready_expressions[node] = outputs
        del args
        del kwargs
        # free producer values whose consumers have all executed
        for n in node.in_edges:
            uses[n] -= 1
            if (uses[n] == 0):
                ready_expressions.pop(n)
        if (uses[node] == 0):
            ready_expressions.pop(node)
    return [ready_expressions[n] for n in graph.outputs]
|
def create_container_construct(node, args, kwargs):
    """Build the python container a prim::*Construct node represents.

    The node scope encodes the container kind: dict (built from kwargs),
    set, list, tuple, or slice (built from args). Asserts on unknown kinds.
    """
    scope = node.scope
    if ('prim::DictConstruct' in scope):
        return kwargs
    if ('prim::SetConstruct' in scope):
        return set(args)
    if ('prim::ListConstruct' in scope):
        return list(args)
    if ('prim::TupleConstruct' in scope):
        return tuple(args)
    # the only remaining construct kind is a slice
    assert ('prim::SliceConstruct' in scope)
    return slice(*args)
|
def call_function(namespaces, node, args, kwargs, pre_hook, post_hook, enforce_out_of_place=True):
    """Resolve and invoke the function an OP node represents.

    The op is parsed from the node scope ("<namespace>::<func>_<idx>").
    When enforce_out_of_place is set, in-place variants (trailing '_' torch /
    Tensor functions, and in-place magic methods like __iadd__) are swapped
    for their out-of-place counterparts when available, and the node scope is
    temporarily rewritten to the converted name while the hooks run.
    """
    # scope tail looks like "namespace::func_idx"
    (op_path, idx) = node.scope.rsplit('/', maxsplit=1)[1].rsplit('_', maxsplit=1)
    (namespace, func_name) = op_path.split('::')
    forced_out_of_place = (enforce_out_of_place and (node.type is NodeTypes.OP))
    if forced_out_of_place:
        inplace_torch_function = (('torch' in namespace) and (func_name[(- 1)] == '_'))
        inplace_tensor_function = ((namespace == 'Tensor') and (func_name[(- 1)] == '_') and (not func_name.startswith('__')))
        inplace_tensor_magic = ((namespace == 'Tensor') and (func_name in inplace_arithmetic_ops))
        if (inplace_tensor_magic or inplace_tensor_function or inplace_torch_function):
            debug_str = f'converted {namespace}.{func_name} '
            if inplace_tensor_magic:
                # e.g. '__iadd__' -> '__' + 'add__' == '__add__'
                out_of_place = ('__' + func_name[3:])
            else:
                # strip the trailing '_' of the in-place variant
                out_of_place = func_name[:(- 1)]
            # only convert if the out-of-place counterpart actually exists
            if (((namespace == 'Tensor') and hasattr(Tensor, out_of_place)) or ((namespace != 'Tensor') and hasattr(import_module(namespace), out_of_place))):
                func_name = out_of_place
                debug_str += f'to {namespace}.{func_name}'
    # resolve the callable: module-level function, Tensor method, or magic op
    if (namespace in namespaces):
        function = getattr(import_module(namespace), func_name)
    elif ('__' not in func_name):
        function = getattr(type(args[0]), func_name)
    elif (func_name == '__getattribute__'):
        return getattr(args[0], args[1])
    else:
        assert (len(kwargs) == 0), 'no kwarg in magic method'
        if hasattr(operator, func_name):
            function = getattr(operator, func_name)
        else:
            function = getattr(type(args[0]), func_name)
    if forced_out_of_place:
        # hooks should observe the (possibly converted) op name in the scope
        original_scope = node.scope
        node.scope = (node.scope.rsplit('/', maxsplit=1)[0] + f'/{namespace}::{func_name}_{idx}')
    (args, kwargs) = pre_hook(node, function, args, kwargs)
    output = function(*args, **kwargs)
    output = post_hook(node, function, args, kwargs, output)
    if forced_out_of_place:
        node.scope = original_scope
    return output
|
def fetch_args_kwargs(node, ready_expressions):
    """Materialize a node's call arguments from already-computed values.

    node.args is a list of producer nodes; node.kwargs maps a producer node
    to the list of keyword names its value feeds. Returns (args, kwargs).
    """
    positional = [ready_expressions[producer] for producer in node.args]
    keyword = {name: ready_expressions[producer]
               for (producer, names) in node.kwargs.items()
               for name in names}
    return (positional, keyword)
|
def apply_pre_hook(pre_hook):
    """Wrap *pre_hook* so a None result leaves args/kwargs unchanged.

    The hook returns (args_or_None, kwargs_or_None); the wrapper substitutes
    the originals wherever the hook returned None.
    """
    @wraps(pre_hook)
    def wrapper(node, function, args, kwargs):
        (new_args, new_kwargs) = pre_hook(node, function, args, kwargs)
        if new_args is not None:
            args = new_args
        if new_kwargs is not None:
            kwargs = new_kwargs
        return (args, kwargs)
    return wrapper
|
def apply_post_hook(post_hook):
    """Wrap *post_hook* so a None result leaves the outputs unchanged."""
    @wraps(post_hook)
    def wrapper(node, function, args, kwargs, outputs):
        replacement = post_hook(node, function, args, kwargs, outputs)
        return outputs if replacement is None else replacement
    return wrapper
|
class IdentityPreHook(PreHook):
    """No-op PreHook: returns the given args/kwargs unchanged."""
    def __call__(self, node: Node, function: Callable, args: tuple, kwargs: dict) -> Tuple[(Optional[Tuple], Optional[Dict])]:
        return (args, kwargs)
|
class IdentityPostHook(PostHook):
    """No-op PostHook: returns the given outputs unchanged."""
    def __call__(self, node: Node, function: Callable, args: tuple, kwargs: Dict, outputs: Any) -> Optional:
        return outputs
|
def infer_is_contiguous(graph: Graph, model: torch.nn.Module, args=None, kwargs=None):
    """Execute the graph once (no grad) and record on every node whether its
    runtime value is a contiguous tensor (sets Node.is_contiguous).

    Bug fix: this function previously looked up the name `Visitor` at call
    time, which resolved to the later, identically-named req_grad visitor
    class defined below in this module, so the wrong attribute was recorded.
    The contiguity visitor is now bound locally to avoid the name clash.
    """
    if (args is None):
        args = ()
    if (kwargs is None):
        kwargs = dict()

    class _ContiguityVisitor():
        # mirrors the module-level contiguity Visitor, bound locally so the
        # req_grad Visitor defined later cannot shadow it
        @staticmethod
        def _check(ts):
            def f(t):
                if isinstance(t, torch.Tensor):
                    return t.is_contiguous()
                return False
            return nested_map(f, ts)
        def prehook(self, node, function, args, kwargs):
            for (n, a) in zip(node.args, args):
                n.is_contiguous = (n.is_contiguous or _ContiguityVisitor._check(a))
            for (n, kws) in node.kwargs.items():
                v = kwargs[kws[0]]
                n.is_contiguous = (n.is_contiguous or _ContiguityVisitor._check(v))
            return (detach_tensors(args), detach_tensors(kwargs))
        def posthook(self, node, function, args, kwargs, outputs):
            node.is_contiguous = _ContiguityVisitor._check(outputs)
            return detach_tensors(outputs)

    with torch.no_grad():
        visitor = _ContiguityVisitor()
        execute_graph(model, graph, model_args=args, model_kwargs=kwargs, pre_hook=pre_hook_factory(visitor.prehook), post_hook=post_hook_factory(visitor.posthook))
|
class Visitor():
    """Execution hooks that record tensor contiguity on graph nodes.

    NOTE(review): a second module-level class named `Visitor` (the req_grad
    visitor) is defined later in this module and rebinds the name; any
    call-time lookup of `Visitor` resolves to that later class. Confirm these
    two classes were meant to be local to their respective infer_* functions.
    """
    def prehook(self, node: Node, function: Callable, args: tuple, kwargs: Dict):
        # propagate contiguity info onto the producer nodes of each input
        for (n, a) in zip(node.args, args):
            n.is_contiguous = (n.is_contiguous or Visitor.is_contiguous(a))
        for (n, kws) in node.kwargs.items():
            v = kwargs[kws[0]]
            n.is_contiguous = (n.is_contiguous or Visitor.is_contiguous(v))
        return (detach_tensors(args), detach_tensors(kwargs))
    def posthook(self, node: Node, function: Callable, args: tuple, kwargs: Dict, outputs: Any):
        node.is_contiguous = Visitor.is_contiguous(outputs)
        return detach_tensors(outputs)
    @staticmethod
    def is_contiguous(ts):
        # nested structure of bools mirroring ts; False for non-tensors
        def f(t):
            if isinstance(t, torch.Tensor):
                return t.is_contiguous()
            return False
        return nested_map(f, ts)
|
def infer_req_grad(graph: Graph, model: torch.nn.Module, args=None, kwargs=None):
    """Execute the graph with grad enabled and record on every node whether
    its runtime value requires grad (sets Node.req_grad via Visitor hooks).

    NOTE(review): `Visitor` here resolves, at call time, to the last
    module-level class of that name (the req_grad visitor defined right
    below), which is the intended one for this function.
    """
    if (args is None):
        args = ()
    if (kwargs is None):
        kwargs = dict()
    with torch.enable_grad():
        visitor = Visitor()
        execute_graph(model, graph, model_args=args, model_kwargs=kwargs, pre_hook=pre_hook_factory(visitor.prehook), post_hook=post_hook_factory(visitor.posthook))
|
class Visitor():
    """Execution hooks that record requires_grad on graph nodes.

    NOTE(review): this rebinds the module-level name `Visitor`, shadowing the
    contiguity visitor defined earlier in this module — confirm both classes
    were meant to be function-local.
    """
    def prehook(self, node: Node, function: Callable, args: tuple, kwargs: Dict):
        # propagate grad requirements onto the producer nodes of each input
        for (n, a) in zip(node.args, args):
            n.req_grad = (n.req_grad or Visitor.req_grad(a))
        for (n, kws) in node.kwargs.items():
            v = kwargs[kws[0]]
            n.req_grad = (n.req_grad or Visitor.req_grad(v))
        return (detach_tensors(args), detach_tensors(kwargs))
    def posthook(self, node: Node, function: Callable, args: tuple, kwargs: Dict, outputs: Any):
        node.req_grad = Visitor.req_grad(outputs)
        return detach_tensors(outputs)
    @staticmethod
    def req_grad(ts):
        # nested structure of bools mirroring ts; False for non-tensors
        def f(t):
            if isinstance(t, torch.Tensor):
                return t.requires_grad
            return False
        return nested_map(f, ts)
|
def profile_network(net: nn.Module, sample_batch: tuple=(), kwargs: Optional[Dict]=None, basic_blocks: Optional[List[nn.Module]]=None, max_depth=100, n_iter=10, save_memory_mode=False, recomputation=False, force_no_recomp_scopes=None) -> Dict[(str, ExecTimes)]:
    """Profile a network's forward/backward computation time per layer.

    Each profiled sub-layer is wrapped in a timing Wrapper, the network is run
    n_iter + 1 times on *sample_batch* (the extra iteration compensates for a
    warm-up outlier, discarded by avg_time), and the wrappers are removed
    before returning.

    Parameters
    ----------
    net:
        the nn.Module to profile
    sample_batch:
        sample input(s) used to measure execution time
    kwargs:
        keyword args passed to the profiled model
    basic_blocks:
        nn.Module classes the profiler treats as indivisible units
    max_depth:
        how deep to descend into the module tree
    n_iter:
        number of timed iterations to average over
    save_memory_mode:
        keep layers on cpu and move them to cuda only while profiled
    recomputation:
        profile with activation recomputation semantics
    force_no_recomp_scopes:
        optional predicate scope -> bool disabling recomputation per scope

    Returns
    -------
    dict mapping layer scope to ExecTimes(forward, backward) in milliseconds
    """
    if (kwargs is None):
        kwargs = {}
    if (basic_blocks is None):
        basic_blocks = ()
    if (not isinstance(sample_batch, tuple)):
        sample_batch = (sample_batch,)
    if (force_no_recomp_scopes is None):
        def f(s):
            return False
    else:
        f = force_no_recomp_scopes
    layers_dict = _wrap_profiled_layers(net, max_depth, basic_blocks, save_memory_mode=save_memory_mode, recomputation=recomputation, force_no_recomp_scopes=f)
    # n_iter + 1 runs: the first is a warm-up outlier dropped by avg_time
    for _ in range((n_iter + 1)):
        _perform_forward_backward_pass(net, *sample_batch, save_memory_mode=save_memory_mode, **kwargs)
    backward_times = [layer.avg_time(forward=False) for layer in layers_dict.values()]
    forward_times = [layer.avg_time(forward=True) for layer in layers_dict.values()]
    layers_profile = {name: ExecTimes(forward, backward) for (name, forward, backward) in zip(layers_dict.keys(), forward_times, backward_times)}
    # restore the original (unwrapped) modules before returning
    _unwrap_layers(net)
    return layers_profile
|
def _perform_forward_backward_pass(net, *sample_batch: tuple, save_memory_mode=False, **kwargs: Dict):
    """Run one profiling pass through the wrapped network.

    The per-layer Wrapper.forward performs its own timed forward/backward,
    so this driver only invokes the network (with cuda synchronization
    around the call when running on a cuda device) and verifies that no
    parameter gradients were left behind by the wrappers.
    """
    if save_memory_mode:
        # in save-memory mode layers live on cpu and are profiled on cuda
        device = torch.device('cuda')
    else:
        device = get_device((sample_batch, kwargs))
    if (device.type == 'cuda'):
        torch.cuda.synchronize(device=device)
        out = net(*sample_batch, **kwargs)
        torch.cuda.synchronize(device=device)
    else:
        out = net(*sample_batch, **kwargs)
    # wrappers are expected to clear any grads they produced internally
    for p in net.parameters():
        assert (p.grad is None)
    return out
|
def _wrap_profiled_layers(module: nn.Module, depth, basic_blocks: List[nn.Module], save_memory_mode=False, recomputation=False, force_no_recomp_scopes=(lambda s: False)):
    """Replace every profiled sub-layer with a timing Wrapper in place.

    Returns a dict mapping layer scope to its Wrapper so callers can read
    the recorded times after the profiling runs.
    """
    wrapped_layers = {}
    for (sub_layer, scope, parent) in traverse_model(module, depth, basic_blocks=basic_blocks):
        # the attribute name is the text inside the final '[...]' of the scope
        attr_name = scope[(scope.rfind('[') + 1):(- 1)]
        # a per-scope override can disable recomputation for this layer
        no_recomp_here = force_no_recomp_scopes(scope)
        wrapper = Wrapper(sub_layer, scope, save_memory_mode=save_memory_mode, recomputation=(False if no_recomp_here else recomputation))
        parent.add_module(attr_name, wrapper)
        wrapped_layers[scope] = wrapper
    return wrapped_layers
|
def _unwrap_layers(module: nn.Module):
    """Recursively restore original sub-layers in place of Wrapper instances."""
    for (name, child) in module.named_children():
        if not isinstance(child, Wrapper):
            _unwrap_layers(child)
            continue
        # let the wrapper restore its layer's device, then swap it back in
        child.on_unwrap()
        module.add_module(name, child.layer)
|
class Wrapper(nn.Module):
    """A module that profiles a wrapped layer during forward propagation.

    Every forward call records:
      forward_time:  execution time of one forward pass, in milliseconds
      backward_time: execution time of one backward pass, in milliseconds
    With recomputation=True the forward runs once under no_grad and once
    with grad, and the reported backward time includes the grad-enabled
    recomputation forward. Container-style dunder methods and __getattr__
    delegate to the wrapped layer so the wrapper is mostly transparent.
    """
    def __init__(self, sub_module: nn.Module, scope: str, save_memory_mode=False, recomputation=False):
        super(Wrapper, self).__init__()
        assert isinstance(recomputation, bool)
        self.layer = sub_module
        self.forward_time = []      # one entry per profiled forward call (ms)
        self.backward_time = []     # one entry per profiled backward call (ms)
        self.scope = scope
        self.save_memory_mode = save_memory_mode
        self.recomputation = recomputation
        self.device = None
        if save_memory_mode:
            # park the layer on cpu; it is moved to cuda only while profiled
            self.layer.to('cpu')
    def forward(self, *inputs: tuple, **kwargs: Dict):
        """Run (and time) a forward and backward pass of the wrapped layer."""
        if self.save_memory_mode:
            self.device = torch.device('cuda')
        else:
            self.device = get_device((inputs, kwargs, self.parameters(), self.buffers()))
        if self.save_memory_mode:
            self.layer.to(self.device)
        # detach inputs; only nn.Parameter inputs keep their grad requirement
        detached_inputs = set_req_grad_for_parameters(inputs)
        # with recomputation the first (timed) forward runs without grad
        with torch.set_grad_enabled((not self.recomputation)):
            (forward_time, outputs, _) = time_op(self.device, self.layer, *detached_inputs, **kwargs)
        self.forward_time.append(forward_time)
        if self.recomputation:
            # recompute with grad enabled so a backward pass is possible
            (forward_time, outputs, _) = time_op(self.device, self.layer, *detached_inputs, **kwargs)
        flattened_outputs = flatten(outputs)
        grad_tensors = []
        has_grad_fn = False
        # random gradients for every tensor output; None for non-tensors
        for out in flatten(outputs):
            if isinstance(out, torch.Tensor):
                grad_tensors.append(torch.randn_like(out))
                if ((out.grad_fn is not None) or out.requires_grad):
                    has_grad_fn = True
            else:
                grad_tensors.append(None)
        if has_grad_fn:
            (backward_time, _, _) = time_op(self.device, torch.autograd.backward, tensors=flattened_outputs, grad_tensors=grad_tensors)
            # drop gradients so the profiling pass leaves no state behind
            for p in self.parameters():
                p.grad = None
            for p in detached_inputs:
                p.grad = None
        else:
            (backward_time, _) = (0.0, 0.0)
        if self.recomputation:
            # recomputation pays the forward again during backward
            backward_time = (forward_time + backward_time)
        self.backward_time.append(backward_time)
        if self.save_memory_mode:
            self.layer.to('cpu')
        return outputs
    def avg_time(self, forward=False):
        """Average recorded forward or backward time (see avg_time())."""
        if forward:
            return avg_time(self.forward_time)
        else:
            return avg_time(self.backward_time)
    def __iter__(self):
        return iter(self.layer)
    def __getitem__(self, key):
        return self.layer[key]
    def __setitem__(self, key, value):
        self.layer[key] = value
    def __delitem__(self, idx):
        delattr(self.layer, idx)
    def __len__(self):
        return len(self.layer)
    def __contains__(self, key):
        return (key in self.layer)
    def __getattr__(self, name):
        # fall back to the wrapped layer for any attribute nn.Module lacks
        try:
            return super().__getattr__(name)
        except Exception:
            return getattr(self.layer, name)
    def on_unwrap(self):
        """Called before unwrapping; move the layer back to cuda if parked."""
        if self.save_memory_mode:
            self.layer.to('cuda')
|
def time_op(device, func, *inputs: tuple, **kwargs):
    """Execute func(*inputs, **kwargs) and measure its wall time.

    On cuda, timing uses cuda events with synchronization before and after,
    and cuda_mem is the peak memory allocated above the pre-call baseline.
    On cpu, timing uses time.time() and cuda_mem stays 0.

    Returns
    -------
    (exec_time_ms, output, cuda_mem_bytes)
    """
    cuda_mem = 0
    if (device.type == 'cuda'):
        torch.cuda.reset_max_memory_allocated(device=device)
        base_mem = torch.cuda.memory_allocated(device=device)
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        # make sure pending work doesn't pollute the measurement
        torch.cuda.synchronize(device=device)
        start.record()
        out = func(*inputs, **kwargs)
        end.record()
        # elapsed_time is only valid after both events completed
        torch.cuda.synchronize(device=device)
        exec_time = start.elapsed_time(end)
        peak_usage = torch.cuda.max_memory_allocated(device=device)
        cuda_mem = (peak_usage - base_mem)
    else:
        start = time.time()
        out = func(*inputs, **kwargs)
        end = time.time()
        # convert seconds to milliseconds to match the cuda path
        exec_time = (1000 * (end - start))
    return (exec_time, out, cuda_mem)
|
def avg_time(times):
    """Return the mean of *times* after discarding the single largest sample.

    The first profiling iteration is a warm-up outlier, so one maximum
    measurement is dropped. The previous implementation filtered *every*
    occurrence of the max value while still dividing by len(times) - 1,
    skewing the result whenever the maximum repeated, and raised
    ZeroDivisionError for a single sample; sorting and dropping exactly one
    sample keeps the count consistent, and a lone sample is returned as-is.
    """
    if (len(times) == 1):
        return times[0]
    trimmed = sorted(times)[:-1]
    return (sum(trimmed) / len(trimmed))
|
def set_req_grad_for_parameters(ts):
    """Detach every tensor in *ts*; only nn.Parameter tensors keep requires_grad.

    Model parameters sent across the pipeline always require grad during
    profiling; all other tensors are detached with requires_grad=False.
    Non-tensor values pass through unchanged.
    # TODO: support freezing
    """
    def mark(t):
        if not isinstance(t, torch.Tensor):
            return t
        keep_grad = isinstance(t, torch.nn.Parameter) and t.requires_grad
        return t.detach().requires_grad_(keep_grad)
    return nested_map(mark, ts)
|
class GraphProfiler():
    def __init__(self, recomputation=False, n_iter=10, force_no_recomp_scopes=None, profile_ops=True, save_memory_mode=False):
        """Per-node graph profiler collecting forward/backward times and memory.

        Parameters
        ----------
        recomputation:
            profile with activation-recomputation semantics
        n_iter:
            timed iterations per node (2 extra runs are added for outliers)
        force_no_recomp_scopes:
            optional predicate scope -> bool disabling recomputation per scope
        profile_ops:
            whether OP nodes are profiled as well
        save_memory_mode:
            move tensors cpu<->cuda around each measurement; memory is only
            profiled in this mode
        """
        self.profile_memory = save_memory_mode
        if (not save_memory_mode):
            warnings.warn('Will not profile memory (since save_memory_mode=False)')
        # per-node measurement lists, keyed by Node
        self.forward_times = defaultdict(list)
        self.backward_times = defaultdict(list)
        self.forward_mem = defaultdict(list)
        self.backward_mem = defaultdict(list)
        self.recomputation = recomputation
        NUM_OUTLIERS = 2
        assert (n_iter > 0)
        # extra iterations compensate for outliers discarded during averaging
        self.n_iter = (n_iter + NUM_OUTLIERS)
        # nodes skipped by should_profile, split by pass direction
        self.not_profiled = dict(fwd=list(), bwd=list())
        if (force_no_recomp_scopes is None):
            self.force_no_recomp_scopes = (lambda s: False)
        else:
            self.force_no_recomp_scopes = force_no_recomp_scopes
        self.profile_ops = profile_ops
        self.save_memory_mode = save_memory_mode
    def time_forward(self, node, function, args, kwargs):
        """Pre-hook: time n_iter forward executions of *function* for *node*.

        In save-memory mode, inputs are first moved to cuda. Recomputation
        (unless disabled per-scope) disables grad for the timed forward.
        Records times in forward_times[node] and, when memory profiling,
        peak usage above the pre-loop baseline in forward_mem[node].
        Returns detached (args, kwargs) for the actual execution.
        """
        if self.save_memory_mode:
            (function, args, kwargs) = move_tensors((function, args, kwargs), 'cuda')
        if self.profile_memory:
            torch.cuda.reset_peak_memory_stats()
            base_mem = torch.cuda.memory_allocated()
        with force_out_of_place(function):
            if self.should_profile(node, function, args, kwargs, is_bwd=False, output=None):
                recomputation = (self.recomputation and (not self.force_no_recomp_scopes(node.scope)))
                for _ in range(self.n_iter):
                    # fresh detached inputs every iteration
                    (args, kwargs) = detach_tensors((args, kwargs))
                    with torch.set_grad_enabled((not recomputation)):
                        start = torch.cuda.Event(enable_timing=True)
                        end = torch.cuda.Event(enable_timing=True)
                        torch.cuda.synchronize(device='cuda')
                        start.record()
                        function(*args, **kwargs)
                        end.record()
                        torch.cuda.synchronize(device='cuda')
                        if self.profile_memory:
                            peak_usage = torch.cuda.max_memory_allocated()
                            self.forward_mem[node].append((peak_usage - base_mem))
                        self.forward_times[node].append(start.elapsed_time(end))
            elif (node.value_type is torch.Tensor):
                # tensor-valued node skipped: remember it for reporting
                self.not_profiled['fwd'].append(node)
        return detach_tensors((args, kwargs))
    def time_backward(self, node, function, args, kwargs, output):
        """Post-hook: time the backward pass of *node*, then detach its output.

        Dispatches to backward_no_recomputation or backward_recomputation
        depending on the effective recomputation setting for this scope.
        In save-memory mode the output is moved back to cpu before returning.
        """
        with force_out_of_place(function):
            if self.should_profile(node, function, args, kwargs, is_bwd=True, output=output):
                recomputation = (self.recomputation and (not self.force_no_recomp_scopes(node.scope)))
                if (not recomputation):
                    self.backward_no_recomputation(node, function, args, kwargs, output)
                else:
                    self.backward_recomputation(node, function, args, kwargs, output)
            elif (node.value_type is torch.Tensor):
                # tensor-valued node skipped: remember it for reporting
                self.not_profiled['bwd'].append(node)
        if self.save_memory_mode:
            (function, output) = move_tensors((function, output), 'cpu')
        return detach_tensors(output)
def backward_no_recomputation(self, node, function, args, kwargs, output):
    """Time the backward pass of ``output`` n_iter times.

    Uses random surrogate gradients and ``retain_graph=True`` so the same
    autograd graph can be replayed on every iteration.
    NOTE(review): the first sample therefore pays any retain_graph-related
    overhead — presumably why set_max_memory_usage can drop sample 0.
    """
    for _ in range(self.n_iter):
        torch.cuda.synchronize(device='cuda')
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        tensors = self.only_tensors_that_require_grad(output)
        grads = GraphProfiler.get_grads(tensors)
        torch.cuda.synchronize(device='cuda')
        if self.profile_memory:
            torch.cuda.reset_peak_memory_stats()
            base_mem = torch.cuda.memory_allocated()
        start.record()
        torch.autograd.backward(tensors=tensors, grad_tensors=grads, retain_graph=True)
        end.record()
        torch.cuda.synchronize(device='cuda')
        self.backward_times[node].append(start.elapsed_time(end))
        if self.profile_memory:
            peak_usage = torch.cuda.max_memory_allocated()
            self.backward_mem[node].append((peak_usage - base_mem))
        # clear .grad buffers so iterations don't accumulate
        GraphProfiler.delete_grads(node, function, (args, kwargs))
def backward_recomputation(self, node, function, args, kwargs, output):
    """Time backward including the forward recomputation.

    The recorded time is (start..end) minus (mid1..mid2): the forward replay
    plus the actual backward, excluding the time spent filtering the output
    tensors between mid1 and mid2.
    """
    for _ in range(self.n_iter):
        (args, kwargs) = detach_tensors((args, kwargs))
        with torch.enable_grad():
            torch.cuda.synchronize(device='cuda')
            start = torch.cuda.Event(enable_timing=True)
            end = torch.cuda.Event(enable_timing=True)
            mid1 = torch.cuda.Event(enable_timing=True)
            mid2 = torch.cuda.Event(enable_timing=True)
            # surrogate grads are prepared outside the timed region
            grads = GraphProfiler.pre_get_grads(function, args, kwargs)
            torch.cuda.synchronize(device='cuda')
            if self.profile_memory:
                torch.cuda.reset_peak_memory_stats()
                base_mem = torch.cuda.memory_allocated()
            start.record()
            output = function(*args, **kwargs)
            mid1.record()
            tensors = self.only_tensors_that_require_grad(output)
            mid2.record()
            torch.autograd.backward(tensors=tensors, grad_tensors=grads)
            end.record()
            torch.cuda.synchronize(device='cuda')
            # subtract the mid1..mid2 bookkeeping window from the measurement
            self.backward_times[node].append((start.elapsed_time(end) - mid1.elapsed_time(mid2)))
            if self.profile_memory:
                peak_usage = torch.cuda.max_memory_allocated()
                self.backward_mem[node].append((peak_usage - base_mem))
            GraphProfiler.delete_grads(node, function, (args, kwargs))
def get_weights(self):
    """Build {scope: ExecTimes} from the collected fwd/bwd time samples.

    Nodes with no backward samples get a backward time of 0.
    """
    weights = dict()
    for node, fwd_samples in self.forward_times.items():
        fwd_avg = GraphProfiler.avg_time(fwd_samples)
        if node in self.backward_times:
            bwd_avg = GraphProfiler.avg_time(self.backward_times[node])
        else:
            bwd_avg = 0
        weights[node.scope] = ExecTimes(fwd_avg, bwd_avg)
    return weights
def set_max_memory_usage(self, graph: Graph, ignore_retain_graph=True):
    """Record per-node peak memory usage onto the graph's nodes.

    For each node with both forward and backward memory samples, sets
    ``node.max_memory_bytes`` to the max over all samples (kept monotone with
    any previously-set value). When ``ignore_retain_graph`` is True the first
    sample of each list is dropped (the first backward replay pays
    retain_graph overhead — see backward_no_recomputation).

    Returns {scope: max_memory_bytes} for the nodes that were updated.
    """
    d = dict()
    for node in graph.nodes:
        if ((node not in self.forward_mem) or (node not in self.backward_mem)):
            continue
        fwd = self.forward_mem[node]
        bwd = self.backward_mem[node]
        if ignore_retain_graph:
            fwd = fwd[1:]
            bwd = bwd[1:]
        # fix: with a single collected sample the slices above are empty and
        # max() would raise ValueError — skip such nodes instead
        if (not fwd) or (not bwd):
            continue
        max_usage = max(max(fwd), max(bwd))
        node.max_memory_bytes = max(node.max_memory_bytes, max_usage)
        d[node.scope] = node.max_memory_bytes
    print('max_mem_usage_per_node', d)
    return d
def print_times(self, backward=False):
    """Print the trimmed-average measured time for every profiled node."""
    table = self.backward_times if backward else self.forward_times
    for node, samples in table.items():
        print(node.scope, GraphProfiler.avg_time(samples))
@staticmethod
def only_tensors_that_require_grad(ts):
    """Flatten ``ts`` and keep only tensors with requires_grad set."""
    selected = []
    for candidate in flatten(ts):
        if isinstance(candidate, Tensor) and candidate.requires_grad:
            selected.append(candidate)
    return selected
@staticmethod
def only_tensors_with_grad_fn(ts):
    """Flatten ``ts`` and keep only tensors that carry a grad_fn."""
    selected = []
    for candidate in flatten(ts):
        if isinstance(candidate, Tensor) and candidate.grad_fn is not None:
            selected.append(candidate)
    return selected
@staticmethod
def only_tensors(ts):
    """Flatten ``ts`` and keep only the Tensor values."""
    return [candidate for candidate in flatten(ts) if isinstance(candidate, Tensor)]
@staticmethod
def delete_grads(node, function, ts):
    """Clear accumulated .grad buffers after a profiled backward pass.

    For LAYER nodes also clears the grads of the layer's parameters.
    """
    if node.type is NodeTypes.LAYER:
        for param in function.parameters():
            param.grad = None
    for tensor in GraphProfiler.only_tensors_that_require_grad(ts):
        tensor.grad = None
@staticmethod
def get_grads(ts):
return [torch.randn_like(t) for t in ts]
@staticmethod
def pre_get_grads(function, args, kwargs):
    """Run ``function`` once (grad enabled) only to discover which outputs
    require grad, and return random surrogate gradients matching them.

    Inputs are temporarily switched to requires_grad=True and restored to
    False before returning.
    """
    with torch.enable_grad():
        set_grad_mode((args, kwargs), True)
        output = function(*args, **kwargs)
        output = GraphProfiler.only_tensors_that_require_grad(output)
        set_grad_mode((args, kwargs), False)
        return GraphProfiler.get_grads(output)
@staticmethod
def avg_time(times, drop=2):
vs = times
max_v = None
for i in range(drop):
max_v = max(vs)
vs_cand = [t for t in vs if (t < max_v)]
if (len(vs_cand) == 0):
break
vs = vs_cand
assert (len(vs) > 0), (max_v, times)
total = sum(vs)
return (total / len(vs))
def should_profile(self, node, function, args, kwargs, is_bwd, output=None):
    """Decide whether a node is worth profiling.

    Only LAYER and OP nodes qualify; OP nodes are skipped entirely when
    ``self.profile_ops`` is off, and in-place ops are skipped with a warning
    (they cannot be traced safely). Otherwise the node is profiled when its
    output contains a tensor that requires grad or carries a grad_fn.
    If ``output`` is None the function is invoked once (on detached inputs)
    just to inspect its output.
    """
    if (node.type not in [NodeTypes.LAYER, NodeTypes.OP]):
        return False
    if ((not self.profile_ops) and (node.type is NodeTypes.OP)):
        return False
    if (node.type is NodeTypes.OP):
        # scope looks like ".../namespace::func_name_<idx>"
        (op_path, idx) = node.scope.rsplit('/', maxsplit=1)[1].rsplit('_', maxsplit=1)
        (namespace, func_name) = op_path.split('::')
        inplace_torch_function = (('torch' in namespace) and (func_name[-1] == '_'))
        inplace_tensor_function = ((namespace == 'Tensor') and (func_name[-1] == '_') and (not func_name.startswith('__')))
        inplace_tensor_magic = ((namespace == 'Tensor') and (func_name in inplace_arithmetic_ops))
        if (inplace_tensor_magic or inplace_tensor_function or inplace_torch_function):
            warnings.warn(f"can't trace inplace op {node}")
            return False
    if (output is None):
        (tmp_arg, tmp_kwargs) = detach_tensors((args, kwargs))
        output = function(*tmp_arg, **tmp_kwargs)
        del tmp_arg
        del tmp_kwargs
    output_w_grads = GraphProfiler.only_tensors_that_require_grad(output)
    output_w_grad_fn = GraphProfiler.only_tensors_with_grad_fn(output)
    # fix: the original contained an unreachable `if (not res) and is_bwd ...`
    # branch after this return; removed as dead code (behavior unchanged)
    return ((len(output_w_grads) > 0) or (len(output_w_grad_fn) > 0))
def _debug_stats(self):
    """Print a summary of the nodes skipped by the profiler and sanity-check
    that none of them actually required grad."""
    skipped_fwd = set(self.not_profiled['fwd'])
    skipped_bwd = set(self.not_profiled['bwd'])
    skipped_both = skipped_bwd.intersection(skipped_fwd)
    print(f'not fwd {len(skipped_fwd)}')
    print(f'not bwd {len(skipped_bwd)}')
    print(f'not fwd and bwd {len(skipped_both)}')
    fwd_req_grad_count = sum(skipped.req_grad for skipped in skipped_fwd)
    bwd_req_grad_count = sum(skipped.req_grad for skipped in skipped_bwd)
    print(f'not fwd req_grad {fwd_req_grad_count}')
    print(f'not bwd req_grad {bwd_req_grad_count}')
    for skipped in chain(skipped_fwd, skipped_bwd):
        assert not skipped.req_grad
    assert skipped_fwd == skipped_bwd
    print()
    for skipped in skipped_fwd:
        print(skipped.scope)
|
class TracedFunctions():
    """Class-level registry of free functions that are patched for tracing."""
    functions = set()

    @classmethod
    def register_function(cls, function, namespace):
        # the function must actually live in the given namespace
        assert hasattr(namespace, function.__name__)
        cls.functions.add(TracedFunction(namespace, function))

    @classmethod
    def enable_tracing(cls):
        """Swap every registered function for its tracing wrapper."""
        for traced in cls.functions:
            traced.replace_binding()

    @classmethod
    def disable_tracing(cls):
        """Restore every registered function's original binding."""
        for traced in cls.functions:
            traced.restore_binding()

    @classmethod
    def traced_namespaces(cls):
        """Set of namespaces that contain at least one traced function."""
        return {traced.namespace for traced in cls.functions}
|
class ExplicitUntracedFunctions():
    """Class-level registry of functions explicitly excluded from tracing."""
    functions = set()

    @classmethod
    def register_function(cls, function, namespace):
        assert hasattr(namespace, function.__name__)
        cls.functions.add(ExplicitUntracedFunction(namespace, function))

    @classmethod
    def enable(cls):
        """Swap every registered function for its untraced wrapper."""
        for wrapped in cls.functions:
            wrapped.replace_binding()

    @classmethod
    def disable(cls):
        """Restore every registered function's original binding."""
        for wrapped in cls.functions:
            wrapped.restore_binding()
|
class ExplicitUntracedFunction():
    """Wrapper of an arbitrary static function which will not be recorded:
    neither its inputs nor its outputs enter the trace."""

    def __init__(self, namespace, original_function):
        self.namespace = namespace
        self.original_function = original_function
        self.function_name = original_function.__name__

    def replace_binding(self):
        """Install this wrapper in place of the original function."""
        setattr(self.namespace, self.function_name, self)

    def restore_binding(self):
        """Put the original function back."""
        setattr(self.namespace, self.function_name, self.original_function)

    def __call__(self, *args, **kwargs):
        # strip any TracedValue wrappers and call through untraced
        args, kwargs = ExplicitUntracedFunction.ensure_untraced((args, kwargs))
        return self.original_function(*args, **kwargs)

    @staticmethod
    def ensure_untraced(vs):
        """Recursively replace every TracedValue in ``vs`` with its payload."""
        def unwrap(value):
            return value._data if isinstance(value, TracedValue) else value
        return nested_map(unwrap, vs, full=True)
|
class TracedFunction():
    """Wrapper of an arbitrary static function (like torch.zeros or math.sqrt)
    that is not invoked from a traced value: records the call as an OP node
    and returns a TracedValue."""

    def __init__(self, namespace, original_function):
        self.namespace = namespace
        self.original_function = original_function
        self.function_name = original_function.__name__
        # expose the wrapped function's name so wrappers remain introspectable
        self.__name__ = original_function.__name__

    def replace_binding(self):
        """Install this wrapper in place of the original function."""
        setattr(self.namespace, self.function_name, self)

    def restore_binding(self):
        """Put the original function back."""
        setattr(self.namespace, self.function_name, self.original_function)

    def __call__(self, *args, **kwargs):
        args, kwargs = record_args_and_kwargs(*args, **kwargs)
        traced_out = TracedValue(NodeTypes.OP, f'/{self.namespace.__name__}::{self.function_name}')
        connect_inputs_to_output(traced_out.id, args, kwargs)
        args, kwargs = unpack_traced_args_and_kwargs(*args, **kwargs)
        traced_out.set_data(self.original_function(*args, **kwargs))
        return traced_out
|
def used_namespaces():
    """Names of all modules whose functions are overridden or traced."""
    names = set()
    for ns in chain(override_dict.keys(), TracedFunctions.traced_namespaces()):
        if hasattr(ns, '__name__') and inspect.ismodule(ns):
            names.add(ns.__name__)
    return names
|
def delegate_to_traced_value(func):
    """Decorator for TracedValue ``__magic__`` methods: records the operation
    as an OP node and delegates the actual computation to the wrapped value.

    Reflected ops (``__radd__`` etc.) are rewritten to the forward op name
    with the operand order reversed, since only the forward form exists on
    the ``operator`` module and on the wrapped type.
    """
    @wraps(func)
    def wrapper(*args):
        (args, _) = record_args_and_kwargs(*args)
        op_name = func.__name__
        if (op_name in r_arithmetic_ops):
            # e.g. __radd__ -> __add__ with swapped operands
            op_name = ('__' + op_name[3:])
            args = tuple(reversed(args))
        traced_self = args[0]
        out = TracedValue(NodeTypes.OP, f'/{type(traced_self._data).__name__}::{op_name}')
        connect_inputs_to_output(out.id, args)
        (args, _) = unpack_traced_args_and_kwargs(*args)
        try:
            # prefer the generic implementation from the operator module
            actual_op = getattr(operator, op_name)
            out.set_data(actual_op(*args))
        except Exception:
            # fall back to the method defined on the wrapped type itself
            # (covers ops the operator module lacks, e.g. in-place variants)
            actual_op = getattr(type(traced_self._data), op_name)
            out.set_data(actual_op(*args))
        return out
    return wrapper
|
def tracing_not_supported(func):
    """Decorator that raises a descriptive error whenever an unsupported
    ``__magic__`` method of a traced value is invoked."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        namespace = type(args[0]._data).__name__
        op = func.__name__
        raise NotImplementedError(f'tracing {namespace}::{op} is currently not supported')
    return wrapper
|
class TracedValue(object):
    """A wrapper that traces operations done on a value.

    For Tensor values we leverage the ``__torch_function__`` API; for other
    values we trace all instance methods invoked plus the functions patched
    in ``enable_tracing_registered_functions``.

    Functions and attributes are delegated to the wrapped value by routing
    the wrapper's ``__getattr__`` to the value's ``__getattribute__``.

    ``__magic__`` methods require explicit delegation, marked with
    ``@delegate_to_traced_value`` (or ``@tracing_not_supported``).
    """
    # global running counter; doubles as the graph node id
    ID = 0

    def __init__(self, node_type, creating_op):
        # payload is attached later via set_data()
        self._data = None
        self.namespace = ''
        self.id = TracedValue.ID
        TracedValue.ID += 1
        # scope = current layer scope + the op string that created this value
        self.scope = (CURRENT_SCOPE + f'{creating_op}')
        self.node = Node(node_type, self.id, self.scope)
        # register the node in the global trace
        NODES[self.id] = self.node

    def set_data(self, data):
        """Attach the concrete value and mirror type/dtype/shape onto the node."""
        assert is_traceable(data), f'TracedValue expects a basic type got {type(data)} scope {self.scope}'
        # device-likes / all-constant containers collapse into CONSTANT nodes
        maybe_make_constant(self.node, data)
        self._data = data
        self.namespace = f'{type(self._data).__name__}'
        self.node.value_type = type(data)
        self.node.tensor_dtype = get_tensor_dtypes(data)
        self.node.tensor_shape = get_tensor_shapes(data)

    def __repr__(self):
        return f'''Node ID:{self.id}
Scope:{self.scope}
value: {self._data}
'''

    def __torch_function__(self, func, types, args=(), kwargs=None):
        # single entry point for all torch operations involving a traced Tensor
        if (kwargs is None):
            kwargs = {}
        func_name = func.__name__
        try:
            namespace = FUNCTION_NAMESPACE[func].__name__
        except KeyError as e:
            # unknown function: probe the common torch namespaces for its name
            namespace = None
            for m in [torch, F, torch.functional]:
                if hasattr(m, func_name):
                    namespace = m.__name__
                    break
            if (namespace is None):
                raise e
        op = f'/{namespace}::{func_name}'
        (args, kwargs) = record_args_and_kwargs(*args, **kwargs)
        out = TracedValue(NodeTypes.OP, op)
        connect_inputs_to_output(out.id, args, kwargs)
        (args, kwargs) = unpack_traced_args_and_kwargs(*args, **kwargs)
        out.set_data(func(*args, **kwargs))
        return out

    def __getattr__(self, name):
        """Trace attribute access on the wrapped value.

        Traceable data attributes are recorded as a ``__getattribute__`` OP
        with the attribute name as a constant input; anything else (bound
        methods) is wrapped in a TracedInstanceFunction so the eventual call
        gets recorded instead.
        """
        assert isinstance(name, str), f'getattr support only for string args got {type(name)}'
        out = getattr(self._data, name)
        if is_traceable(out):
            name_arg = TracedValue(NodeTypes.CONSTANT, '/prim::Constant')
            name_arg.set_data(name)
            name_arg.node.constant_value = name
            ret = TracedValue(NodeTypes.OP, f'/{self.namespace}::__getattribute__')
            record_arg(ret.id, self.id)
            record_arg(ret.id, name_arg.id)
            ret.set_data(out)
            return ret
        return TracedInstanceFunction(self.id, self.namespace, out)

    # --- container protocol -------------------------------------------------
    @delegate_to_traced_value
    def __getitem__(self, idx):
        pass

    # hashing a traced value cannot be represented in the graph
    @tracing_not_supported
    def __hash__(self):
        pass

    @delegate_to_traced_value
    def __setitem__(self, idx, value):
        pass

    # NOTE: __len__ and __bool__ must return real values (Python requires it),
    # so they answer from the payload without recording a node
    def __len__(self):
        return len(self._data)

    @tracing_not_supported
    def __contains__(self, key):
        pass

    def __bool__(self):
        return bool(self._data)

    # --- unary ops ----------------------------------------------------------
    @delegate_to_traced_value
    def __neg__(self):
        pass

    @delegate_to_traced_value
    def __pos__(self):
        pass

    @delegate_to_traced_value
    def __abs__(self):
        pass

    @delegate_to_traced_value
    def __invert__(self):
        pass

    # --- binary arithmetic / bitwise ops ------------------------------------
    @delegate_to_traced_value
    def __add__(self, other):
        pass

    @delegate_to_traced_value
    def __sub__(self, other):
        pass

    @delegate_to_traced_value
    def __mul__(self, other):
        pass

    @delegate_to_traced_value
    def __matmul__(self, other):
        pass

    @delegate_to_traced_value
    def __truediv__(self, other):
        pass

    @delegate_to_traced_value
    def __floordiv__(self, other):
        pass

    @delegate_to_traced_value
    def __mod__(self, other):
        pass

    @delegate_to_traced_value
    def __pow__(self, other):
        pass

    @delegate_to_traced_value
    def __lshift__(self, other):
        pass

    @delegate_to_traced_value
    def __rshift__(self, other):
        pass

    @delegate_to_traced_value
    def __and__(self, other):
        pass

    @delegate_to_traced_value
    def __xor__(self, other):
        pass

    @delegate_to_traced_value
    def __or__(self, other):
        pass

    # --- reflected ops (rewritten to forward form by the decorator) ---------
    @delegate_to_traced_value
    def __radd__(self, other):
        pass

    @delegate_to_traced_value
    def __rsub__(self, other):
        pass

    @delegate_to_traced_value
    def __rmul__(self, other):
        pass

    @delegate_to_traced_value
    def __rmatmul__(self, other):
        pass

    @delegate_to_traced_value
    def __rtruediv__(self, other):
        pass

    @delegate_to_traced_value
    def __rfloordiv__(self, other):
        pass

    @delegate_to_traced_value
    def __rmod__(self, other):
        pass

    @delegate_to_traced_value
    def __rpow__(self, other):
        pass

    @delegate_to_traced_value
    def __rlshift__(self, other):
        pass

    @delegate_to_traced_value
    def __rrshift__(self, other):
        pass

    @delegate_to_traced_value
    def __rand__(self, other):
        pass

    @delegate_to_traced_value
    def __rxor__(self, other):
        pass

    @delegate_to_traced_value
    def __ror__(self, other):
        pass

    # --- in-place ops -------------------------------------------------------
    @delegate_to_traced_value
    def __iadd__(self, other):
        pass

    @delegate_to_traced_value
    def __isub__(self, other):
        pass

    @delegate_to_traced_value
    def __imul__(self, other):
        pass

    @delegate_to_traced_value
    def __imatmul__(self, other):
        pass

    @delegate_to_traced_value
    def __itruediv__(self, other):
        pass

    @delegate_to_traced_value
    def __ifloordiv__(self, other):
        pass

    @delegate_to_traced_value
    def __imod__(self, other):
        pass

    @delegate_to_traced_value
    def __ipow__(self, other):
        pass

    @delegate_to_traced_value
    def __ilshift__(self, other):
        pass

    @delegate_to_traced_value
    def __irshift__(self, other):
        pass

    @delegate_to_traced_value
    def __iand__(self, other):
        pass

    @delegate_to_traced_value
    def __ixor__(self, other):
        pass

    @delegate_to_traced_value
    def __ior__(self, other):
        pass

    # --- comparisons --------------------------------------------------------
    @delegate_to_traced_value
    def __eq__(self, other):
        pass

    @delegate_to_traced_value
    def __ne__(self, other):
        pass

    @delegate_to_traced_value
    def __ge__(self, other):
        pass

    @delegate_to_traced_value
    def __gt__(self, other):
        pass

    @delegate_to_traced_value
    def __le__(self, other):
        pass

    @delegate_to_traced_value
    def __lt__(self, other):
        pass
|
class TracedInstanceFunction(object):
    """Records calls of instance methods on traced values.

    ``obj.method`` on a TracedValue yields a TracedInstanceFunction (via
    ``obj.__getattribute__``); its ``__call__`` records the invocation —
    with the owning value as first input — and wraps the result in a new
    TracedValue.
    """

    def __init__(self, self_id, namespace, func):
        self._func = func
        self.self_id = self_id
        self.namespace = namespace

    def __call__(self, *args, **kwargs):
        args, kwargs = record_args_and_kwargs(*args, **kwargs)
        result = TracedValue(NodeTypes.OP, f'/{self.namespace}::{self._func.__name__}')
        # the receiver is the first input of the recorded call
        record_arg(result.id, self.self_id)
        connect_inputs_to_output(result.id, args, kwargs)
        args, kwargs = unpack_traced_args_and_kwargs(*args, **kwargs)
        result.set_data(self._func(*args, **kwargs))
        return result
|
class TracedLayer(nn.Module):
    """Wrapper around model layers used for tracing operations.

    A traced layer can be terminal — a layer that will be profiled according
    to depth/basic blocks — or non-terminal, which is not profiled but still
    traced. Terminal layers pass actual unwrapped values to their module;
    non-terminal layers pass wrapped values down to their children.
    """

    def __init__(self, module: nn.Module, name, terminal, is_nesting_special_patched=False, nesting_special_patch=None, patch_direct_children=False):
        super(TracedLayer, self).__init__()
        self._name = name
        if isinstance(module, TracedLayer):
            warnings.warn('Double warp for module')
        self._module = module
        self._terminal = terminal
        # set when the parent is an nn.ModuleList/ModuleDict patch: its name
        # must be re-inserted into the scope path (see forward)
        self._nesting_special_patch = nesting_special_patch
        self._is_nesting_special_patched = is_nesting_special_patched
        self.patch_direct_children = patch_direct_children

    def forward(self, *args, **kwargs):
        (args, kwargs) = record_args_and_kwargs(*args, **kwargs)
        # push this layer's name onto the global scope path
        global CURRENT_SCOPE
        t_scope = CURRENT_SCOPE
        if (t_scope == ''):
            t_scope = self._name
        else:
            if self._is_nesting_special_patched:
                t_scope += f'/{self._nesting_special_patch}'
            t_scope += f'/{self._name}'
        CURRENT_SCOPE = t_scope
        if self._terminal:
            # profiled leaf: record one LAYER node; run the real module on
            # unwrapped values with function tracing temporarily off
            out = TracedValue(NodeTypes.LAYER, '')
            disable_tracing_registered_functions()
            connect_inputs_to_output(out.id, args, kwargs)
            (args, kwargs) = unpack_traced_args_and_kwargs(*args, **kwargs)
            out.set_data(self._module(*args, **kwargs))
            enable_tracing_registered_functions()
        else:
            # non-terminal: keep tracing inside; also record any parameters /
            # buffers used directly by this module (not by a terminal child)
            with record_free_floating_parameters_and_buffers(self._module):
                out = self._module(*args, **kwargs)
                if (not isinstance(out, TracedValue)):
                    out = record_non_terminal_output(out)
        # pop this layer's name (and the special patch segment, if any)
        t_scope = CURRENT_SCOPE
        t_scope = t_scope.rsplit('/', maxsplit=1)[0]
        if self._is_nesting_special_patched:
            t_scope = t_scope.rsplit('/', maxsplit=1)[0]
        CURRENT_SCOPE = t_scope
        assert isinstance(out, TracedValue), f'expected layer output of type TracedValue got {type(out)}'
        return out

    def __getattr__(self, name):
        # delegate unknown attributes to the wrapped module
        # NOTE(review): broad Exception catch — masks errors raised by
        # nn.Module.__getattr__ other than plain AttributeError
        try:
            return super().__getattr__(name)
        except Exception:
            return getattr(self._module, name)

    # container-style dunders forward straight to the wrapped module
    def __getitem__(self, key):
        return self._module[key]

    def __setitem__(self, key, value):
        self._module[key] = value

    def __delitem__(self, idx):
        delattr(self._module, idx)

    def __len__(self):
        return len(self._module)

    def __contains__(self, key):
        return (key in self._module)
|
def is_traceable(data):
    """Predicate: can this value be wrapped by a TracedValue?"""
    traceable_types = (
        type(None), type(Ellipsis),
        list, tuple, dict, set,
        int, bool, str, float, slice,
        torch.device, torch.Size, torch.Tensor, torch.dtype, torch.memory_format,
    )
    return isinstance(data, traceable_types)
|
def trace_module(module: nn.Module, args=(), kwargs=None, depth=1000, basic_blocks=()):
    """Trace one forward pass of ``module`` and build its computation Graph.

    Wraps layers (down to ``depth`` / ``basic_blocks``) in TracedLayer,
    patches registered functions, runs the model under no_grad on wrapped
    inputs, then unwraps everything and post-processes the recorded nodes.

    Raises RuntimeError if the resulting graph fails validation.
    """
    if (basic_blocks is None):
        basic_blocks = ()
    if (kwargs is None):
        kwargs = dict()
    # start from a clean global tracing state
    reset_tracing_state()
    _unwrap_layers(module)
    (args, kwargs) = prepare_args_and_kwargs(args=args, kwargs=kwargs)
    _wrap_traced_layers(module, depth=depth, basic_blocks=basic_blocks)
    enable_tracing_registered_functions()
    ExplicitUntracedFunctions.enable()
    traced_module = TracedLayer(module, name=f'{type(module).__name__}', terminal=False)
    with torch.no_grad():
        output = traced_module(*args, **kwargs)
    disable_tracing_registered_functions()
    ExplicitUntracedFunctions.disable()
    output_id = output.id
    # restore the original module tree and verify no wrapper remains
    _unwrap_layers(module)
    for m in module.modules():
        assert (not isinstance(m, TracedLayer))
    global CURRENT_SCOPE
    assert (CURRENT_SCOPE == traced_module._name)
    CURRENT_SCOPE = ''
    nodes = NODES
    # post-processing: prune, split shared constants, fold tuple accessors,
    # prune again, then compact ids
    nodes = discard_unused_nodes(nodes, output_id)
    (nodes, output_id) = duplicate_constants(nodes, output_id)
    propagate_constant_tuple_accessors(nodes)
    nodes = discard_unused_nodes(nodes, output_id)
    input_kw_ids = {v.id: k for (k, v) in kwargs.items() if (v.id in nodes)}
    (nodes, output_id) = set_node_indices(nodes, output_id)
    NODES.clear()
    (is_valid, errors) = check_is_valid_graph(nodes)
    if (not is_valid):
        raise RuntimeError(errors)
    return Graph(nodes, input_kw_ids, [output_id], depth, basic_blocks)
|
def find_reachable_nodes(nodes, output_id, keep_tensors=False):
    """BFS from the output over the (mostly) undirected graph, returning the
    set of nodes reachable from the output node.

    Incoming edges are always followed; outgoing edges are followed only into
    in-place ops (scope contains '__i') or, when ``keep_tensors`` is set,
    tensor-valued nodes. This is conservative: some unused nodes may remain.
    """
    # fix: local was named `open`, shadowing the builtin
    frontier = {nodes[output_id]}
    reachable = set()
    while frontier:
        node = frontier.pop()
        if (node in reachable):
            continue
        frontier.update(node.in_edges)
        for succ in node.out_edges:
            if (('__i' in succ.scope) or ((succ.value_type is torch.Tensor) and keep_tensors)):
                frontier.add(succ)
        reachable.add(node)
    return reachable
|
def prepare_args_and_kwargs(args=(), kwargs=None):
    """Wrap the model's inputs in IN graph nodes named input0..inputN.

    Positional args come first; kwargs are numbered after them in sorted key
    order so the numbering is deterministic.
    """
    if not isinstance(args, tuple):
        args = (args,)
    if kwargs is None:
        kwargs = dict()
    wrapped_args = []
    for idx, value in enumerate(args):
        traced = TracedValue(NodeTypes.IN, f'input{idx}')
        traced.set_data(value)
        wrapped_args.append(traced)
    n_args = len(args)
    wrapped_kwargs = dict()
    for offset, (key, value) in enumerate(sorted(kwargs.items(), key=lambda item: item[0])):
        traced = TracedValue(NodeTypes.IN, f'input{(n_args + offset)}')
        traced.set_data(value)
        wrapped_kwargs[key] = traced
    return (wrapped_args, wrapped_kwargs)
|
def register_new_traced_function(function, namespace):
    """Register ``function`` (living in ``namespace``) so its calls are traced."""
    TracedFunctions.register_function(function, namespace)
|
def register_new_explicit_untraced_function(function, namespace):
    """Register ``function`` so its calls (and their inputs) are NOT recorded."""
    ExplicitUntracedFunctions.register_function(function, namespace)
|
def register_torch_functions():
    """Register all torch tensor-creation ops (from tensor_creation_ops) for tracing."""
    for (f, namespace) in tensor_creation_ops.items():
        register_new_traced_function(f, namespace=namespace)
|
def enable_tracing_registered_functions():
    """Patch all registered functions for tracing and rebuild the global
    function -> namespace map used by TracedValue.__torch_function__."""
    register_torch_functions()
    TracedFunctions.enable_tracing()
    global FUNCTION_NAMESPACE
    FUNCTION_NAMESPACE = {f: ns for (ns, funcs) in override_dict.items() for f in funcs}
|
def disable_tracing_registered_functions():
    """Revert the patching done by enable_tracing_registered_functions."""
    FUNCTION_NAMESPACE.clear()
    TracedFunctions.disable_tracing()
|
def _wrap_traced_layers(module: nn.Module, depth=1000, basic_blocks=(), allow_ModuleList_ModuleDict=True):
    """Replace each sub-layer (down to ``depth`` / ``basic_blocks``) with a
    TracedLayer wrapper in place.

    nn.ModuleList/ModuleDict containers get special handling: their direct
    children are marked so the container's name is re-inserted into the scope
    path at trace time. ParameterList/ParameterDict are skipped (handled by
    record_free_floating_parameters_and_buffers).

    Returns {scope: wrapper}.
    """
    layers_dict = dict()
    # scope -> wrapper for containers whose direct children need patching
    layers_to_patch = dict()
    # module object -> its scope, to find a child's patched parent
    patched_layers_to_scope = dict()
    for (sub_layer, scope, parent, terminal) in traverse_model(module, depth=depth, basic_blocks=basic_blocks, full=True):
        # attribute name under the parent, e.g. "layer1" from ".../Block[layer1]"
        name = scope[(scope.rfind('[') + 1):(-1)]
        patch_direct_children = False
        if isinstance(sub_layer, (nn.ModuleList, nn.ModuleDict)):
            if (not allow_ModuleList_ModuleDict):
                raise TypeError(f'tracing nn.ModuleList/nn.ModuleDict is not supported got {scope} of type {type(sub_layer)}')
            else:
                warnings.warn('Experimentally allowing nn.ModuleList/nn.ModuleDict')
                patch_direct_children = True
        elif isinstance(sub_layer, (nn.ParameterList, nn.ParameterDict)):
            continue
        if (parent not in patched_layers_to_scope):
            is_nesting_special_patched = False
            nesting_special_patch = None
        else:
            # parent is a patched container: child must splice the parent's
            # name back into the scope during forward
            traced_parent = layers_to_patch[patched_layers_to_scope[parent]]
            traced_parent: TracedLayer
            if traced_parent.patch_direct_children:
                is_nesting_special_patched = True
                nesting_special_patch = traced_parent._name
            else:
                is_nesting_special_patched = False
                nesting_special_patch = None
        wrapper = TracedLayer(sub_layer, scope.rsplit('/', maxsplit=1)[1], terminal, is_nesting_special_patched=is_nesting_special_patched, nesting_special_patch=nesting_special_patch, patch_direct_children=patch_direct_children)
        if patch_direct_children:
            layers_to_patch[scope] = wrapper
            patched_layers_to_scope[sub_layer] = scope
        parent.add_module(name, wrapper)
        layers_dict[scope] = wrapper
    return layers_dict
|
def _unwrap_layers(module: nn.Module):
    """Recursively strip TracedLayer wrappers, restoring the original modules."""
    for name, child in module.named_children():
        if isinstance(child, TracedLayer):
            inner = child._module
            _unwrap_layers(inner)
            module.add_module(name, inner)
        else:
            module.add_module(name, child)
|
def reset_tracing_state():
    """Restore all global tracing state to a pristine condition: empty scope,
    un-patched functions, cleared node registry, and id counter at 0."""
    global CURRENT_SCOPE
    CURRENT_SCOPE = ''
    disable_tracing_registered_functions()
    ExplicitUntracedFunctions.disable()
    NODES.clear()
    FUNCTION_NAMESPACE.clear()
    TracedValue.ID = 0
|
def duplicate_constants(nodes, output_id):
    """Give every CONSTANT node with multiple consumers its own private copy
    (one copy per consumer), shifting subsequent node ids to make room.

    Returns (new_nodes, new_output_id).
    """
    new_nodes = dict()
    offset = 0
    new_output_id = 0
    for node in nodes.values():
        if (node.id == output_id):
            new_output_id = (node.id + offset)
        node.id += offset
        if ((node.type is NodeTypes.CONSTANT) and (len(node.out_edges) > 1)):
            n_copies = len(node.out_edges)
            for (n_copy, consumer) in enumerate(node.out_edges):
                copy_node = Node.from_other(node)
                copy_node.id += n_copy
                consumer.replace_input(node, copy_node)
                copy_node.out_edges = [consumer]
                assert (copy_node.id not in new_nodes)
                new_nodes[copy_node.id] = copy_node
            # fix: a constant with k consumers expands into k nodes, so later
            # ids must shift by k-1; the original always added 1, producing
            # id collisions whenever k > 2
            offset += (n_copies - 1)
        else:
            assert (node.id not in new_nodes)
            new_nodes[node.id] = node
    return (new_nodes, new_output_id)
|
def discard_unused_nodes(nodes, output_id):
    """Iteratively prune unused nodes until a fixpoint is reached.

    A node is pruned when it is: an OP with no consumers whose function is a
    logical op; an iteration sentinel (value_type None); an IN/CONSTANT with
    no consumers; or unreachable from the output. Pruning severs the node's
    incoming edges, which may expose further prunable nodes, hence the loop.
    Returns the surviving nodes as a dict in original order.
    """
    kept = []
    while True:
        changed = False
        reachable_nodes = find_reachable_nodes(nodes, output_id)
        for node in reversed(list(nodes.values())):
            if (node.id == output_id):
                # fix: the output node must never be pruned; the original fell
                # through to the removal checks below (and appended it twice)
                kept.append((node.id, node))
                continue
            unused_branch = False
            if ((node.type is NodeTypes.OP) and (len(node.out_edges) == 0)):
                op_path = node.scope.rsplit('/', maxsplit=1)[1]
                (_, func_name) = op_path.split('::')
                unused_branch = (func_name in logical_ops)
            iter_sentinel = (node.value_type is None)
            unused_constant_or_input = ((node.type in [NodeTypes.IN, NodeTypes.CONSTANT]) and (len(node.out_edges) == 0))
            unreachable = (node not in reachable_nodes)
            if (unused_branch or iter_sentinel or unused_constant_or_input or unreachable):
                assert (len(node.out_edges) == 0), 'unused traced value should not have outgoing edges'
                print(f'removing node: {node.scope} of type:{node.type} and dtype:{node.value_type}')
                for u in node.in_edges:
                    u.remove_output(node)
                changed = True
            else:
                kept.append((node.id, node))
        if (not changed):
            break
        # rebuild the dict in forward order and do another pass
        nodes = dict(reversed(kept))
        kept = []
    return dict(reversed(kept))
|
def propagate_constant_tuple_accessors(nodes):
    """Fold ``tuple[i]`` accesses with a constant integer index.

    Consumers of such a ``__getitem__`` node are rewired straight to the
    i-th input of the TupleConstruct, bypassing the accessor; the accessor
    is left dangling (pruned later by discard_unused_nodes). Repeats until
    no more folds apply.
    """
    while True:
        changed = False
        for n in nodes.values():
            if ('prim::TupleConstruct' in n.scope):
                # in_edges of the construct node are the tuple's elements, in order
                tuple_elements = n.in_edges
                for o in n.out_edges:
                    # in_edges[1] of the accessor is the index argument
                    if (('tuple::__getitem__' in o.scope) and (o.in_edges[1].type is NodeTypes.CONSTANT)):
                        idx = o.in_edges[1].constant_value
                        if (not isinstance(idx, int)):
                            continue
                        accessed_element = tuple_elements[idx]
                        tuple_accessor = o
                        for dst in tuple_accessor.out_edges:
                            changed = True
                            dst.replace_input(tuple_accessor, accessed_element)
                            accessed_element.add_out_edge(dst)
                        tuple_accessor.out_edges.clear()
        if (not changed):
            break
|
def maybe_make_constant(node, data):
    """Convert ``node`` into a CONSTANT leaf when its value is statically
    known: a torch.device (or a device-like string such as 'cpu'/'cuda:0'),
    or a container PRIMITIVE all of whose inputs are constants.

    On conversion the node is detached from its inputs.
    """
    can_convert = False
    # fix: only do the 'cpu'/'cuda' comparisons when data is actually a str;
    # the original evaluated `data == 'cpu'` for every incoming type first
    if isinstance(data, torch.device) or (isinstance(data, str) and ((data == 'cpu') or ('cuda' in data))):
        data = torch.device(data)
        can_convert = True
    elif ((node.type is NodeTypes.PRIMITIVE) and all(((i.type is NodeTypes.CONSTANT) for i in node.in_edges))):
        can_convert = True
    if can_convert:
        node.constant_value = data
        node.type = NodeTypes.CONSTANT
        for i in node.in_edges:
            i.remove_output(node)
        node.args.clear()
        node.kwargs.clear()
|
def _make_constant(nodes, predicate):
    """Force every node matching ``predicate`` into a CONSTANT leaf,
    detaching it from its producers."""
    for node in nodes.values():
        if not predicate(node):
            continue
        for producer in node.in_edges:
            producer.remove_output(node)
        node.args.clear()
        node.kwargs.clear()
        node.type = NodeTypes.CONSTANT
|
def set_node_indices(nodes, output_id):
    """Compact node ids to 0..len-1 (order preserved) and suffix OP/PRIMITIVE
    scopes with their new id. Returns (new_nodes, new_output_id)."""
    reindexed = dict()
    for new_id, node in enumerate(nodes.values()):
        # ids only ever shrink during compaction
        assert new_id <= node.id
        node.id = new_id
        if node.type in [NodeTypes.OP, NodeTypes.PRIMITIVE]:
            node.scope += f'_{node.id}'
        reindexed[new_id] = node
    # `nodes` is still keyed by the old ids; the node object holds the new id
    return (reindexed, nodes[output_id].id)
|
def record_args_and_kwargs(*args, **kwargs):
    """Record args and kwargs in the traced graph.

    Records all literal values (lists/dicts/ints etc.) and builds the
    necessary hierarchy in the graph: for list/tuple/set elements the
    position is recorded, for dictionaries the keyword. Nested iterables
    record their elements before the iterable itself, ensuring
    topological/chronological order of traced values.

    Note that a TracedValue cannot be a dictionary key.
    """
    (recorded_args, _) = record_args(args, top_level=True)
    (recorded_kwargs, _) = record_kwargs(kwargs, top_level=True)
    return (recorded_args, recorded_kwargs)
|
def record_args(args, top_level):
    """Recursively wrap positional values in TracedValues.

    Containers (list/tuple/set/dict/slice) are recorded element-first, then a
    PRIMITIVE construct node for the container itself; bare literals become
    CONSTANT nodes; already-traced values pass through unchanged.

    Returns (values, ids): at top_level the values are the TracedValue
    wrappers themselves; below top level they are the raw payloads used to
    rebuild the parent container.
    """
    new_args = []
    new_args_id = []
    for a in args:
        if isinstance(a, (list, tuple, set)):
            (traced_children, traced_ids) = record_args(a, top_level=False)
            traced_value = TracedValue(NodeTypes.PRIMITIVE, ('/' + container_construct_op_name(type(a))))
            for id in traced_ids:
                record_arg(traced_value.id, id)
            traced_value.set_data(type(a)(traced_children))
        elif isinstance(a, dict):
            (traced_children, traced_ids) = record_kwargs(a, top_level=False)
            traced_value = TracedValue(NodeTypes.PRIMITIVE, ('/' + container_construct_op_name(type(a))))
            for (k, id) in traced_ids.items():
                record_kwarg(traced_value.id, k, id)
            traced_value.set_data(type(a)(traced_children))
        elif isinstance(a, slice):
            # a slice is recorded as a 3-element construct (start, stop, step)
            (traced_children, traced_ids) = record_args((a.start, a.stop, a.step), top_level=False)
            traced_value = TracedValue(NodeTypes.PRIMITIVE, ('/' + container_construct_op_name(type(a))))
            for id in traced_ids:
                record_arg(traced_value.id, id)
            traced_value.set_data(type(a)(*traced_children))
        elif isinstance(a, TracedValue):
            traced_value = a
        else:
            # plain literal -> CONSTANT node (tensors must already be traced)
            assert (not isinstance(a, Tensor)), 'tensor constants should not happen'
            traced_value = TracedValue(NodeTypes.CONSTANT, f'/prim::Constant')
            traced_value.set_data(a)
            traced_value.node.constant_value = a
        if top_level:
            new_args.append(traced_value)
        else:
            new_args.append(traced_value._data)
        new_args_id.append(traced_value.id)
    return (new_args, new_args_id)
|
def record_kwargs(kwargs, top_level):
    """Recursively wrap keyword values in TracedValues (see record_args).

    Keys must be hashable basic types (int/bool/str/float/None); a
    TracedValue cannot be a dictionary key.

    Returns (values_by_key, ids_by_key): at top_level the values are the
    TracedValue wrappers; below top level they are the raw payloads.
    """
    new_kwargs = dict()
    new_kwargs_ids = dict()
    for (k, v) in kwargs.items():
        assert isinstance(k, (int, bool, str, float, type(None))), f'unsupported kwargs {type(k)}'
        if isinstance(v, (list, tuple, set)):
            (traced_children, children_ids) = record_args(v, top_level=False)
            traced_value = TracedValue(NodeTypes.PRIMITIVE, ('/' + container_construct_op_name(type(v))))
            for id in children_ids:
                record_arg(traced_value.id, id)
            traced_value.set_data(type(v)(traced_children))
        elif isinstance(v, dict):
            (traced_children, traced_ids) = record_kwargs(v, top_level=False)
            traced_value = TracedValue(NodeTypes.PRIMITIVE, ('/' + container_construct_op_name(type(v))))
            for (key, id) in traced_ids.items():
                record_kwarg(traced_value.id, key, id)
            traced_value.set_data(type(v)(traced_children))
        elif isinstance(v, TracedValue):
            traced_value = v
        else:
            # plain literal -> CONSTANT node (tensors must already be traced)
            assert (not isinstance(v, Tensor)), 'tensor constants should not happen'
            traced_value = TracedValue(NodeTypes.CONSTANT, f'/prim::Constant')
            traced_value.set_data(v)
            traced_value.node.constant_value = v
        new_kwargs_ids[k] = traced_value.id
        if top_level:
            new_kwargs[k] = traced_value
        else:
            new_kwargs[k] = traced_value._data
    return (new_kwargs, new_kwargs_ids)
|
def unpack_traced_args_and_kwargs(*traced_args, **traced_kwargs):
    """Strip TracedValue wrappers, returning raw (args, kwargs) payloads."""
    raw_args = list()
    for traced in traced_args:
        raw_args.append(traced._data)
    raw_kwargs = dict()
    for key, traced in traced_kwargs.items():
        raw_kwargs[key] = traced._data
    return (raw_args, raw_kwargs)
|
def connect_inputs_to_output(out_id, traced_args, traced_kwargs=None):
    """Register graph edges from every traced input to the node ``out_id``."""
    if traced_kwargs is None:
        traced_kwargs = dict()
    for traced in traced_args:
        record_arg(out_id, traced.id)
    for keyword, traced in traced_kwargs.items():
        record_kwarg(out_id, keyword, traced.id)
|
@contextmanager
def record_free_floating_parameters_and_buffers(module: nn.Module):
    """Context manager that records buffers and parameters which are not
    connected to a terminal layer.

    On entry, the module's direct parameters/buffers (and those inside
    ParameterList/ParameterDict children) are swapped for BUFF_PARAM
    TracedValues so their uses are recorded; on exit the raw tensors are
    restored.
    """
    for (name, t) in chain(module.named_parameters(recurse=False), module.named_buffers(recurse=False)):
        traced_t = TracedValue(NodeTypes.BUFF_PARAM, f'/{type(t).__name__}[{name}]')
        traced_t.set_data(t)
        # install the wrapper directly into the module's internal dicts
        if isinstance(t, nn.Parameter):
            module._parameters[name] = traced_t
        else:
            module._buffers[name] = traced_t
    for (name, c) in module.named_children():
        if isinstance(c, (nn.ParameterList, nn.ParameterDict)):
            for (p_name, p) in c.named_parameters():
                traced_p = TracedValue(NodeTypes.BUFF_PARAM, f'/{type(c).__name__}[{name}]/{type(p).__name__}[{p_name}]')
                traced_p.set_data(p)
                c._parameters[p_name] = traced_p
    (yield)
    # restore the raw tensors after the wrapped forward has run
    for (name, wrapped_t) in chain(module._parameters.items(), module._buffers.items()):
        t = wrapped_t._data
        if isinstance(t, nn.Parameter):
            module._parameters[name] = t
        else:
            module._buffers[name] = t
    for (name, c) in module.named_children():
        if isinstance(c, (nn.ParameterList, nn.ParameterDict)):
            for (p_name, p) in c._parameters.items():
                c._parameters[p_name] = p._data
|
def record_non_terminal_output(out):
    """Wrap a single intermediate (non-terminal) output as a traced value."""
    wrapped_outs, _ = record_args((out,), top_level=True)
    return wrapped_outs[0]
|
def record_kwarg(node_id, kwarg, kwarg_id):
    """Wire node ``kwarg_id`` as the keyword input ``kwarg`` of node ``node_id``."""
    # producers always have smaller ids than consumers (topological order)
    assert (kwarg_id < node_id)
    source = NODES[kwarg_id]
    target = NODES[node_id]
    source.add_out_edge(target)
    target.add_kwarg(kwarg, source)
|
def record_arg(node_id, arg_id):
    """Wire node ``arg_id`` as a positional input of node ``node_id``."""
    # producers always have smaller ids than consumers (topological order)
    assert (arg_id < node_id)
    source = NODES[arg_id]
    target = NODES[node_id]
    source.add_out_edge(target)
    target.add_arg(source)
|
def container_construct_op_name(container_cls):
    """Return the ``prim::<Kind>Construct`` op name for a builtin container class."""
    kind_by_cls = {
        dict: 'Dict',
        list: 'List',
        tuple: 'Tuple',
        set: 'Set',
        slice: 'Slice',
    }
    kind = kind_by_cls[container_cls]
    return f'prim::{kind}Construct'
|
def check_is_valid_graph(nodes):
    """Validate the structural invariants of a traced graph.

    For every node the following is checked:
      * leaf nodes (CONSTANT / IN / BUFF_PARAM) have no incoming edges
      * no self cycles
      * edge ids respect topological order (producer id < consumer id)
      * forward/back edge symmetry between ``in_edges`` and ``out_edges``
      * only constant nodes carry a ``constant_value``
      * a tensor-valued node records shape, dtype and value type consistently

    Parameters
    ----------
    nodes : dict
        mapping of node id -> node

    Returns
    -------
    (bool, str)
        validity flag and a newline-joined error report (empty when valid)
    """
    valid = True
    errors = []

    def describe(idx, node):
        # Common diagnostic lines appended after every error headline.
        return [f'node id: {idx}',
                f'scope: {node.scope}',
                f'incoming edges: {[n.id for n in node.in_edges]}',
                f'positional args: {[n.id for n in node.args]}',
                f'keyword args: {[(n.id, k) for (n, k) in node.kwargs.items()]}',
                f'outgoing edges: {[n.id for n in node.out_edges]}',
                '']

    def report(headline, idx, node):
        # Record one failed invariant and mark the graph invalid.
        nonlocal valid
        valid = False
        errors.append(headline)
        errors.extend(describe(idx, node))

    for (i, node) in nodes.items():
        # leaf nodes must be graph sources: no incoming edges allowed
        if (node.type in [NodeTypes.CONSTANT, NodeTypes.IN, NodeTypes.BUFF_PARAM]) and len(node.in_edges):
            report('leaf node with incoming edges', i, node)
        for o in node.out_edges:
            if i == o.id:
                report('self cycle', i, node)
            if o.id < i:
                report('violation of topological sort', i, node)
            if node not in o.in_edges:
                # fix: show the destination's id, not the node object's repr
                report(f'graph violating back edge not set (src id: {i} dest id: {o.id})', i, node)
        for in_node in node.in_edges:
            if i == in_node.id:
                report('self cycle', i, node)
            if i < in_node.id:
                report('violation of topological sort', i, node)
            if node not in in_node.out_edges:
                # fix: show the source's id, not the node object's repr
                report(f'graph violating forward edge not set (src id: {in_node.id} dest id: {i})', i, node)
        # fix: identity comparison with None (original used `!= None`, which
        # calls __ne__ and can misbehave for values overriding equality)
        if (node.type != NodeTypes.CONSTANT) and (node.constant_value is not None):
            report(f'non constant node with constant_value set (value: {node.constant_value})', i, node)
        # if any tensor metadata is present, all of it must be present
        if (isinstance(node.tensor_shape, torch.Size)
                or isinstance(node.tensor_dtype, torch.dtype)
                or issubclass(node.value_type, Tensor)):
            if node.value_type == torch.Size:
                # torch.Size values legitimately record a Size shape without
                # being tensors; skip the consistency check for them
                continue
            if not (isinstance(node.tensor_shape, torch.Size)
                    and isinstance(node.tensor_dtype, torch.dtype)
                    and issubclass(node.value_type, Tensor)):
                report('tensor value not recorded in all of TENSOR_SHAPES TENSOR_DTYPES VALUE_TYPES', i, node)
    return (valid, '\n'.join(errors))
|
class UnionFind(object):
    """Union-find (disjoint sets) data structure.

    Maintains disjoint-set membership, supporting merge (union) of two
    components and queries for whether two elements are connected.
    Implements the "weighted-quick-union-with-path-compression" algorithm;
    only works for immutable (hashable) elements.

    Worst case for M unions over N elements: O(N + M log* N); in practice
    the amortized cost per operation is nearly linear [1]_.

    Parameters
    ----------
    elements : NoneType or container, optional, default: None
        The initial list of elements; each starts in its own component.

    Attributes
    ----------
    n_elts : int
        Number of elements.
    n_comps : int
        Number of disjoint sets or components.

    .. [1] http://algs4.cs.princeton.edu/lectures/
    """
    def __init__(self, elements: Optional[Iterable[Any]]=None):
        self.n_elts = 0    # number of elements tracked
        self.n_comps = 0   # number of disjoint components
        self._next = 0     # next free slot index
        self._elts = []    # index -> element
        self._indx = {}    # element -> index
        self._par = []     # index -> parent index (a root points to itself)
        self._siz = []     # root index -> size of its component
        if (elements is None):
            elements = []
        for elt in elements:
            self.add(elt)
    def __repr__(self):
        return '<UnionFind:\n\telts={},\n\tsiz={},\n\tpar={},\nn_elts={},n_comps={}>'.format(self._elts, self._siz, self._par, self.n_elts, self.n_comps)
    def __len__(self):
        return self.n_elts
    def __contains__(self, x):
        return (x in self._indx)
    def __getitem__(self, index):
        """Return the element stored at ``index``; raise IndexError if out of bound."""
        if ((index < 0) or (index >= self._next)):
            raise IndexError('index {} is out of bound'.format(index))
        return self._elts[index]
    def __setitem__(self, index, x):
        """Replace the element stored at ``index``; raise IndexError if out of bound."""
        if ((index < 0) or (index >= self._next)):
            raise IndexError('index {} is out of bound'.format(index))
        self._elts[index] = x
    def add(self, x):
        """Add a single disjoint element. No-op if ``x`` is already present."""
        if (x in self):
            return
        self._elts.append(x)
        self._indx[x] = self._next
        self._par.append(self._next)
        self._siz.append(1)
        self._next += 1
        self.n_elts += 1
        self.n_comps += 1
    def find(self, x):
        """Return the index of the root of the set containing ``x``.

        Raises
        ------
        ValueError
            If ``x`` is not an element.
        """
        if (x not in self._indx):
            raise ValueError('{} is not an element'.format(x))
        p = self._indx[x]
        # path compression (halving variant): each visited node is re-linked
        # to its grandparent while walking up to the root
        while (p != self._par[p]):
            q = self._par[p]
            self._par[p] = self._par[q]
            p = q
        return p
    def connected(self, x, y):
        """Return True iff ``x`` and ``y`` belong to the same component."""
        return (self.find(x) == self.find(y))
    def union(self, x, y, smallest_new_root=False):
        """Merge the components of the two given elements into one.

        With ``smallest_new_root`` the smaller component's root is attached
        under the larger one (weighted union); otherwise ``x``'s root always
        becomes the root of the merged component.

        Raises
        ------
        ValueError
            If either ``x`` or ``y`` is not an element.
        """
        for elt in [x, y]:
            if (elt not in self):
                # bug fix: the original formatted this message with `x` even
                # when the missing element was `y`
                raise ValueError('{} is not an element'.format(elt))
        xroot = self.find(x)
        yroot = self.find(y)
        if (xroot == yroot):
            return
        if smallest_new_root:
            # weighted union: smaller tree hangs under the larger root
            if (self._siz[xroot] < self._siz[yroot]):
                self._par[xroot] = yroot
                self._siz[yroot] += self._siz[xroot]
            else:
                self._par[yroot] = xroot
                self._siz[xroot] += self._siz[yroot]
        else:
            self._par[yroot] = xroot
            self._siz[xroot] += self._siz[yroot]
        self.n_comps -= 1
    def component(self, x):
        """Return the set of elements in the component containing ``x``.

        Raises
        ------
        ValueError
            If ``x`` is not an element.
        """
        if (x not in self):
            raise ValueError('{} is not an element'.format(x))
        elts = np.array(self._elts)
        vfind = np.vectorize(self.find)
        roots = vfind(elts)
        return set(elts[(roots == self.find(x))])
    def components(self):
        """Return the list of connected components (a list of sets)."""
        elts = np.array(self._elts)
        vfind = np.vectorize(self.find)
        roots = vfind(elts)
        distinct_roots = set(roots)
        return [set(elts[(roots == root)]) for root in distinct_roots]
    def sorted_components(self, *args, **kwargs):
        """Like :meth:`components`, but ordered by sorted root index.

        Extra arguments are forwarded to :func:`sorted` (e.g. ``key``,
        ``reverse``).
        """
        elts = np.array(self._elts)
        vfind = np.vectorize(self.find)
        roots = vfind(elts)
        distinct_roots = set(roots)
        distinct_roots = sorted(distinct_roots, *args, **kwargs)
        return [set(elts[(roots == root)]) for root in distinct_roots]
    def component_mapping(self):
        """Return a dict mapping each element to its component set.

        Elements of the same component share one set object (to save
        memory) — copy a set before mutating it, otherwise the change is
        visible through every element of that component.
        """
        elts = np.array(self._elts)
        vfind = np.vectorize(self.find)
        roots = vfind(elts)
        distinct_roots = set(roots)
        comps = {}
        for root in distinct_roots:
            mask = (roots == root)
            comp = set(elts[mask])
            comps.update({x: comp for x in comp})
        return comps
    def is_root(self, x):
        """Return True iff ``x`` is the representative (root) of its component."""
        idx = self._indx[x]
        return (idx == self._par[idx])
|
def is_None(a):
    """Return True iff ``a`` is the ``None`` singleton (identity test)."""
    return a is None
|
def is_not_None(a):
    """Return True iff ``a`` is not the ``None`` singleton (identity test)."""
    return a is not None
|
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module]]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]:
    """Iterate over model layers yielding (layer, layer_scope, encasing_module).

    Parameters
    ----------
    module:
        the model to iterate over
    depth:
        how far down in the model tree to go; children at depth 0 are leaves
    prefix:
        scope prefix; defaults to the module's class name
    basic_blocks:
        module types that are treated as leaves and not broken down further
    full:
        when True, also yield non-leaf layers, adding a trailing bool that is
        True for leaves and False for containers
    """
    if prefix is None:
        prefix = type(module).__name__
    for name, child in module.named_children():
        child_scope = f'{prefix}/{type(child).__name__}[{name}]'
        is_leaf = (depth == 0
                   or isinstance(child, tuple(basic_blocks))
                   or not any(True for _ in child.children()))
        if is_leaf:
            if full:
                yield (child, child_scope, module, True)
            else:
                yield (child, child_scope, module)
        else:
            if full:
                yield (child, child_scope, module, False)
            yield from traverse_model(child, depth - 1, child_scope, basic_blocks, full)
|
# NOTE(review): the three lines below are stray dataset-viewer page text,
# not Python source; commented out so the file stays syntactically valid.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.