# Acyclic partitioning of model graphs into pipeline stages.
class Constraint(enum.Enum):
    """Which per-stage resource the partitioner's L_max bound applies to."""
    MEMORY = 1  # bound the parameter count per stage
    TIME = 2    # bound the computation time per stage
    def __repr__(self) -> str:
        # Compact repr ("TIME") for readable logs instead of "<Constraint.TIME: 2>".
        return self.name
|
def initial_divide(graph: Graph, k: int, weights: Dict[(SimpleNode, float)]) -> Tuple[(int, ...)]:
    """Split a random topological order of `graph` into k contiguous stages.

    Stage boundaries are placed roughly every total_weight / k units
    (each boundary target is randomly rounded down or up), so each stage
    receives a contiguous slice of the topological order and the initial
    partition is therefore acyclic by construction.

    Side effect: sets `stage_id` on every node. Returns the node ids in
    the topological order that was used.
    """
    topo_order = random_Khan_algorithm(graph)
    weight_arr = np.asarray([v for v in (weights[n] for n in topo_order)])
    prefix_sums = np.cumsum(weight_arr)
    ideal = prefix_sums[-1] / k  # ideal per-stage weight
    rounded = [math.floor(ideal), math.ceil(ideal)]
    boundaries = []
    running = 0
    for _ in range(k - 1):
        # Randomly round each cumulative target so stages average out to `ideal`.
        running += rounded[random.randint(0, 1)]
        boundaries.append(np.searchsorted(prefix_sums, running))
    fences = [-1] + boundaries + [len(prefix_sums) - 1]
    spans = [(lo + 1, hi) for lo, hi in zip(fences, fences[1:])]
    ids = [n.id for n in topo_order]
    for stage, (first, last) in enumerate(spans):
        for v in topo_order[first:last + 1]:
            v.stage_id = stage
    return tuple(ids)
|
def random_Khan_algorithm(graph: Graph):
    """Return a uniformly shuffled Kahn topological order of `graph`.

    Sources are consumed in random order (initial shuffle plus a random
    pick from the ready list each step), so repeated calls explore
    different valid topological orders.

    Raises AssertionError if the graph contains a cycle.
    """
    shuffled = list(graph.nodes)
    random.shuffle(shuffled)
    ready = []
    remaining_degree = dict()
    for node in shuffled:
        in_degree = len(node.in_edges)
        if in_degree:
            remaining_degree[node] = in_degree
        else:
            ready.append(node)
    topo_order = []
    while ready:
        # Pick a random ready node to randomize the resulting order.
        pick = random.randint(0, len(ready) - 1)
        node = ready.pop(pick)
        topo_order.append(node)
        for succ in node.out_edges:
            remaining_degree[succ] -= 1
            if remaining_degree[succ] == 0:
                ready.append(succ)
    # Every node must have been emitted; otherwise a cycle blocked progress.
    assert len(topo_order) == len(shuffled), 'cycle detected'
    return topo_order
|
def simple_moves(constraint: Constraint, objective: Objective, stage_volumes: Dict[(int, float)], params_per_stage: Dict[(int, float)], edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)], node_weights: Dict[(SimpleNode, float)], params_per_node: Dict[(SimpleNode, float)], L_max: float, rounds: int):
    """Local search that moves single nodes only to an adjacent stage.

    Runs up to `rounds` passes over all nodes in random order.  A node may
    move one stage left/right only when doing so cannot create a cycle
    (no predecessor/successor in its current stage, in the move direction)
    and the move satisfies `constraint` under `L_max`.  The feasible move
    with the highest gain under `objective` is applied (ties broken at
    random); iteration stops early after a pass with no moves.

    Mutates `stage_volumes`, `params_per_stage` and node `stage_id`s in
    place; returns None.
    """
    # Fix(consistency): pass the node iterable explicitly, matching
    # advanced_moves/global_moves; iterating the dict yields the same keys.
    connections = VerticeStageConnections(node_weights.keys())

    def update_function(v: SimpleNode, dst: int):
        # Keep every per-stage aggregate in sync with the node's new stage.
        stage_volumes[v.stage_id] -= node_weights[v]
        stage_volumes[dst] += node_weights[v]
        params_per_stage[v.stage_id] -= params_per_node[v]
        params_per_stage[dst] += params_per_node[v]
        connections.move_node(v, dst)
        v.stage_id = dst

    satisfies_constraint = CONSTRAINTS[constraint]
    gain_function = GAINS[objective]
    state = PartitionState(stage_volumes, params_per_stage, node_weights, edge_weights, params_per_node, connections, L_max)
    k = (len(stage_volumes) - 1)  # highest stage index
    nodes = list(node_weights.keys())
    for _ in range(rounds):
        changed = False
        random.shuffle(nodes)
        for n in nodes:
            # Moving left is acyclic only if n has no predecessor in its own stage.
            gain_left = (- np.inf)
            if ((n.stage_id > 0) and (not connections.has_in_connection(n, n.stage_id)) and satisfies_constraint(n, (n.stage_id - 1), state)):
                gain_left = gain_function(n, (n.stage_id - 1), state)
            # Moving right is acyclic only if n has no successor in its own stage.
            gain_right = (- np.inf)
            if ((n.stage_id < k) and (not connections.has_out_connection(n, n.stage_id)) and satisfies_constraint(n, (n.stage_id + 1), state)):
                gain_right = gain_function(n, (n.stage_id + 1), state)
            moves = defaultdict(list)
            moves[gain_left].append((n.stage_id - 1))
            moves[gain_right].append((n.stage_id + 1))
            max_gain = max(moves.keys())
            if (max_gain < 0):
                # Both directions infeasible (-inf) or strictly worsening.
                continue
            changed = True
            best_moves = moves[max_gain]
            dst = random.sample(best_moves, 1)[0]
            update_function(n, dst)
        if (not changed):
            break
|
def advanced_moves(constraint: Constraint, objective: Objective, stage_volumes: Dict[(int, float)], params_per_stage: Dict[(int, float)], edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)], node_weights: Dict[(SimpleNode, float)], params_per_node: Dict[(SimpleNode, float)], L_max: float, rounds: int):
    """Local search that may move a node anywhere in its feasible stage band.

    For each node, destinations between A = max predecessor stage and
    B = min successor stage can never create a cycle, so every stage in
    [A, B] (other than the current one) is evaluated.  Runs up to `rounds`
    passes in random node order, applying the best-gain feasible move
    (random tie break), and stops early after a pass with no moves.
    Mutates stage_volumes/params_per_stage and node stage_ids in place.
    """
    connections = VerticeStageConnections(node_weights.keys())
    def update_function(v: SimpleNode, dst: int):
        # Keep every per-stage aggregate in sync with the node's new stage.
        stage_volumes[v.stage_id] -= node_weights[v]
        stage_volumes[dst] += node_weights[v]
        params_per_stage[v.stage_id] -= params_per_node[v]
        params_per_stage[dst] += params_per_node[v]
        connections.move_node(v, dst)
        v.stage_id = dst
    satisfies_constraint = CONSTRAINTS[constraint]
    gain_function = GAINS[objective]
    state = PartitionState(stage_volumes, params_per_stage, node_weights, edge_weights, params_per_node, connections, L_max)
    nodes = list(node_weights.keys())
    for _ in range(rounds):
        changed = False
        random.shuffle(nodes)
        for n in nodes:
            # Acyclicity-safe destination band [A, B]; isolated nodes default
            # to their own stage, collapsing the band.
            A = max((i.stage_id for i in n.in_edges), default=n.stage_id)
            B = min((o.stage_id for o in n.out_edges), default=n.stage_id)
            if (A == B):
                continue
            moves = defaultdict(list)
            for dst in range(A, (B + 1)):
                if (dst == n.stage_id):
                    continue
                # Infeasible destinations keep gain = -inf and lose at max().
                gain = (- np.inf)
                if satisfies_constraint(n, dst, state):
                    gain = gain_function(n, dst, state)
                moves[gain].append(dst)
            max_gain = max(moves.keys())
            if (max_gain < 0):
                continue
            changed = True
            best_moves = moves[max_gain]
            dst = random.sample(best_moves, 1)[0]
            update_function(n, dst)
        if (not changed):
            break
|
def global_moves(constraint: Constraint, objective: Objective, stage_volumes: Dict[(int, float)], params_per_stage: Dict[(int, float)], edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)], node_weights: Dict[(SimpleNode, float)], params_per_node: Dict[(SimpleNode, float)], L_max: float, rounds: int):
    """Local search allowing moves to *any* stage, guarded by a cycle check.

    Unlike advanced_moves, destinations outside the node's predecessor /
    successor band are allowed as long as the move does not create a cycle
    in the quotient graph (checked explicitly via move_creates_cycle,
    which is comparatively expensive).  Up to `rounds` passes in random
    node order; early exit after a pass with no moves.  Mutates
    stage_volumes/params_per_stage and node stage_ids in place.
    """
    connections = VerticeStageConnections(node_weights.keys())
    quotient_graph = QuotientGraph(node_weights.keys())
    def update_function(v: SimpleNode, dst: int):
        stage_volumes[v.stage_id] -= node_weights[v]
        stage_volumes[dst] += node_weights[v]
        params_per_stage[v.stage_id] -= params_per_node[v]
        params_per_stage[dst] += params_per_node[v]
        connections.move_node(v, dst)
        # NOTE: quotient_graph.move_node also assigns v.stage_id = dst,
        # which is why there is no explicit assignment here (cf. the
        # update_function in simple_moves / advanced_moves).
        quotient_graph.move_node(v, dst)
    satisfies_constraint = CONSTRAINTS[constraint]
    gain_function = GAINS[objective]
    state = PartitionState(stage_volumes, params_per_stage, node_weights, edge_weights, params_per_node, connections, L_max)
    nodes = list(node_weights.keys())
    for _ in range(rounds):
        changed = False
        random.shuffle(nodes)
        for n in nodes:
            moves = defaultdict(list)
            for dst in stage_volumes.keys():
                if (dst == n.stage_id):
                    continue
                gain = (- np.inf)
                # Feasible only if the bound holds AND acyclicity is preserved.
                if (satisfies_constraint(n, dst, state) and (not quotient_graph.move_creates_cycle(n, dst))):
                    gain = gain_function(n, dst, state)
                moves[gain].append(dst)
            max_gain = max(moves.keys())
            if (max_gain < 0):
                continue
            changed = True
            best_moves = moves[max_gain]
            dst = random.sample(best_moves, 1)[0]
            update_function(n, dst)
        if (not changed):
            break
|
def Fiduccia_Mattheyses_moves(constraint: Constraint, objective: Objective, stage_volumes: Dict[(int, float)], params_per_stage: Dict[(int, float)], edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)], node_weights: Dict[(SimpleNode, float)], params_per_node: Dict[(SimpleNode, float)], L_max: float, rounds: int):
    """Fiduccia-Mattheyses style refinement between random stage pairs.

    Each inner pass picks a stage pair A < B, greedily moves eligible
    boundary nodes between them in best-gain-first order (each node moved
    at most once per pass, infeasible moves skipped), and finally rolls
    back every move made after the best objective value observed — the
    classic FM hill-climbing-with-rollback scheme.  Stage pairs stay
    "active" for further passes only while they keep improving.
    Mutates stage_volumes/params_per_stage and node stage_ids in place.
    """
    connections = VerticeStageConnections(node_weights.keys())
    # Explicit per-stage node sets so pair candidates can be enumerated.
    partitions = {i: set() for i in stage_volumes}
    for n in node_weights:
        partitions[n.stage_id].add(n)
    def update_function(v: SimpleNode, dst: int):
        # Keep aggregates and the per-stage node sets in sync with the move.
        stage_volumes[v.stage_id] -= node_weights[v]
        stage_volumes[dst] += node_weights[v]
        partitions[v.stage_id].discard(v)
        partitions[dst].add(v)
        params_per_stage[v.stage_id] -= params_per_node[v]
        params_per_stage[dst] += params_per_node[v]
        connections.move_node(v, dst)
        v.stage_id = dst
    satisfies_constraint = CONSTRAINTS[constraint]
    gain_function = GAINS[objective]
    state = PartitionState(stage_volumes, params_per_stage, node_weights, edge_weights, params_per_node, connections, L_max)
    # Current objective value: edge cut, stage-time MSE, or bottleneck time.
    if (objective is Objective.EDGE_CUT):
        best_objective = calculate_edge_cut(edge_weights)
    elif STAGE_TIME_MSE:
        # NOTE(review): this MSE is averaged over stages, but the MSE gain
        # from calculate_stage_time_gain is not divided by the stage count —
        # the scales look inconsistent; verify before relying on the value.
        avg_compute = (sum(stage_volumes.values()) / len(stage_volumes))
        best_objective = (sum((((t - avg_compute) ** 2) for t in stage_volumes.values())) / len(stage_volumes))
    else:
        best_objective = max(stage_volumes.values())
    all_blocks = list(stage_volumes.keys())
    for _ in range(rounds):
        active_blocks = set(all_blocks)
        while active_blocks:
            (A, B) = random.sample(all_blocks, 2)
            if (A > B):
                (A, B) = (B, A)
            # NOTE(review): A == B cannot occur after sampling without
            # replacement; the check is defensive only.
            if ((A == B) or (not ((A in active_blocks) or (B in active_blocks)))):
                continue
            active_blocks.discard(A)
            active_blocks.discard(B)
            # Seed the queue with nodes whose move keeps the A -> B order legal.
            candidate_moves = PriorityQueue()
            for node in partitions[A]:
                if all(((o.stage_id >= B) for o in node.out_edges)):
                    candidate_moves.push_task(gain_function(node, B, state), (node, B))
            for node in partitions[B]:
                if all(((i.stage_id <= A) for i in node.in_edges)):
                    candidate_moves.push_task(gain_function(node, A, state), (node, A))
            locked_nodes = set()
            moves_to_best = dict()
            current_objective = best_objective
            while (len(candidate_moves) > 0):
                (node, dst) = candidate_moves.pop_task()
                # Re-validate: queued gains/eligibility may be stale by now.
                if (node in locked_nodes):
                    continue
                elif (not satisfies_constraint(node, dst, state)):
                    continue
                elif ((node.stage_id == A) and any(((o.stage_id < B) for o in node.out_edges))):
                    continue
                elif ((node.stage_id == B) and any(((i.stage_id > A) for i in node.in_edges))):
                    continue
                locked_nodes.add(node)
                # Cut/MSE objectives update additively from the gain; the
                # bottleneck objective is recomputed after the move instead.
                if (objective is Objective.EDGE_CUT):
                    current_objective -= gain_function(node, dst, state)
                elif STAGE_TIME_MSE:
                    current_objective -= gain_function(node, dst, state)
                src = node.stage_id
                update_function(node, dst)
                if ((objective is Objective.STAGE_TIME) and (not STAGE_TIME_MSE)):
                    current_objective = max(stage_volumes.values())
                if (current_objective < best_objective):
                    # New best: all moves so far are kept; keep the pair active.
                    best_objective = current_objective
                    moves_to_best.clear()
                    active_blocks.add(A)
                    active_blocks.add(B)
                else:
                    # Remember the original stage so the move can be undone.
                    moves_to_best[node] = src
                # Moving `node` may make its same-stage neighbours eligible.
                if (src == A):
                    for i in node.in_edges:
                        if ((i.stage_id == A) and all(((o.stage_id >= B) for o in i.out_edges))):
                            gain = gain_function(i, B, state)
                            candidate_moves.push_task(gain, (i, B))
                else:
                    for o in node.out_edges:
                        if ((o.stage_id == B) and all(((i.stage_id <= A) for i in o.in_edges))):
                            gain = gain_function(o, A, state)
                            candidate_moves.push_task(gain, (o, A))
            # Roll back all moves made after the best objective was reached.
            for (n, dst) in moves_to_best.items():
                update_function(n, dst)
            current_objective = best_objective  # NOTE(review): value unused afterwards
|
class PartitionState(NamedTuple):
    """Read-only bundle of the mutable partition bookkeeping shared between
    the move heuristics and their gain / constraint functions.

    The dicts are aliases of the caller's structures — mutating them
    through one reference is visible everywhere.
    """
    stage_volumes: Dict[(int, float)]      # computation time per stage
    params_per_stage: Dict[(int, float)]   # parameter count per stage
    node_weights: Dict[(SimpleNode, float)]
    edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)]
    params_per_node: Dict[(SimpleNode, float)]
    connections: VerticeStageConnections   # node<->stage adjacency counters
    L_max: float                           # hard bound for the active constraint
|
def calculate_edge_gain(v: SimpleNode, dst: int, state: PartitionState) -> float:
    """Edge-cut reduction obtained by moving v from its current stage to `dst`.

    Communication is counted the way calculate_edge_cut counts it: an
    activation sent from one node to a given stage is paid once, no matter
    how many consumers it has there (hence the connection-count queries and
    the per-stage dedup on outgoing edges).  Positive gain == smaller cut.

    Fix: the original also maintained a `comm_deltas` defaultdict that was
    never read or returned — that dead bookkeeping is removed; the returned
    gain is computed exactly as before.
    """
    edge_weights = state.edge_weights
    connections = state.connections
    src = v.stage_id
    gain = 0
    for u in v.in_edges:
        w = edge_weights[(u, v)]
        if (u.stage_id == src):
            # u stays behind: a new u->dst transfer appears unless u already
            # feeds some node in dst.
            if (not connections.has_out_connection(u, dst)):
                gain -= w
        elif (u.stage_id == dst):
            # v joins u's stage: the u->src transfer disappears iff v was u's
            # only consumer in src.
            if (connections.out_connections(u, src) == 1):
                gain += w
        else:
            # Third-party source stage: the u->src transfer may disappear and
            # a u->dst transfer may appear.
            if (connections.out_connections(u, src) == 1):
                gain += w
            if (not connections.has_out_connection(u, dst)):
                gain -= w
    # v's outgoing activation is paid once per destination stage — dedup.
    visited = set()
    for o in v.out_edges:
        w = edge_weights[(v, o)]
        if (o.stage_id in visited):
            continue
        visited.add(o.stage_id)
        if (o.stage_id == src):
            gain -= w  # consumers left behind now require a transfer
        elif (o.stage_id == dst):
            gain += w  # the transfer to dst becomes stage-local
    return gain
|
def calculate_edge_cut(edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)]) -> float:
    """Total weight of edges crossing stage boundaries.

    A source node sending to several consumers in the same destination
    stage is charged only once (the activation is transferred one time),
    hence the (source id, destination stage) dedup set.
    """
    counted = set()
    total = 0
    for ((src, dst), weight) in edge_weights.items():
        if (src.stage_id == dst.stage_id):
            continue
        key = (src.id, dst.stage_id)
        if (key in counted):
            continue
        counted.add(key)
        total += weight
    return total
|
def calculate_stage_time_gain(v: SimpleNode, dst: int, state: PartitionState, use_mse=STAGE_TIME_MSE) -> float:
    """Improvement of the stage-time objective if v moved to stage `dst`.

    Two variants, selected by the module flag STAGE_TIME_MSE (captured as
    the default of `use_mse` at definition time):
      * max variant: reduction of max(volume[src], volume[dst]);
      * MSE variant: reduction of the squared distances of src and dst
        from the (pre-move) average stage volume.
    The asserts enforce that callers do not diverge from the global flag.
    """
    node_weights = state.node_weights
    volumes = state.stage_volumes
    if (not use_mse):
        assert (not STAGE_TIME_MSE)
        # Only src and dst change, so comparing their local max suffices.
        prev_max = max(volumes[v.stage_id], volumes[dst])
        new_max = max((volumes[v.stage_id] - node_weights[v]), (volumes[dst] + node_weights[v]))
        gain = (prev_max - new_max)
    else:
        assert STAGE_TIME_MSE
        # The average is unchanged by the move (total volume is conserved),
        # so only the two affected stages' squared distances need comparing.
        avg_compute = (sum(volumes.values()) / len(volumes))
        before_squared_distance = (((volumes[v.stage_id] - avg_compute) ** 2) + ((volumes[dst] - avg_compute) ** 2))
        after_squared_distance = ((((volumes[v.stage_id] - node_weights[v]) - avg_compute) ** 2) + (((volumes[dst] + node_weights[v]) - avg_compute) ** 2))
        gain = (before_squared_distance - after_squared_distance)
    return gain
|
def calculate_stage_times(node_weights: Dict[(SimpleNode, float)], edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)], include_comm: bool=False) -> Dict[(int, float)]:
    """Total node weight per stage, optionally including communication.

    With include_comm, every cut edge adds its weight to *both* endpoint
    stages, and parallel edges from one node into the same destination
    stage are charged only once.
    """
    stage_times: Dict[(int, float)] = {}
    for (node, weight) in node_weights.items():
        stage_times[node.stage_id] = (stage_times.get(node.stage_id, 0) + weight)
        if (not include_comm):
            continue
        seen_stages = set()  # destination stages already charged for this node
        for succ in node.out_edges:
            if ((succ.stage_id == node.stage_id) or (succ.stage_id in seen_stages)):
                continue
            seen_stages.add(succ.stage_id)
            comm = edge_weights[(node, succ)]
            stage_times[node.stage_id] += comm
            stage_times[succ.stage_id] = (stage_times.get(succ.stage_id, 0) + comm)
    return stage_times
|
def caclculate_comm_per_stage(edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)]) -> Dict[(int, float)]:
    """Communication volume charged to each stage.

    Every cut edge contributes its weight to both endpoint stages; several
    edges from the same source node into one destination stage count once
    (the activation is sent a single time).

    NOTE(review): name misspelling ("caclculate") kept for caller
    compatibility.
    """
    comm_per_stage = defaultdict((lambda : 0))
    counted = set()
    for ((src, dst), weight) in edge_weights.items():
        if (src.stage_id == dst.stage_id):
            continue
        key = (src.id, dst.stage_id)
        if (key in counted):
            continue
        counted.add(key)
        comm_per_stage[src.stage_id] += weight
        comm_per_stage[dst.stage_id] += weight
    return comm_per_stage
|
def calculate_params_per_node(model: Module, graph: Graph) -> Dict[(int, float)]:
    """Number of parameters owned by every graph node, keyed by node id.

    Layer nodes count their module's parameter elements, explicit
    Parameter tensor nodes count their own elements, and every other
    node counts zero.
    """
    layers = layerDict(model, graph.depth, graph.basic_blocks)
    tensors = tensorDict(model)
    params_per_node = dict()
    for n in graph.nodes:
        if (n.scope in layers):
            params_per_node[n.id] = sum((t.numel() for t in layers[n.scope].parameters()))
        elif ((n.value_type is Parameter) and (n.scope in tensors)):
            params_per_node[n.id] = tensors[n.scope].numel()
        else:
            params_per_node[n.id] = 0
    return params_per_node
|
def calculate_params_per_stage(params_per_node: Dict[(SimpleNode, float)]) -> Dict[(int, float)]:
    """Sum per-node parameter counts into a plain per-stage dict."""
    totals = {}
    for (node, n_params) in params_per_node.items():
        totals[node.stage_id] = (totals.get(node.stage_id, 0) + n_params)
    return totals
|
def move_satisfies_time_constraint(v: SimpleNode, dst: int, state: PartitionState) -> bool:
    """True iff stage `dst` would stay strictly below L_max after absorbing v."""
    projected = (state.stage_volumes[dst] + state.node_weights[v])
    return (projected < state.L_max)
|
def move_satisifies_memory_constraint(v: SimpleNode, dst: int, state: PartitionState) -> bool:
    """True iff stage `dst`'s parameter count stays strictly below L_max with v.

    NOTE(review): name misspelling ("satisifies") kept for caller compatibility.
    """
    projected = (state.params_per_stage[dst] + state.params_per_node[v])
    return (projected < state.L_max)
|
def acyclic_partition(model: Module, graph: Graph, k: int, epsilon: float=0.1, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None, constraint: Constraint=Constraint.TIME, objective: Objective=Objective.EDGE_CUT, meta_algorithm: META_ALGORITH=META_ALGORITH.SINGLE_LEVEL, maximum_constraint_value: Optional[float]=None, rounds: int=10, allocated_seconds: int=20, use_layers_graph: bool=True) -> Graph:
    """Partition `graph` into k acyclic pipeline stages.

    Spawns one worker process per heuristic in ALGORITHM; each worker
    restarts randomized partitioning for `allocated_seconds` and returns
    its best solution.  The overall best under `objective` is written
    back onto `graph` (through the layers graph when use_layers_graph).

    Raises AssertionError if the result does not have exactly k stages.
    """
    if (node_weight_function is None):
        warnings.warn('using dummy weight function')
        node_weight_function = DefaultWeightFunction()
    if (edge_weight_function is None):
        warnings.warn('using dummy weight function')
        edge_weight_function = DefaultEdgeWeightFunction()
    if use_layers_graph:
        # Partition the coarser layers graph; project back onto `graph` below.
        (work_graph, lookup) = graph.new_graph_without_constants()
    else:
        work_graph = graph
    params_per_node = calculate_params_per_node(model, work_graph)
    # One argument dict per heuristic; the graph is shipped as picklable state.
    worker_args = [dict(graph=work_graph.get_copy_without_parallel_edges().state(), params_per_node=params_per_node, k=k, meta_algorithm=meta_algorithm, algorithm=alg, epsilon=epsilon, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, rounds=rounds, allocated_seconds=allocated_seconds, objective=objective, constraint=constraint, maximum_constraint_value=maximum_constraint_value) for alg in ALGORITHM]
    with Pool(len(worker_args)) as pool:
        results = pool.map(worker, worker_args)
    assert (len(results) == len(worker_args))
    (best_solution, edge_cut, worst_case) = (None, np.inf, np.inf)
    for (s, e, w) in results:
        if is_better_solution((e, w), (edge_cut, worst_case), objective):
            best_solution = s
            edge_cut = e
            worst_case = w
    for n in work_graph.nodes:
        n.stage_id = best_solution[n.id]
    if use_layers_graph:
        graph.induce_layer_partition(work_graph, lookup)
    # Sanity: the final partition must expose exactly k stages.
    assert (graph.n_stages == k)
    return graph
|
def worker(kwargs) -> Tuple[(Dict[(int, int)], float, float)]:
    """Process entry point: random-restart partitioning under a time budget.

    Rebuilds the graph from its pickled state, materializes node/edge
    weights, then repeatedly reseeds and reruns the configured meta
    algorithm until `allocated_seconds` elapse, keeping the best
    (solution, edge_cut, worst_case) triple under `objective`.
    """
    graph = Graph.from_state(kwargs.pop('graph'))
    kwargs['graph'] = graph
    meta_algorithm = kwargs.pop('meta_algorithm')
    algorithm = kwargs['algorithm']
    allocated_seconds = kwargs.pop('allocated_seconds')
    objective = kwargs['objective']
    (best_solution, edge_cut, worst_case) = (None, np.inf, np.inf)
    nwf = kwargs.pop('node_weight_function')
    ewf = kwargs.pop('edge_weight_function')
    node_weights = dict()
    edge_weights = dict()
    params_per_node = dict(kwargs['params_per_node'])
    for u in graph.nodes:
        node_weights[u] = nwf(u)
        # Re-key parameter counts by node object instead of node id.
        params_per_node[u] = params_per_node.pop(u.id)
        for o in u.out_edges:
            edge_weights[(u, o)] = ewf(u, o)
    kwargs['params_per_node'] = params_per_node
    kwargs['node_weights'] = node_weights
    kwargs['edge_weights'] = edge_weights
    start = time.time()
    steps = 0
    while ((time.time() - start) < allocated_seconds):
        # Fresh OS-entropy seed per restart so restarts are independent
        # even across forked worker processes.
        seed = int.from_bytes(os.urandom(4), byteorder='little')
        random.seed(seed)
        if (meta_algorithm is META_ALGORITH.SINGLE_LEVEL):
            (solution, solution_edge_cut, solution_worst_case) = single_level_partitioning(**kwargs)
        else:
            (solution, solution_edge_cut, solution_worst_case) = multilevel_partitioning(**kwargs)
        if is_better_solution((solution_edge_cut, solution_worst_case), (edge_cut, worst_case), objective):
            best_solution = solution
            edge_cut = solution_edge_cut
            worst_case = solution_worst_case
        steps += 1
    return (best_solution, edge_cut, worst_case)
|
def is_better_solution(solution: Tuple[(float, float)], best_solution: Tuple[(float, float)], objective: Objective) -> bool:
    """Lexicographic comparison of two (edge_cut, worst_case) score pairs.

    With Objective.EDGE_CUT the edge cut is the primary key and the worst
    stage time breaks ties; for any other objective the priorities swap.
    Returns True iff `solution` is strictly better than `best_solution`.
    """
    (cut, worst) = solution
    (best_cut, best_worst) = best_solution
    if (objective is Objective.EDGE_CUT):
        candidate = (cut, worst)
        incumbent = (best_cut, best_worst)
    else:
        candidate = (worst, cut)
        incumbent = (best_worst, best_cut)
    # Strict lexicographic "less than" == strictly better.
    return candidate < incumbent
|
def single_level_partitioning(graph: Graph, node_weights: Dict[(SimpleNode, float)], edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)], params_per_node: Dict[(SimpleNode, float)], algorithm: ALGORITHM, k: int, epsilon: float, constraint: Constraint, maximum_constraint_value: Optional[float], objective: Objective, rounds: int) -> Tuple[(Dict[(int, int)], float, float)]:
    """One randomized partitioning attempt on the full (uncoarsened) graph.

    Seeds stage ids with initial_divide, refines with the selected
    heuristic, then applies a final global_moves polish pass.  Returns
    ({node_id: stage_id}, edge_cut, worst_stage_time); node stage ids on
    `graph` are mutated in place.

    Raises AssertionError when the average per-stage load already exceeds
    the allowed bound (partitioning infeasible).
    """
    # The constraint selects which per-node weight the L_max bound governs.
    if (constraint is Constraint.TIME):
        constraint_weights = node_weights
    else:
        constraint_weights = params_per_node
    initial_divide(graph, k, constraint_weights)
    stage_volumes = calculate_stage_times(node_weights, edge_weights, include_comm=False)
    params_per_stage = calculate_params_per_stage(params_per_node)
    if (constraint is Constraint.TIME):
        constraint_per_stage = stage_volumes
    else:
        constraint_per_stage = params_per_stage
    avg_constraint_value = (sum(constraint_per_stage.values()) / k)
    if (maximum_constraint_value is None):
        # Classic balance bound: (1 + eps) * ceil(average stage load).
        L_max = ((1 + epsilon) * math.ceil((sum(constraint_per_stage.values()) / k)))
    else:
        L_max = maximum_constraint_value
    msg = '\n'.join([f'-I- partitioning with {constraint.name} constraint is not possible', f' max allowed stage constraint: {L_max:.2f}', f' average constraint value: {avg_constraint_value:.2f}'])
    assert (avg_constraint_value < L_max), msg
    HEURISTICS[algorithm](constraint, objective, stage_volumes, params_per_stage, edge_weights, node_weights, params_per_node, L_max, rounds)
    # One extra unrestricted pass as a polish step.
    global_moves(constraint, objective, stage_volumes, params_per_stage, edge_weights, node_weights, params_per_node, L_max, rounds=1)
    edge_cut = calculate_edge_cut(edge_weights)
    # For the stage-time objective, report volumes including communication.
    if (objective is Objective.STAGE_TIME):
        stage_volumes = calculate_stage_times(node_weights, edge_weights, include_comm=True)
    return ({n.id: n.stage_id for n in graph.nodes}, edge_cut, max(stage_volumes.values()))
|
def multilevel_partitioning(graph: Graph, node_weights: Dict[(SimpleNode, float)], edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)], params_per_node: Dict[(SimpleNode, float)], algorithm: ALGORITHM, k: int, epsilon: float, constraint: Constraint, maximum_constraint_value: Optional[float], objective: Objective, rounds: int) -> Tuple[(Dict[(int, int)], float, float)]:
    """Multilevel variant: initial partition, then coarsen-refine cycles.

    Obtains an initial assignment via single_level_partitioning, builds a
    coarsening hierarchy, and re-runs the heuristic on every level from
    coarsest to finest, projecting stage ids down with refine().  Returns
    the same ({node_id: stage_id}, edge_cut, worst_stage_time) triple as
    single_level_partitioning; mutates node stage ids in place.
    """
    single_level_partitioning(graph, params_per_node=params_per_node, node_weights=node_weights, edge_weights=edge_weights, algorithm=algorithm, k=k, epsilon=epsilon, constraint=constraint, maximum_constraint_value=maximum_constraint_value, objective=objective, rounds=rounds)
    stage_volumes = calculate_stage_times(node_weights, edge_weights, include_comm=False)
    params_per_stage = calculate_params_per_stage(params_per_node)
    if (constraint is Constraint.TIME):
        constraint_per_stage = stage_volumes
    else:
        constraint_per_stage = params_per_stage
    if (maximum_constraint_value is None):
        L_max = ((1 + epsilon) * math.ceil((sum(constraint_per_stage.values()) / k)))
    else:
        L_max = maximum_constraint_value
    hierarchy = coarsening(graph, node_weights, edge_weights, params_per_node)
    # Refine from the coarsest level back down to the finest.
    for (fine_graph, matching, coarse_graph) in reversed(hierarchy):
        HEURISTICS[algorithm](constraint, objective, stage_volumes, params_per_stage, coarse_graph._edge_weights, coarse_graph._node_weights, coarse_graph._params_per_node, L_max, rounds)
        refine(fine_graph, coarse_graph, matching)
    # Copy final stage ids from the finest hierarchy level onto `graph`.
    root = hierarchy[0][0]
    for i in range(len(graph)):
        graph[i].stage_id = root[i].stage_id
    edge_cut = calculate_edge_cut(edge_weights)
    if (objective is Objective.STAGE_TIME):
        stage_volumes = calculate_stage_times(node_weights, edge_weights, include_comm=True)
    return ({n.id: n.stage_id for n in graph.nodes}, edge_cut, max(stage_volumes.values()))
|
class DefaultWeightFunction():
    """Fallback node-weight function: every node costs 1 time unit."""
    def __call__(self, u: SimpleNode) -> float:
        return 1
|
class DefaultEdgeWeightFunction():
    """Fallback edge-weight function: every edge costs 1 communication unit."""
    def __call__(self, u: SimpleNode, v: SimpleNode) -> float:
        return 1
|
def build_dot(node, edge_weights):
    """Return a graphviz Digraph of `node` and its direct neighbours.

    Nodes are colored by stage id and edges are labeled with their weight.
    NOTE(review): only stage ids 0..16 have an assigned color; a higher
    stage id raises KeyError.
    """
    theme = {'background_color': '#FFFFFF', 'fill_color': '#E8E8E8', 'outline_color': '#000000', 'font_color': '#000000', 'font_name': 'Times', 'font_size': '10', 'margin': '0,0', 'padding': '1.0,0.5'}
    from graphviz import Digraph
    dot = Digraph()
    dot.attr('graph', concentrate='true', bgcolor=theme['background_color'], color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'], margin=theme['margin'], rankdir='TB', pad=theme['padding'])
    dot.attr('node', shape='box', style='filled', margin='0,0', fillcolor=theme['fill_color'], color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'])
    dot.attr('edge', style='solid', color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'])
    colors = {0: 'grey', 1: 'green', 2: 'red', 3: 'yellow', 4: 'orange', 5: 'brown', 6: 'purple', 7: 'pink', 8: 'cyan', 9: 'gold', 10: 'darkolivegreen', 11: 'seagreen', 12: 'thistle', 13: 'plum', 14: 'deeppink', 15: 'lightyellow', 16: 'tan'}
    dot.node(str(node.id), label=f'Node:{node.id}', fillcolor=colors[node.stage_id])
    for i in node.in_edges:
        dot.node(str(i.id), label=f'Node:{i.id}', fillcolor=colors[i.stage_id])
        dot.edge(str(i.id), str(node.id), label=str(edge_weights[(i, node)]))
    for o in node.out_edges:
        dot.node(str(o.id), label=f'Node:{o.id}', fillcolor=colors[o.stage_id])
        dot.edge(str(node.id), str(o.id), label=str(edge_weights[(node, o)]))
    return dot
|
def show_move(node, edge_weights, file_name):
    """Render the 1-hop neighbourhood of `node` to ./<file_name>.pdf.

    A stale PDF of the same name is deleted first so the render is fresh.
    """
    dot = build_dot(node, edge_weights)
    dot.format = 'pdf'
    target = f'./{file_name}.pdf'
    if os.path.exists(target):
        os.remove(target)
    dot.render(file_name, directory='.', cleanup=True)
|
class PriorityQueue():
    """Max-priority queue over gains with random tie breaking.

    heapq is a min-heap, so priorities are stored negated.  A negated
    random tie-breaker makes equal-gain tasks pop in random order and
    keeps unorderable task payloads out of the heap comparison.
    """

    def __init__(self):
        self.heap = []

    def push_task(self, gain: float, task: Any):
        nonce = random.randint(0, (2 ** 32))
        heapq.heappush(self.heap, (((- gain), (- nonce)), task))

    def pop_task(self) -> Any:
        # Highest-gain task first.
        (_, task) = heapq.heappop(self.heap)
        return task

    def __len__(self) -> int:
        return len(self.heap)

    def __bool__(self) -> bool:
        return bool(self.heap)
|
class PartitionNode():
    """A collection of graph nodes allocated to the same partition.

    `_in_edges` / `_out_edges` map a neighbouring stage id to the number
    of underlying graph edges crossing from / into that stage; an edge
    between PartitionNodes exists iff the count is positive.  Edges
    internal to the partition are stripped on construction.
    """
    def __init__(self, nodes: Iterable[Node], idx: int):
        self.nodes: Set[Node] = set(nodes)
        self._out_edges = defaultdict((lambda : 0))
        self._in_edges = defaultdict((lambda : 0))
        self.id = idx
        for n in self.nodes:
            for i in n.in_edges:
                self._in_edges[i.stage_id] += 1
            for o in n.out_edges:
                self._out_edges[o.stage_id] += 1
        # Internal edges are not connections to other partitions.
        self._out_edges.pop(self.id, None)
        self._in_edges.pop(self.id, None)
    @property
    def in_edges(self) -> List[int]:
        # Stage ids with at least one live incoming edge.
        return [i for (i, n) in self._in_edges.items() if (n > 0)]
    @property
    def out_edges(self) -> List[int]:
        # Stage ids with at least one live outgoing edge.
        return [i for (i, n) in self._out_edges.items() if (n > 0)]
    def __contains__(self, key) -> bool:
        return (key in self.nodes)
    def __iter__(self) -> Iterator[Node]:
        return iter(self.nodes)
    def __len__(self) -> int:
        return len(self.nodes)
    def add_in_edge(self, src: int):
        self._in_edges[src] += 1
    def add_out_edge(self, dst: int):
        self._out_edges[dst] += 1
    def remove_in_edge(self, src: int):
        self._in_edges[src] -= 1
    def remove_out_edge(self, dst: int):
        self._out_edges[dst] -= 1
    def add_node(self, node: Node):
        self.nodes.add(node)
    def remove_node(self, node: Node):
        self.nodes.discard(node)
|
class QuotientGraph():
    """The graph induced on partitions: one PartitionNode per stage id.

    Used by global_moves to test whether moving a single graph node
    between stages would introduce a cycle between stages.
    """
    def __init__(self, nodes: Iterable[Node]):
        groups = defaultdict(list)
        for n in nodes:
            groups[n.stage_id].append(n)
        self._nodes: Dict[(int, PartitionNode)] = {idx: PartitionNode(group, idx) for (idx, group) in groups.items()}
    @property
    def n_stages(self) -> int:
        return len({n.stage_id for n in self.nodes})
    def __getitem__(self, idx: int) -> PartitionNode:
        return self._nodes[idx]
    def move_node(self, node: Node, dst: int):
        """Reassign `node` to stage `dst`, updating all edge counters.

        Also sets node.stage_id = dst (callers rely on this side effect).
        """
        assert (node.stage_id != dst)
        src = node.stage_id
        src_part = self[src]
        dst_part = self[dst]
        src_part.remove_node(node)
        dst_part.add_node(node)
        node.stage_id = dst
        # Re-route every incident edge's per-stage counters from src to dst.
        for i in node.in_edges:
            i_part = self[i.stage_id]
            src_part.remove_in_edge(i.stage_id)
            i_part.remove_out_edge(src)
            i_part.add_out_edge(dst)
            dst_part.add_in_edge(i.stage_id)
        for o in node.out_edges:
            o_part = self[o.stage_id]
            src_part.remove_out_edge(o.stage_id)
            o_part.remove_in_edge(src)
            o_part.add_in_edge(dst)
            self[dst].add_out_edge(o.stage_id)
        # Drop self-edge entries accumulated during the re-routing above.
        for p in self.nodes:
            p._in_edges.pop(p.id, None)
            p._out_edges.pop(p.id, None)
    def move_creates_cycle(self, node: Node, dest: int) -> bool:
        """Tentatively move `node`, test for cycles, then move it back."""
        orig_part = node.stage_id
        self.move_node(node, dest)
        creates_cycle = self.has_cycles()
        self.move_node(node, orig_part)
        return creates_cycle
    @property
    def nodes(self) -> Iterable[PartitionNode]:
        return self._nodes.values()
    def has_cycles(self) -> bool:
        """Kahn's algorithm over partitions: True iff not all are emitted."""
        S = []  # ids of partitions with in-degree zero
        T = []  # emitted partitions (topological order)
        degs = dict()
        for n in self.nodes:
            assert isinstance(n, PartitionNode)
            if (len(n.in_edges) == 0):
                S.append(n.id)
            else:
                degs[n] = len(n.in_edges)
        while S:
            n = self._nodes[S.pop()]
            assert isinstance(n, PartitionNode)
            T.append(n)
            for o in n.out_edges:
                out = self._nodes[o]
                assert isinstance(out, PartitionNode)
                degs[out] -= 1
                if (degs[out] == 0):
                    S.append(o)
        return (len(T) < len(self.nodes))
    def build_dot(self):
        """Return a graphviz Digraph of the quotient graph.

        NOTE(review): only partition ids 0..16 have an assigned color.
        """
        theme = {'background_color': '#FFFFFF', 'fill_color': '#E8E8E8', 'outline_color': '#000000', 'font_color': '#000000', 'font_name': 'Times', 'font_size': '10', 'margin': '0,0', 'padding': '1.0,0.5'}
        from graphviz import Digraph
        dot = Digraph()
        dot.attr('graph', concentrate='true', bgcolor=theme['background_color'], color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'], margin=theme['margin'], rankdir='TB', pad=theme['padding'])
        dot.attr('node', shape='box', style='filled', margin='0,0', fillcolor=theme['fill_color'], color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'])
        dot.attr('edge', style='solid', color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'])
        colors = {0: 'grey', 1: 'green', 2: 'red', 3: 'yellow', 4: 'orange', 5: 'brown', 6: 'purple', 7: 'pink', 8: 'cyan', 9: 'gold', 10: 'darkolivegreen', 11: 'seagreen', 12: 'thistle', 13: 'plum', 14: 'deeppink', 15: 'lightyellow', 16: 'tan'}
        for node in self.nodes:
            dot.node(str(node.id), label=f'partition:{node.id}', fillcolor=colors[node.id])
            for i in node.in_edges:
                dot.edge(str(i), str(node.id))
        return dot
    def save_as_pdf(self, file_name: str, directory: str):
        """Render the quotient graph to <directory>/<file_name>.pdf.

        A stale PDF of the same name is deleted first; returns self.
        """
        dot = self.build_dot()
        dot.format = 'pdf'
        import os
        if os.path.exists(f'{directory}/{file_name}.pdf'):
            os.remove(f'{directory}/{file_name}.pdf')
        dot.render(file_name, directory=directory, cleanup=True)
        return self
    def print_stats(self, node_weights: Dict[(Node, float)], edge_weights: Dict[(Tuple[(Node, Node)], float)]):
        """Print volumes, edge cut and per-partition stats to stdout.

        Heavy cut edges (weight >= 1000) are reported individually.
        """
        volumes = defaultdict((lambda : 0))
        edge_cut = 0
        number_of_cutting_edges = 0
        for partition in self.nodes:
            for n in partition:
                volumes[partition.id] += node_weights[n]
                for o in n.out_edges:
                    if (n.stage_id != o.stage_id):
                        if (edge_weights[(n, o)] >= 1000):
                            print(f'{n.id}=>{o.id}')
                            print(f'{n.stage_id}=>{o.stage_id}')
                            print(f'{n.value_type}')
                            print(f'''weight:{edge_weights[(n, o)]:.2f}
''')
                        edge_cut += edge_weights[(n, o)]
                        number_of_cutting_edges += 1
        total_volume = sum(volumes.values())
        avg_volume = (total_volume / len(volumes))
        print(f'total number of nodes: {len(node_weights)}')
        print(f'total number of edges: {len(edge_weights)}')
        print(f'total weight: {total_volume:.2f}')
        print(f'avg weight: {avg_volume:.2f}')
        print(f'number of cutting edges: {number_of_cutting_edges}')
        print(f'edge cut: {edge_cut:.2f}')
        print('partition stats:')
        for i in range(len(volumes)):
            print(f' partition {i}')
            print(f' number of nodes {len(self._nodes[i])}')
            print(f''' partition volume: {volumes[i]:.2f}
''')
    def selfcheck(self):
        """Assert internal consistency: ids, disjoint node sets, symmetric
        edge counters, no self-edges, and acyclicity."""
        visited = set()
        for (idx, n) in self._nodes.items():
            assert (idx == n.id)
            for u in n.nodes:
                assert (u.stage_id == idx)
                assert (u not in visited)
                visited.add(u)
            for (i, v) in n._in_edges.items():
                assert (v >= 0), (idx, i, v)
            assert (idx not in n._in_edges)
            for i in n.in_edges:
                assert (idx in self._nodes[i].out_edges), (idx, i)
            for (o, v) in n._out_edges.items():
                assert (v >= 0), (idx, o, v)
            assert (idx not in n._out_edges)
            for o in n.out_edges:
                assert (idx in self._nodes[o].in_edges), (idx, o)
        assert (not self.has_cycles())
|
class VerticeStageConnections():
def __init__(self, nodes):
self._in_connections = dict()
self._out_connections = dict()
for n in nodes:
self._in_connections[n] = defaultdict((lambda : 0))
self._out_connections[n] = defaultdict((lambda : 0))
for n in nodes:
for u in n.in_edges:
self._in_connections[n][u.stage_id] += 1
self._out_connections[u][n.stage_id] += 1
def add_in_connection(self, n, src: int):
self._in_connections[n][src] += 1
def add_out_connection(self, n, dest: int):
self._out_connections[n][dest] += 1
def remove_in_connection(self, n, src: int):
self._in_connections[n][src] -= 1
def remove_out_connection(self, n, dest: int):
self._out_connections[n][dest] -= 1
def has_in_connection(self, n, src: int) -> bool:
return (self._in_connections[n][src] > 0)
def has_out_connection(self, n, dest: int) -> bool:
return (self._out_connections[n][dest] > 0)
def in_connections(self, n, src: int) -> int:
return self._in_connections[n][src]
def out_connections(self, n, dst: int) -> int:
return self._out_connections[n][dst]
def move_node(self, n, dest: int):
for u in n.in_edges:
self.remove_out_connection(u, n.stage_id)
self.add_out_connection(u, dest)
for o in n.out_edges:
self.remove_in_connection(o, n.stage_id)
self.add_in_connection(o, dest)
|
class Path():
    """Endpoint bookkeeping for one path during path contraction.

    A new Path is the trivial zero-length path of a single vertex.
    """

    def __init__(self, v):
        self.start = v
        self.end = v
        self.length = 0  # number of edges on the path
        self.active = True

    def is_cycle(self) -> bool:
        # A zero-length path trivially starts and ends at the same vertex,
        # so a cycle additionally requires at least one edge.
        return (self.length > 0) and (self.start is self.end)
|
class PathSet():
    """Disjoint set of vertex paths used by the path-growing matching heuristic.

    Every vertex starts as its own trivial path. add_if_eligible() tries to
    attach an edge, either merging two distinct paths or closing a path of
    odd length into an even cycle. Traversal is kept in doubly-linked
    `next`/`prev` pointers plus the graph edge realizing each pointer.

    NOTE(review): `paths[v]` is only re-pointed for path *endpoints* when
    paths merge; entries for interior vertices may go stale. Callers must
    only trust `paths[v]` via an endpoint (as find_max_matching does).
    """

    def __init__(self, graph_nodes: Iterable[Node]):
        # vertex -> the Path object containing it (exact for endpoints)
        self.paths = {v: Path(v) for v in graph_nodes}
        # a vertex pointing at itself marks a free side with no edge yet
        self.next: Dict[(Node, Node)] = {v: v for v in graph_nodes}
        self.prev: Dict[(Node, Node)] = {v: v for v in graph_nodes}
        # the original graph edge realizing each pointer (None while unset)
        self.next_edge: Dict[(Node, Optional[Tuple[(Node, Node)]])] = {v: None for v in graph_nodes}
        self.prev_edge: Dict[(Node, Optional[Tuple[(Node, Node)]])] = {v: None for v in graph_nodes}
        self.n_active_paths = len(self.paths)

    def is_endpoint(self, v: Node) -> bool:
        # an endpoint still has at least one pointer aimed at itself
        return ((self.next[v] is v) or (self.prev[v] is v))

    def next_vertex(self, v: Node) -> Node:
        return self.next[v]

    def prev_vertex(self, v: Node) -> Node:
        return self.prev[v]

    def edge_to_next(self, v: Node) -> Optional[Tuple[(Node, Node)]]:
        return self.next_edge[v]

    def edge_to_prev(self, v: Node) -> Optional[Tuple[(Node, Node)]]:
        return self.prev_edge[v]

    def add_if_eligible(self, edge: Tuple[(Node, Node)]) -> bool:
        """Attach *edge* if both endpoints are free path ends in the same stage.

        Merges two distinct paths, or closes an odd-length path into an even
        cycle. NOTE(review): only the cycle-closing branch reaches the
        explicit ``return True``; callers currently ignore the return value.
        """
        (src, dst) = edge
        src_path = self.paths[src]
        dst_path = self.paths[dst]
        assert (src is not dst)
        if (src.stage_id != dst.stage_id):
            # matching may only contract vertices inside a single stage
            return False
        if (not (self.is_endpoint(src) and self.is_endpoint(dst))):
            return False
        assert (src_path.active and dst_path.active)
        if (src_path.is_cycle() or dst_path.is_cycle()):
            # closed cycles accept no further edges
            return False
        if (src_path is not dst_path):
            # merge: dst's path is absorbed into src's path
            self.n_active_paths -= 1
            src_path.length += (dst_path.length + 1)
            # re-point the surviving far endpoint of the absorbed path
            if ((src_path.start is src) and (dst_path.start is dst)):
                self.paths[dst_path.end] = src_path
                src_path.start = dst_path.end
            elif ((src_path.start is src) and (dst_path.end is dst)):
                self.paths[dst_path.start] = src_path
                src_path.start = dst_path.start
            elif ((src_path.end is src) and (dst_path.start is dst)):
                self.paths[dst_path.end] = src_path
                src_path.end = dst_path.end
            elif ((src_path.end is src) and (dst_path.end is dst)):
                self.paths[dst_path.start] = src_path
                src_path.end = dst_path.start
            # hook the edge into whichever pointer side of src is still free
            if (self.next[src] is src):
                assert (self.next_edge[src] is None)
                self.next[src] = dst
                self.next_edge[src] = edge
            else:
                assert (self.prev_edge[src] is None)
                self.prev[src] = dst
                self.prev_edge[src] = edge
            if (self.next[dst] is dst):
                assert (self.next_edge[dst] is None)
                self.next[dst] = src
                self.next_edge[dst] = edge
            else:
                assert (self.prev_edge[dst] is None)
                self.prev[dst] = src
                self.prev_edge[dst] = edge
            dst_path.active = False
        elif ((src_path.length % 2) == 1):
            # same path, odd length: closing it yields an even cycle,
            # which still admits a perfect matching on its edges
            src_path.length += 1
            if (self.next[src_path.start] is src_path.start):
                self.next[src_path.start] = src_path.end
                self.next_edge[src_path.start] = edge
            else:
                self.prev[src_path.start] = src_path.end
                self.prev_edge[src_path.start] = edge
            if (self.next[src_path.end] is src_path.end):
                self.next[src_path.end] = src_path.start
                self.next_edge[src_path.end] = edge
            else:
                self.prev[src_path.end] = src_path.start
                self.prev_edge[src_path.end] = edge
            # endpoints coincide from now on: the path is a cycle
            src_path.end = src_path.start
            return True
        return False

    def active_paths(self) -> Set[Path]:
        """All paths that were not absorbed into another path."""
        paths = [p for p in self.paths.values() if p.active]
        return set(paths)
|
class SimpleNode():
    """A minimal graph vertex: an id, a stage assignment and adjacency sets."""

    def __init__(self, idx, stage_id):
        self.id = idx
        self.stage_id = stage_id
        # adjacency is kept as sets of SimpleNode references
        self.in_edges = set()
        self.out_edges = set()

    def add_in_edge(self, node):
        """Register *node* as a predecessor."""
        self.in_edges.add(node)

    def add_out_edge(self, node):
        """Register *node* as a successor."""
        self.out_edges.add(node)
|
class ContractedGraph():
    """A weighted multigraph obtained by merging matched nodes of a finer graph.

    Weights and parameter counts of merged nodes are accumulated on the
    matching representative; edges internal to a merged group disappear and
    parallel edges between groups are summed into one.
    """

    def __init__(self, in_edges, partition, node_weights, edge_weights, params_per_node, matching):
        # one SimpleNode per matching representative, keeping its stage id
        self._nodes: Dict[(int, SimpleNode)] = dict()
        for n in set(matching.values()):
            self._nodes[n] = SimpleNode(n, partition[n])
        self._node_weights = defaultdict((lambda : 0))
        self._edge_weights = defaultdict((lambda : 0))
        self._params_per_node = defaultdict((lambda : 0))
        for n in node_weights.keys():
            matched = matching[n]
            # accumulate each original node's weight onto its representative
            self._node_weights[self._nodes[matched]] += node_weights[n]
            self._params_per_node[self._nodes[matched]] += params_per_node[n]
            for i in in_edges[n]:
                matched_i = matching[i]
                if (matched_i == matched):
                    # edge collapsed inside a merged pair: drop it
                    continue
                self._nodes[matched].add_in_edge(self._nodes[matched_i])
                self._nodes[matched_i].add_out_edge(self._nodes[matched])
                # parallel edges between two groups accumulate into one weight
                self._edge_weights[(self._nodes[matched_i], self._nodes[matched])] += edge_weights[(i, n)]

    @property
    def n_stages(self) -> int:
        # number of distinct stage ids currently assigned
        return len({n.stage_id for n in self.nodes})

    def __len__(self) -> int:
        return len(self._nodes)

    def __getitem__(self, idx) -> SimpleNode:
        return self._nodes[idx]

    def node_weight(self, n) -> float:
        return self._node_weights[n]

    def edge_weight(self, u, v) -> float:
        return self._edge_weights[(u, v)]

    def params_per_node(self, n) -> float:
        return self._params_per_node[n]

    @property
    def nodes(self) -> Iterable[SimpleNode]:
        return self._nodes.values()

    def selfcheck(self) -> 'ContractedGraph':
        """Assert structural invariants; on failure dump the graph as a PDF."""
        try:
            for (idx, n) in self._nodes.items():
                assert (n.id == idx)
                assert (n in self._node_weights)
                for u in n.in_edges:
                    # stage order must be monotone along edges
                    assert (n.stage_id >= u.stage_id)
                    assert (n in u.out_edges)
                    assert ((u, n) in self._edge_weights)
                    assert (u in self._node_weights)
                    assert (u.id in self._nodes)
                for o in n.out_edges:
                    assert (o.stage_id >= n.stage_id)
                    assert (n in o.in_edges)
                    assert ((n, o) in self._edge_weights)
                    assert (o in self._node_weights)
                    assert (o.id in self._nodes)
            return self
        except AssertionError as e:
            self.save_as_pdf('selfcheck_error', '.')
            raise e

    @classmethod
    def contract(cls, contracted_graph, matching) -> 'ContractedGraph':
        """Build a coarser graph by applying *matching* to *contracted_graph*."""
        in_edges = dict()
        partition = dict()
        node_weights = dict()
        edge_weights = dict()
        params_per_node = dict()
        for n in contracted_graph.nodes:
            node_weights[n.id] = contracted_graph.node_weight(n)
            params_per_node[n.id] = contracted_graph.params_per_node(n)
            partition[n.id] = n.stage_id
            us = set()
            for u in n.in_edges:
                us.add(u.id)
                edge_weights[(u.id, n.id)] = contracted_graph.edge_weight(u, n)
            in_edges[n.id] = us
        return cls(in_edges, partition, node_weights, edge_weights, params_per_node, matching)

    @classmethod
    def from_Graph(cls, graph: Graph, node_weights, edge_weights, params_per_node) -> 'ContractedGraph':
        """Build the initial (uncontracted) ContractedGraph from a full Graph."""
        # re-key everything by node id instead of node object
        node_weights = {n.id: w for (n, w) in node_weights.items()}
        edge_weights = {(u.id, v.id): w for ((u, v), w) in edge_weights.items()}
        params_per_node = {n.id: p for (n, p) in params_per_node.items()}
        in_edges = dict()
        partition = dict()
        for n in graph.nodes:
            in_edges[n.id] = {u.id for u in n.in_edges}
            partition[n.id] = n.stage_id
        # identity matching: every node represents itself
        initial_matching = {n: n for n in node_weights}
        return cls(in_edges, partition, node_weights, edge_weights, params_per_node, initial_matching)

    def build_dot(self):
        """Return a graphviz Digraph representation of the graph,
        with nodes colored by stage and labeled by weight."""
        theme = {'background_color': '#FFFFFF', 'fill_color': '#E8E8E8', 'outline_color': '#000000', 'font_color': '#000000', 'font_name': 'Times', 'font_size': '10', 'margin': '0,0', 'padding': '1.0,0.5'}
        from graphviz import Digraph
        dot = Digraph()
        dot.attr('graph', concentrate='true', bgcolor=theme['background_color'], color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'], margin=theme['margin'], rankdir='TB', pad=theme['padding'])
        dot.attr('node', shape='box', style='filled', margin='0,0', fillcolor=theme['fill_color'], color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'])
        dot.attr('edge', style='solid', color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'])
        # stage id -> fill color (supports up to 17 stages)
        colors = {0: 'grey', 1: 'green', 2: 'red', 3: 'yellow', 4: 'orange', 5: 'brown', 6: 'purple', 7: 'pink', 8: 'cyan', 9: 'gold', 10: 'darkolivegreen', 11: 'seagreen', 12: 'thistle', 13: 'plum', 14: 'deeppink', 15: 'lightyellow', 16: 'tan'}
        for node in self._nodes.values():
            dot.node(str(node.id), label=f'''Node:{node.id}
weight:{self.node_weight(node)}''', fillcolor=colors[node.stage_id])
            for i in node.in_edges:
                dot.edge(str(i.id), str(node.id), label=f'weight:{self.edge_weight(i, node)}')
        return dot

    def save_as_pdf(self, file_name: str, directory: str):
        """Render the graph and save it as ``<directory>/<file_name>.pdf``,
        overwriting any existing file of that name. Returns self."""
        dot = self.build_dot()
        dot.format = 'pdf'
        import os
        if os.path.exists(f'{directory}/{file_name}.pdf'):
            os.remove(f'{directory}/{file_name}.pdf')
        dot.render(file_name, directory=directory, cleanup=True)
        return self
|
def coarsening(graph: Graph, node_weights: Dict[(SimpleNode, float)], edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)], params_per_node: Dict[(SimpleNode, float)]) -> List[Tuple[(ContractedGraph, Dict[(int, int)], ContractedGraph)]]:
    """Build a multilevel hierarchy of successively coarser graphs.

    Repeatedly finds a heavy matching and contracts it, until a contraction
    no longer shrinks the graph. Returns a list of
    (finer_graph, matching, coarser_graph) triples, finest first.
    """
    coarse = ContractedGraph.from_Graph(graph, node_weights, edge_weights, params_per_node)
    hierarchy = []
    prev_size = len(coarse)
    finer = coarse
    while True:
        matching, _ = find_max_matching(coarse._node_weights, coarse._edge_weights)
        coarse = coarse.contract(coarse, matching)
        if len(coarse) == prev_size:
            # contraction stalled: no matched pair reduced the graph
            break
        hierarchy.append((finer, matching, coarse))
        prev_size = len(coarse)
        finer = coarse
    return hierarchy
|
def refine(fine_graph: ContractedGraph, coarse_graph: ContractedGraph, matching: Dict[(int, int)]):
    """Project the coarse graph's stage assignment back onto the finer graph.

    Each fine node inherits the stage of the coarse node it was matched into.
    """
    for fine_node in fine_graph.nodes:
        coarse_node = coarse_graph[matching[fine_node.id]]
        fine_node.stage_id = coarse_node.stage_id
|
def find_max_matching(node_weights: Dict[('SimpleNode', float)], edge_weights: Dict[(Tuple[('SimpleNode', 'SimpleNode')], float)]) -> Tuple[(Dict[(int, int)], float)]:
    """Find a heavy vertex matching via the path-growing heuristic.

    Edges are rated by edge_rating(), greedily grown into vertex-disjoint
    paths/cycles (PathSet), and an optimal matching is extracted from each
    path/cycle by dynamic programming (max_path_matching).

    Returns (matching, weight) where matching maps each node id to its
    partner's id (unmatched nodes map to themselves).
    """
    edges = list(edge_weights.keys())
    edge_ratings = {e: edge_rating(e[0], e[1], edge_weights, node_weights) for e in edges}
    # shuffle before sorting so equally-rated edges tie-break randomly
    random.shuffle(edges)
    edges = sorted(edges, key=(lambda e: edge_ratings[e]), reverse=True)
    pathset = PathSet(node_weights.keys())
    for edge in edges:
        pathset.add_if_eligible(edge)
    max_match = []
    max_match_weight = 0
    for node in node_weights.keys():
        path = pathset.paths[node]
        if (not path.active):
            continue
        if (path.end is not node):
            # visit each path exactly once, via its recorded end vertex
            continue
        if (path.length == 0):
            continue
        if path.is_cycle():
            # even cycle: drop either the first or the last edge and take
            # the better of the two resulting path matchings
            unpacked_cycle = unpack_path(pathset, path)
            first_edge = unpacked_cycle.pop(0)
            (match_a, match_a_weight) = max_path_matching(unpacked_cycle, edge_ratings)
            unpacked_cycle.insert(0, first_edge)
            last_edge = unpacked_cycle.pop()
            (match_b, match_b_weight) = max_path_matching(unpacked_cycle, edge_ratings)
            unpacked_cycle.append(last_edge)
            if (match_a_weight > match_b_weight):
                match = match_a
                match_weight = match_a_weight
            else:
                match = match_b
                match_weight = match_b_weight
        elif (path.length == 1):
            # single-edge path: the edge may hang off either pointer side
            if (pathset.next_vertex(path.end) is path.start):
                edge = pathset.edge_to_next(path.end)
            else:
                edge = pathset.edge_to_prev(path.end)
                # BUGFIX: the original asserted next_vertex(...) is start here,
                # i.e. exactly the condition that just failed; in this branch
                # the partner must be reachable through the prev pointer.
                assert (pathset.prev_vertex(path.end) is path.start)
            (match, match_weight) = ([edge], edge_ratings[edge])
        else:
            unpacked_path = unpack_path(pathset, path)
            (match, match_weight) = max_path_matching(unpacked_path, edge_ratings)
        max_match.extend(match)
        max_match_weight += match_weight
    matching = {u.id: v.id for (u, v) in max_match}
    # unmatched vertices map to themselves
    for n in node_weights.keys():
        if (n.id not in matching):
            matching[n.id] = n.id
    return (matching, max_match_weight)
|
def max_path_matching(unpacked_path: List[Tuple[('Node', 'Node')]], edge_ratings: Dict[(Tuple[('Node', 'Node')], float)]) -> Tuple[(List[Tuple[('Node', 'Node')]], float)]:
    """Maximum-weight matching on a simple path, by dynamic programming.

    *unpacked_path* lists the path's edges in order, so two consecutive
    entries share a vertex and may not both be selected.

    Returns (selected edges, total rating of the selection).
    """
    k = len(unpacked_path)
    if (k == 1):
        return (list(unpacked_path), edge_ratings[unpacked_path[0]])
    # ratings[i]: best matching weight using only edges[0..i]
    # decision[i]: edge i is part of that best matching
    ratings = ([0] * k)
    decision = ([False] * k)
    ratings[0] = edge_ratings[unpacked_path[0]]
    decision[0] = True
    second = edge_ratings[unpacked_path[1]]
    # BUGFIX: the original stored edge_ratings[path[1]] unconditionally,
    # but the DP table must hold the running optimum max(r0, r1); otherwise
    # every step i >= 3 builds on a corrupted value whenever edge 0 is the
    # heavier of the first two (e.g. ratings [5,1,1,5] returned 6, not 10).
    if (ratings[0] < second):
        decision[1] = True
        ratings[1] = second
    else:
        ratings[1] = ratings[0]
    for i in range(2, k):
        cur_w = edge_ratings[unpacked_path[i]]
        if ((cur_w + ratings[(i - 2)]) > ratings[(i - 1)]):
            decision[i] = True
            ratings[i] = (cur_w + ratings[(i - 2)])
        else:
            decision[i] = False
            ratings[i] = ratings[(i - 1)]
    # with a correct table the optimum is always in the last cell
    match_weight = ratings[(- 1)]
    # backtrack: taking edge i forbids edge i-1
    match = []
    i = (k - 1)
    while (i >= 0):
        if decision[i]:
            match.append(unpacked_path[i])
            i -= 2
        else:
            i -= 1
    return (match, match_weight)
|
def unpack_path(pathset: PathSet, path: Path) -> List[Tuple[(Node, Node)]]:
    """Walk *path* through the pathset's linked pointers and return its edges.

    The walk starts from path.end and follows whichever of the next/prev
    pointers does not point back to the vertex we just came from, collecting
    the graph edge realizing each step, until path.start is reached.
    For a cycle (start is end) the walk first steps off the head via the
    'next' pointer so the loop below terminates.
    """
    assert path.active
    head = path.start
    prev = path.end
    next_v = None
    current = prev
    unpacked_path = []
    if (prev is head):
        # cycle: leave the head immediately, recording the departing edge
        current = pathset.next_vertex(prev)
        unpacked_path.append(pathset.edge_to_next(prev))
    while (current is not head):
        if (pathset.next_vertex(current) is prev):
            # arrived via current's 'next' side, so continue through 'prev'
            next_v = pathset.prev_vertex(current)
            unpacked_path.append(pathset.edge_to_prev(current))
        else:
            next_v = pathset.next_vertex(current)
            unpacked_path.append(pathset.edge_to_next(current))
        (prev, current) = (current, next_v)
    return unpacked_path
|
def edge_rating(u: Node, v: Node, edge_weights: Dict[(Tuple[(Node, Node)], float)], node_weights: Dict[(Node, float)]) -> float:
    """Rate edge (u, v) for matching: heavy edges between light vertices win.

    The rating is weight(u,v)^2 / (1 + weight(u) * weight(v)); the +1 keeps
    the denominator positive for zero-weight vertices.
    """
    weight = edge_weights[(u, v)]
    denominator = 1 + (node_weights[u] * node_weights[v])
    return (weight * weight) / denominator
|
def visualize_matching(nodes, matching, file_name: str, directory: str):
    """Render *nodes* with graphviz, highlighting the matched edges.

    Nodes are filled by their stage's color; every (u, v) pair in *matching*
    gets its own random edge color, all other edges are drawn black.
    The rendering is saved as ``<directory>/<file_name>.pdf``.
    """
    theme = {'background_color': '#FFFFFF', 'fill_color': '#E8E8E8', 'outline_color': '#000000', 'font_color': '#000000', 'font_name': 'Times', 'font_size': '10', 'margin': '0,0', 'padding': '1.0,0.5'}
    from graphviz import Digraph
    dot = Digraph()
    dot.attr('graph', concentrate='true', bgcolor=theme['background_color'], color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'], margin=theme['margin'], rankdir='TB', pad=theme['padding'])
    dot.attr('node', shape='box', style='filled', margin='0,0', fillcolor=theme['fill_color'], color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'])
    dot.attr('edge', style='solid', color=theme['outline_color'], fontsize=theme['font_size'], fontcolor=theme['font_color'], fontname=theme['font_name'])
    # stage id -> node fill color (supports up to 17 stages)
    partition_color = {0: 'grey', 1: 'green', 2: 'red', 3: 'yellow', 4: 'orange', 5: 'brown', 6: 'purple', 7: 'pink', 8: 'cyan', 9: 'gold', 10: 'darkolivegreen', 11: 'seagreen', 12: 'thistle', 13: 'plum', 14: 'deeppink', 15: 'lightyellow', 16: 'tan'}
    def random_color():
        # random 6-digit hex color
        return ('#' + ''.join([random.choice('0123456789ABCDEF') for j in range(6)]))
    # draw one distinct random color per matched pair (avoid node colors)
    matching_colors = set()
    while (len(matching_colors) < len(matching)):
        c = random_color()
        if (c not in partition_color):
            matching_colors.add(c)
    edge_colors = {(u, v): c for ((u, v), c) in zip(matching.items(), matching_colors)}
    for node in nodes:
        dot.node(str(node.id), label=node.scope, fillcolor=partition_color[node.stage_id])
        for i in node.in_edges:
            # unmatched edges fall back to black
            dot.edge(str(i.id), str(node.id), color=edge_colors.get((i.id, node.id), '#000000'))
    dot.format = 'pdf'
    import os
    if os.path.exists(f'{directory}/{file_name}.pdf'):
        os.remove(f'{directory}/{file_name}.pdf')
    dot.render(file_name, directory=directory, cleanup=True)
|
def partition_and_match_weights_until_last_partition_is_with_no_recomputation(graph: Graph, weights: Dict[(Node, FullExecTimes)], partitioning_method, partition_profiled_graph_fn, n_runs_limit=10, do_exhustive_search_for_last_partition=True, max_memory_usage_r=None, max_memory_usage_nr=None):
    """Iterate partitioning until the produced last stage matches the guess.

    The last pipeline stage uses no-recomputation weights while all other
    nodes use recomputation weights; but which nodes land in the last stage
    is only known after partitioning. This fixed-point loop guesses the
    last-stage scopes, partitions, and feeds the produced last stage back
    as the next guess, until the mismatch count ('mistakes') is within the
    allowed tolerance or the run limit is hit. On failure the best run seen
    is restored, optionally followed by an exhaustive suffix search.

    NOTE(review): max_memory_usage_nr is currently unused (see the warning
    below) — only the recomputation memory limits are applied.
    """
    print('-I- partition_and_match_weights_until_last_partition_is_with_no_recomputation')
    warnings.warn('need to set max memory usage: currently doing this only for recomputation.')
    if max_memory_usage_r:
        for node in graph.nodes:
            if (node.scope in max_memory_usage_r):
                node.max_memory_bytes = max_memory_usage_r[node.scope]
    # snapshot so every iteration starts from the same clean graph
    saved_state = graph.state()
    allowed_mistakes = 0
    if (partitioning_method == 'ACYCLIC'):
        # the acyclic partitioner is noisier; tolerate a couple of mismatches
        allowed_mistakes += 2
    last_partition_scopes = set()
    # force at least one iteration
    current_mistakes = (allowed_mistakes + 1)
    n_runs = 0
    history = dict()
    while ((current_mistakes > allowed_mistakes) and ((n_runs_limit < 0) or (n_runs < n_runs_limit))):
        n_runs += 1
        (current_mistakes, d, generated_last_stage_scopes, graph) = partition_and_check(Graph.from_state(saved_state), last_partition_scopes, partition_profiled_graph_fn, weights)
        history[n_runs] = dict(last_partition_scopes=last_partition_scopes, generated_last_stage_scopes=generated_last_stage_scopes, d=d, graph_state=graph.state())
        # feed the produced last stage back as the next guess
        last_partition_scopes = generated_last_stage_scopes
        print(f'run:{n_runs}', d)
    if (not (current_mistakes > allowed_mistakes)):
        print(f'Success! got {current_mistakes} mistakes after {n_runs} runs')
    elif (not ((n_runs_limit < 0) or (n_runs < n_runs_limit))):
        # run limit exhausted without convergence: fall back to best-seen
        print(f'Breaking after reaching run limit of {n_runs_limit}!')
        (current_mistakes, graph, mistakes_min) = restore_best_from_history(saved_state, history, partition_profiled_graph_fn, weights)
        if (current_mistakes != mistakes_min):
            warnings.warn(f'current_mistakes != mistakes_min, {current_mistakes} != {mistakes_min}')
        if ((current_mistakes > 2) and do_exhustive_search_for_last_partition):
            graph = exhustive_search_for_last_partition(saved_state, graph, history, n_runs, partition_profiled_graph_fn, weights, smallest_fp_with_zero_fp=True)
    return graph
|
def exhustive_search_for_last_partition(saved_state, graph, history, n_runs, partition_profiled_graph_fn, weights, smallest_fp_with_zero_fp=False):
    """Exhaustively try suffixes of candidate scopes as the last stage.

    Candidate scopes come from a previous run's generated last stage: either
    the run with zero false-positives and the best (fn, -correct) key
    (smallest_fp_with_zero_fp=True), or simply the first run. Every suffix
    of the topologically sorted candidates is tried as the guessed last
    stage, and the best-scoring resulting graph is returned.
    """
    if smallest_fp_with_zero_fp:
        # candidates: runs with zero false-positives, keyed by (fn, -correct)
        cands = []
        for (i, v) in history.items():
            d = v['d']
            if (d['fp'] > 0):
                continue
            cands.append((i, (d['fn'], (- d['correct']))))
        best = cands[0]
        for c in cands[1:]:
            # BUGFIX: the original compared c[1] against the whole
            # (run_index, key) tuple instead of the key, which orders by run
            # index first and raises TypeError (int vs tuple) on ties.
            if (c[1] < best[1]):
                best = c
        possible_scopes = set(history[best[0]]['generated_last_stage_scopes'])
    else:
        possible_scopes = set(history[1]['generated_last_stage_scopes'])
    # order candidate scopes by node id (a topological proxy)
    scope_to_id = {}
    for n in graph.nodes:
        if (n.scope in possible_scopes):
            scope_to_id[n.scope] = n.id
    topo_sorted_scopes = sorted(possible_scopes, key=(lambda x: scope_to_id[x]))
    print("Guessing prev-option didn't converge,")
    print('Doing exhaustive search over last stage IDs and taking best fit')
    exhaustive_search_history = dict()
    for i in range(len(topo_sorted_scopes)):
        # try every suffix of the sorted candidates as the last stage
        last_partition_scopes = topo_sorted_scopes[i:]
        (current_mistakes, d, generated_last_stage_scopes, graph) = partition_and_check(Graph.from_state(saved_state), last_partition_scopes, partition_profiled_graph_fn, weights)
        exhaustive_search_history[i] = dict(last_partition_scopes=last_partition_scopes, generated_last_stage_scopes=generated_last_stage_scopes, d=d, graph_state=graph.state())
        print(f'final_countdown_iteration:{i}/{len(topo_sorted_scopes)}', d)
    (current_mistakes, graph, mistakes_min) = restore_best_from_history(saved_state, exhaustive_search_history, partition_profiled_graph_fn, weights)
    return graph
|
def restore_best_from_history(saved_state, history, partition_profiled_graph_fn, weights):
    """Pick the history entry with the fewest mistakes and restore its graph.

    If the entry carries a saved graph state it is restored directly;
    otherwise the partitioning for that entry is recomputed from scratch.
    Returns (current_mistakes, graph, mistakes_min).
    """
    mistake_counts = [v['d']['mistakes'] for v in history.values()]
    i_min = list(history.keys())[int(np.argmin(mistake_counts))]
    mistakes_min = history[i_min]['d']['mistakes']
    print([history[i]['d']['mistakes'] for i in history])
    print(f'Restoring best point in history')
    print(f'Taking best seen: {mistakes_min} mistakes after {i_min} runs')
    min_hist = history[i_min]
    if ('graph_state' in min_hist):
        # fast path: the winning graph was snapshotted during the run
        current_mistakes = mistakes_min
        graph = Graph.from_state(min_hist['graph_state'])
    else:
        # slow path: redo the partitioning that produced the winning entry
        print('Partitioning again to restore history')
        warnings.warn('must start from clear state!')
        best_scopes = history[i_min]['last_partition_scopes']
        (current_mistakes, _d, _scopes, graph) = partition_and_check(Graph.from_state(saved_state), best_scopes, partition_profiled_graph_fn, weights)
    return (current_mistakes, graph, mistakes_min)
|
def partition_and_check(graph, last_partition_scopes, partition_profiled_graph_fn, weights):
    """Weight nodes per the guessed last stage, partition, and score the guess.

    Nodes whose scope is in *last_partition_scopes* get their
    no-recomputation execution time; everyone else gets the recomputation
    time. After partitioning, the actually-produced last stage is compared
    against the guess: fp = guessed-but-absent, fn = produced-but-unguessed.

    Returns (mistakes, score_dict, produced_last_stage_scopes, graph).
    """
    for node in graph.nodes:
        if node.scope in last_partition_scopes:
            node.weight = weights[node.id].no_recomputation
        else:
            node.weight = weights[node.id].recomputation
    graph = partition_profiled_graph_fn(graph)
    last_stage = max(node.stage_id for node in graph.nodes)
    generated_last_stage_scopes = [node.scope for node in graph.nodes if (node.stage_id == last_stage)]
    guessed = set(last_partition_scopes)
    produced = set(generated_last_stage_scopes)
    correct = len(guessed & produced)
    fp = len(guessed) - correct
    fn = len(produced) - correct
    current_mistakes = fp + fn
    d = dict(correct=correct, fp=fp, fn=fn, mistakes=current_mistakes)
    return (current_mistakes, d, generated_last_stage_scopes, graph)
|
def get_weight_functions(args, verbose=True):
    """Build the node and edge weight heuristics from parsed CLI args.

    Chooses auto-ratio-inference for node weights when requested, otherwise
    a fixed bwd/fwd ratio. Edge weights always model forward activation
    communication only (ratio 0). Returns (node_heuristic, edge_heuristic).
    """
    mult_factor = args.weight_mult_factor
    if args.auto_infer_node_bwd_to_fwd_ratio:
        node = NodeWeightFunctionWithRatioAutoInfer(MULT_FACTOR=mult_factor)
    else:
        node = NodeWeightFunction(bwd_to_fwd_ratio=args.bwd_to_fwd_ratio, MULT_FACTOR=mult_factor)
    warnings.warn('Modeling Communications of activations only. Problematic for some algorithms')
    edge = EdgeWeightFunction(args.bw, bwd_to_fwd_ratio=0, penalize_non_tensors=args.penalize_non_tensors, penalty=args.edge_penalty, MULT_FACTOR=mult_factor)
    if verbose:
        print(f'-I- using heuristics {type(node).__name__} , {type(edge).__name__}')
    return (node, edge)
|
class NodeWeightFunction():
    """Computation-time weight of a node, blending fwd/bwd execution times.

    ratio < 0  -> backward time only;
    ratio == 0 -> forward time only;
    otherwise  -> ratio * backward + forward.
    The result is scaled by MULT_FACTOR.
    """

    def __init__(self, bwd_to_fwd_ratio=(- 1), MULT_FACTOR=10000.0):
        self.ratio = bwd_to_fwd_ratio
        self.MULT_FACTOR = MULT_FACTOR

    def __call__(self, node: Node):
        assert isinstance(node.weight, ExecTimes)
        times = node.weight
        if self.ratio < 0:
            blended = times.backward_time
        elif self.ratio == 0:
            blended = times.forward_time
        else:
            blended = (self.ratio * times.backward_time) + times.forward_time
        return self.MULT_FACTOR * blended
|
class EdgeWeightFunction():
    """Communication-cost heuristic for a graph edge (u, v).

    Estimates the time to send u's output to v, based either on profiled
    compound edge weights (when available) or on tensor volumes divided by
    the relevant bandwidth. Non-communicable values are penalized so the
    partitioner avoids cutting such edges.
    """
    # intra-GPU memory bandwidth (same units as self.bw), used when both
    # endpoints live on the same device
    GPU_MEMORY_BW = 550
    NON_CONTIGIOUS_PENATLY = True

    def __init__(self, bw_GBps, bwd_to_fwd_ratio=(- 1), MULT_FACTOR=10000.0, penalty=10000000.0, penalize_non_tensors=False, ensure_positive=True):
        # network bandwidth between devices
        self.bw = bw_GBps
        # <0: backward comm only; 0: forward only; >0: blend
        self.ratio = bwd_to_fwd_ratio
        self.MULT_FACTOR = MULT_FACTOR
        # weight assigned to edges that must not be cut
        self.penalty = penalty
        self.penalize_non_tensors = penalize_non_tensors
        # clamp results to a small positive value when True
        self.ensure_positive = ensure_positive

    def __call__(self, u: Node, v: Node, non_contig_penalty=False):
        """Return the estimated communication cost of edge (u, v)."""
        if u.compound_edge_weights:
            # profiled per-destination communication volumes take precedence
            if (self.ratio == 0):
                if ((u.gpu_id != v.gpu_id) or ((u.gpu_id is None) and (v.gpu_id is None))):
                    return u.compound_edge_weights[v.id]
                elif (u.gpu_id == v.gpu_id):
                    warnings.warn('experimental - compound weight but on same GPU - should it happen? check it is not an output node.')
                    # same device: rescale by memory-bw / network-bw ratio
                    return (u.compound_edge_weights[v.id] / (self.GPU_MEMORY_BW / self.bw))
                else:
                    raise NotImplementedError('not supported yet')
            elif (self.ratio < 0):
                warnings.warn('using forward activations as backward comm for merged node, ignoring req_grad=False edges')
                if ((u.gpu_id != v.gpu_id) or ((u.gpu_id is None) and (v.gpu_id is None))):
                    return u.compound_edge_weights[v.id]
                elif (u.gpu_id == v.gpu_id):
                    return (u.compound_edge_weights[v.id] / (self.GPU_MEMORY_BW / self.bw))
                else:
                    raise NotImplementedError('not supported yet')
        MB = 1000000.0
        if (u.value_type in [torch.device, torch.dtype, str, slice]):
            # metadata values cannot be communicated: discourage cutting here
            w = self.penalty
        elif (self.penalize_non_tensors and ((u.type is NodeTypes.CONSTANT) or (u.value_type in [int, bool, float]))):
            w = self.penalty
        else:
            bwd_volume = 0
            if ((u.type is NodeTypes.CONSTANT) or (u.value_type in [int, bool, float, torch.Size, type(None)])):
                # scalar-like payload: assume 4 bytes
                volume = 4
            else:
                volume = 0
                # sum tensor volumes; track the gradient volume separately
                for (shape, dtype) in zip(flatten(u.tensor_shape), flatten(u.tensor_dtype)):
                    if isinstance(shape, torch.Size):
                        tmp = reduce(operator.mul, shape, 1)
                        tmp *= torch.empty(1, dtype=dtype).element_size()
                        if (u.req_grad and v.req_grad):
                            # gradients flow back over this edge too
                            bwd_volume += tmp
                    elif ((dtype is type(None)) and (not self.penalize_non_tensors)):
                        warnings.warn('experimentally allowing None inside a tuple.')
                        tmp = 4
                    else:
                        warnings.warn(f'Unknown dtype={dtype}, type(dtype)={type(dtype)}, node:{u}. valtype:{u.value_type} PENALIZING!')
                        return self.penalty
                    volume += tmp
            if ((u.gpu_id is not None) and (v.gpu_id is not None) and (u.gpu_id == v.gpu_id)):
                # same device: intra-GPU memory bandwidth applies
                bw = self.GPU_MEMORY_BW
            else:
                bw = self.bw
            # bytes -> MB -> transfer-time units
            volume /= (MB * bw)
            bwd_volume /= (MB * bw)
            if (self.ratio < 0):
                # backward-communication-only mode
                w = (self.MULT_FACTOR * bwd_volume)
            elif (self.ratio == 0):
                # forward-communication-only mode
                if ((not u.is_contiguous) and non_contig_penalty):
                    # add the cost of a contiguous copy before sending
                    volume += ((volume * bw) / self.GPU_MEMORY_BW)
                w = (self.MULT_FACTOR * volume)
            else:
                w = (self.MULT_FACTOR * (bwd_volume + volume))
        return (max(0.001, w) if self.ensure_positive else w)
|
class NodeWeightFunctionWithRatioAutoInfer():
    """Node weight with the bwd/fwd blend inferred per node.

    Weights each phase by its own share of the total time:
    (bwd^2 + fwd^2) / (bwd + fwd), scaled by MULT_FACTOR.
    """

    def __init__(self, MULT_FACTOR=10000.0):
        self.MULT_FACTOR = MULT_FACTOR

    def __call__(self, node: Node):
        assert isinstance(node.weight, ExecTimes)
        bwd = node.weight.backward_time
        fwd = node.weight.forward_time
        total = bwd + fwd
        if total == 0:
            # unprofiled / zero-cost node: avoid dividing by zero
            return 0
        return (self.MULT_FACTOR * ((bwd * bwd) + (fwd * fwd))) / total
|
class CoarsenedWeightFunction():
    """Cost model for a coarsened stage: computation plus the portion of
    communication that cannot be overlapped with computation.

    Wraps an EdgeWeightFunction (communication) and a NodeWeightFunction
    (computation); both must share the same MULT_FACTOR scale, and the node
    heuristic must use ratio == 1 (fwd and bwd weighted equally).
    """

    def __init__(self, edge_weight_function: 'EdgeWeightFunction', node_weight_function: 'NodeWeightFunction', do_critical_path=False):
        self.mode = 'ratio'
        self.do_critical_path = do_critical_path
        self.ewf = edge_weight_function
        self.nwf = node_weight_function
        # both heuristics must use the same scale for costs to be comparable
        assert (self.nwf.MULT_FACTOR == self.ewf.MULT_FACTOR)
        if (self.nwf.ratio != 1):
            raise NotImplementedError()

    def __call__(self, nodes: 'Iterable[Node]', boarders: 'Optional[Tuple[Set[Tuple[Node, Node]], Set[Node], Set[Tuple[Node, Node]], Set[Node]]]'=None, total_gpu_comp_cost: Optional[float]=None, total_stage_comp_cost_fwd: Optional[float]=None, total_stage_comp_cost_bwd: Optional[float]=None):
        """Total cost of a stage: computation + non-overlapped communication."""
        if boarders:
            (outgoing_edges, _, incomming_edges, _) = boarders
        else:
            (outgoing_edges, _, incomming_edges, _) = self.calculate_borders(nodes)
        if (total_gpu_comp_cost is None):
            (comp_bwd, comp_fwd) = self.calculate_comp(nodes)
            combined_comp_cost = (comp_bwd + comp_fwd)
        else:
            combined_comp_cost = total_gpu_comp_cost
        # communication is assumed to hide behind the stage's whole computation
        overlaped_comp_fwd = combined_comp_cost
        overlaped_comp_bwd = combined_comp_cost
        (comm_bwd, comm_fwd) = self.calculate_comm_forward_and_backward(incomming_edges, outgoing_edges)
        # only the communication exceeding the overlap window adds cost
        cost = ((combined_comp_cost + max(0, (comm_fwd - overlaped_comp_fwd))) + max(0, (comm_bwd - overlaped_comp_bwd)))
        return cost

    def is_comm_bounded_forward(self, node: 'Node'):
        """True when node's outgoing comm is at least its computation cost."""
        outgoing = [(node, nn) for nn in node.out_edges]
        comm = self.calculate_comm_forward(outgoing)
        return (self.nwf(node) <= comm)

    def is_comm_bounded_backward(self, node: 'Node'):
        """True when node's incoming (backward) comm is at least its computation."""
        incomming = [(nn, node) for nn in node.in_edges]
        comm = self.calculate_comm_backward(incomming)
        return (self.nwf(node) <= comm)

    def calculate_comp(self, nodes: 'Iterable[Node]'):
        """Return (backward, forward) computation cost of *nodes*, scaled."""
        if (not self.do_critical_path):
            comp_fwd = sum((node.weight.forward_time for node in nodes))
            comp_bwd = sum((node.weight.backward_time for node in nodes))
        else:
            raise NotImplementedError()
        comp_fwd *= self.nwf.MULT_FACTOR
        comp_bwd *= self.nwf.MULT_FACTOR
        return (comp_bwd, comp_fwd)

    @staticmethod
    def calculate_borders(nodes: 'Iterable[Node]') -> 'Tuple[Set[Tuple[Node, Node]], Set[Node], Set[Tuple[Node, Node]], Set[Node]]':
        """Edges and nodes crossing the boundary of the *nodes* set.

        Returns (outgoing_edges, outgoing_nodes, incoming_edges, incoming_nodes).
        """
        set_nodes = set(nodes)
        outgoing_edges = set()
        incoming_edges = set()
        outgoing_nodes = set()
        incoming_nodes = set()
        for node in nodes:
            for out in node.out_edges:
                if (out not in set_nodes):
                    outgoing_edges.add((node, out))
                    outgoing_nodes.add(node)
            for inode in node.in_edges:
                if (inode not in set_nodes):
                    incoming_edges.add((inode, node))
                    incoming_nodes.add(node)
        return (outgoing_edges, outgoing_nodes, incoming_edges, incoming_nodes)

    def is_comm_bounded(self, nodes: 'Set[Node]', boarders: 'Optional[Tuple[Set[Tuple[Node, Node]], Set[Node], Set[Tuple[Node, Node]], Set[Node]]]'=None):
        """Return (forward_bounded, backward_bounded) for the node set."""
        (comp_bwd, comp_fwd) = self.calculate_comp(nodes)
        combined_comp_cost = (comp_bwd + comp_fwd)
        overlaped_comp_fwd = overlaped_comp_bwd = combined_comp_cost
        if boarders:
            (outgoing_edges, _, incomming_edges, _) = boarders
        else:
            (outgoing_edges, _, incomming_edges, _) = self.calculate_borders(nodes)
        (comm_bwd, comm_fwd) = self.calculate_comm_forward_and_backward(incomming_edges, outgoing_edges)
        is_comm_fwd = (overlaped_comp_fwd < comm_fwd)
        is_comm_bwd = (overlaped_comp_bwd < comm_bwd)
        return (is_comm_fwd, is_comm_bwd)

    def calculate_comm_forward_and_backward(self, incomming_edges, outgoing_edges):
        comm_bwd = self.calculate_comm_backward(incomming_edges)
        comm_fwd = self.calculate_comm_forward(outgoing_edges)
        return (comm_bwd, comm_fwd)

    def calculate_comm_forward(self, outgoing_edges):
        """Forward activation communication over the outgoing boundary edges."""
        # the edge heuristic must already be in forward-only mode
        assert (self.ewf.ratio == 0)
        comm_fwd = sum((self.ewf(*e) for e in outgoing_edges))
        return comm_fwd

    def calculate_comm_backward(self, incomming_edges):
        """Backward (gradient) communication over the incoming boundary edges."""
        # the edge heuristic is stateful: flip it to backward mode, sum, and
        # restore. BUGFIX: restore in a finally block so an exception while
        # evaluating an edge weight cannot leave the shared heuristic stuck
        # in backward mode.
        saved_ratio = self.ewf.ratio
        self.ewf.ratio = (- 1)
        try:
            comm_bwd = sum((self.ewf(*e) for e in incomming_edges))
        finally:
            self.ewf.ratio = saved_ratio
        return comm_bwd
|
class NodeMemoryEstimator():
    """Rough per-node device-memory estimate.

    The estimate is parameter bytes (optionally multiplied to account for
    optimizer state) plus the node's recorded peak activation memory.
    """
    THRESHOLD = (11 * 1000000000.0)

    def __init__(self, optimizer_multiply=1):
        # scales parameter memory to cover optimizer state copies
        self.optimizer_multiply = optimizer_multiply

    @staticmethod
    def cuda_activations_and_grads_mem(u: Node):
        """Bytes of activations (plus grads for req_grad tensors) held by *u*.

        Only LAYER / BUFF_PARAM nodes with tensor outputs contribute;
        scalar-like and metadata values are counted as zero.
        """
        if not ((u.type is NodeTypes.LAYER) or (u.type is NodeTypes.BUFF_PARAM)):
            return 0
        if u.value_type in [int, bool, float, torch.Size, type(None)]:
            return 0
        if (u.type is NodeTypes.CONSTANT) or (u.value_type in [torch.device, torch.dtype, str, slice]):
            return 0
        volume = 0
        bwd_volume = 0
        for shape, dtype in zip(flatten(u.tensor_shape), flatten(u.tensor_dtype)):
            if not isinstance(shape, torch.Size):
                warnings.warn(f'Unknown dtype={dtype}, type(dtype)={type(dtype)}, node:{u}. ignoring volume!')
                continue
            nbytes = reduce(operator.mul, shape, 1)
            nbytes *= torch.empty(1, dtype=dtype).element_size()
            if u.req_grad:
                # gradient buffers double the footprint of trainable tensors
                bwd_volume += nbytes
            volume += nbytes
        return (volume + bwd_volume)

    def __call__(self, node: Node):
        """Total estimated bytes needed to place *node* on a device."""
        byte_per_parameter = 4
        parameter_size = node.num_parameters * byte_per_parameter * self.optimizer_multiply
        activations_size = node.max_memory_bytes
        return activations_size + parameter_size
|
def metis_partition(graph: Graph, num_partitions: int, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None, use_layers_only_graph: bool=True, use_virtual_stages: bool=True, **METIS_opts: Dict) -> Graph:
    """Perform METIS K-way partitioning on the given graph.

    Parameters
    ----------
    graph:
        the Graph to partition
    num_partitions:
        the number of partitions
    node_weight_function:
        an optional weight function for the nodes, a function from Node to int;
        if not given a default weight of 1 will be given to all nodes
    edge_weight_function:
        an optional weight function for the edges, a function (Node, Node) to int;
        if not given a default value of 1 will be given to all edges
    use_layers_only_graph:
        whether to partition a smaller version of the graph containing only the
        layers (useful for big models with lots of unprofiled ops)
    use_virtual_stages:
        treat METIS parts as GPU assignments and derive (possibly more) stages
        from them, instead of using the parts directly as stage ids
    METIS_opts:
        additional kwargs to pass to the METIS partitioning algorithm;
        'attempts' and 'verbose_on_error' are consumed locally
    """
    import nxmetis
    if use_virtual_stages:
        graph.topo_sort()
    if use_layers_only_graph:
        # partition a reduced graph and induce the result back later
        (layers_graph, layers_to_original) = graph.new_graph_without_constants()
        G = layers_graph
    else:
        G = graph
    work_graph = G
    G = G.asNetworkx(directed=False, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function)
    attempts = METIS_opts.pop('attempts', 1)
    verbose_on_error = METIS_opts.pop('verbose_on_error', False)
    options = nxmetis.MetisOptions(**METIS_opts)
    fail = True
    last_exception = None
    for _ in range(attempts):
        (objval, parts) = nxmetis.partition(G, num_partitions, options=options, node_weight='weight', node_size='size', edge_weight='weight', recursive=False)
        # flatten [[nodes of part0], [nodes of part1], ...] into a
        # node-ordered list of part ids
        parts = sorted(((idx, n) for (n, p) in enumerate(parts) for idx in p))
        parts = [n for (_, n) in parts]
        if (not use_virtual_stages):
            for (node, stage_id) in zip(work_graph.nodes, parts):
                node.stage_id = stage_id
        else:
            # treat METIS parts as GPU assignments, then derive stages
            unique_gpu_ids = set()
            bins = defaultdict(list)
            for (node, gpu_id) in zip(work_graph.nodes, parts):
                if (node in work_graph.inputs):
                    continue
                node.gpu_id = gpu_id
                unique_gpu_ids.add(gpu_id)
                bins[gpu_id].append(node)
            nodes = [n for n in work_graph.nodes if (n not in work_graph.inputs)]
            graph.topo_sort(change_graph=False)
            id_to_node = {node.topo_sort_id: node for node in nodes}
            stages_from_bins(graph=work_graph, bins=bins, id_to_node_worked_on=id_to_node)
        try:
            post_process_partition(work_graph, edge_weight_function, assert_output_types=False, verbose_on_error=verbose_on_error)
            fail = False
            break
        except (Exception, RuntimeError, AssertionError) as e:
            # invalid partition (e.g. stage cycle): remember and retry
            last_exception = e
    if fail:
        print(f'-I- METIS could not find a valid partitioning')
        raise last_exception
    n_parts = set(parts)
    actual_nparts = len({n.stage_id for n in work_graph.nodes})
    if (actual_nparts < num_partitions):
        print('This is deprecated....')
        print(f'-I- expected {num_partitions} partitions but only {actual_nparts} found implicating that the model to partition is too small')
        print('consider increasing the depth of graph or disabling the basic blocks option')
        print(f'before post processing there were {n_parts} partitions')
    if use_layers_only_graph:
        # copy stage assignment back onto the full (non-reduced) graph
        graph.induce_layer_partition(work_graph, layers_to_original)
    if use_virtual_stages:
        stage_to_gpu_map = convert_handle_missing_print(bins=bins, graph=graph, verbose=False)
    return graph
|
def post_process_partition(graph: Graph, edge_weight_function, verbose_on_error=True, assert_output_types=False) -> Graph:
    """Post-process a partitioned graph.

    Renumbers stage indices, fails loudly if the stage assignment contains a
    cycle (mutual dependency between partitions), and then validates that only
    tensors cross partition boundaries.

    Parameters
    ----------
    graph:
        the Graph object that was partitioned (modified in place).
    edge_weight_function:
        used to report the weight of offending boundary edges.
    verbose_on_error:
        print extra info when a stage cycle cannot be solved.
    assert_output_types:
        when True, invalid boundary outputs raise; otherwise they are printed.

    Raises
    ------
    AssertionError when the stage assignment is cyclic (or, with
    assert_output_types=True, when a non-tensor crosses a boundary).
    """
    re_assign_partition_indices(graph)
    if has_stage_cycles(graph):
        # Optionally dump the broken partitioning for offline inspection.
        if os.environ.get('DEBUG', False):
            graph.save_as_pdf(f'{graph.model_name}_before_fix', '.')
        if verbose_on_error:
            problems, info = get_problematic_partitions(graph)
            print('-V- printing problematic partitions')
            for problem, extra in zip(problems, info):
                print(problem)
                print(extra)
        n_partitions = len({u.stage_id for u in graph.nodes})
        print('n_partitions:', n_partitions)
        raise AssertionError('error cycle detected mutual dependency between partitions')
    is_valid, error = is_valid_partitioning(graph, edge_weight_function)
    if assert_output_types:
        assert is_valid, error
    elif not is_valid:
        print('Output between partitions is tricky, but allowing this')
        print_all_problematic_outputs_between_partitions(graph, edge_weight_function)
    return graph
|
def get_problematic_partitions(graph):
    """Collect stage-order violations, for debugging detected cycles.

    Returns (problems, info): for every edge whose destination's stage precedes
    its source's stage, problems holds [dst_stage, src_stage] and info holds
    the corresponding [dst_scope, src_scope].
    """
    problems, info = [], []
    for src in graph.nodes:
        for dst in src.out_edges:
            if dst.stage_id >= src.stage_id:
                continue
            problems.append([dst.stage_id, src.stage_id])
            info.append([dst.scope, src.scope])
    return (problems, info)
|
def break_partition_cycles(graph: Graph):
    """Move the target subtree of every backward stage-edge into a new stage.

    A node reached by an edge that goes from a later stage to an earlier one
    is collected as a root; the same-stage subtree of each root group is then
    reassigned to a fresh stage id appended after all existing ones.
    Modifies node stage_ids in place.
    """
    stage_ids = set()
    back_edge_targets = defaultdict(set)
    for src in graph.nodes:
        stage_ids.add(src.stage_id)
        for dst in src.out_edges:
            if src.stage_id > dst.stage_id:
                back_edge_targets[dst.stage_id].add(dst)
    next_stage = len(stage_ids)
    # One fresh stage per group of roots.
    for group in back_edge_targets.values():
        for member in find_subtree(group, len(graph.nodes)):
            member.stage_id = next_stage
        next_stage += 1
|
def find_subtree(roots: Set[Node], graph_size: int):
    """Return the same-stage closure of *roots*.

    Phase 1 walks forward along out-edges that stay in the node's stage.
    Phase 2 walks backward along same-stage in-edges from the collected nodes
    (roots excluded), skipping non-input predecessors whose id is more than
    half the graph away — this bounds how much of the stage gets pulled in.
    """
    collected = set()
    frontier = copy(roots)
    while frontier:
        node = frontier.pop()
        collected.add(node)
        for succ in node.out_edges:
            if succ.stage_id == node.stage_id:
                collected.add(succ)
                frontier.add(succ)
    frontier = copy(collected)
    while frontier:
        node = frontier.pop()
        if node in roots:
            continue
        for pred in node.in_edges:
            if pred.stage_id != node.stage_id:
                continue
            # Ignore far-away (by id) non-input predecessors.
            if pred.type != NodeTypes.IN and (node.id - pred.id) > (graph_size // 2):
                continue
            frontier.add(pred)
            collected.add(pred)
    return collected
|
def is_valid_partitioning(graph: Graph, edge_weight_function):
    """Check that only tensors are sent between partitions.

    Returns (True, '') when every inter-stage edge carries a tensor-like
    value, otherwise (False, msg) describing the first offending edge.
    """
    for n in graph.nodes:
        # These value types cannot be communicated between partitions by the
        # runtime backend, so they must not cross a stage boundary.
        if (n.value_type in {type(None), list, tuple, dict, set, int, bool, float, str, slice, torch.Size, torch.dtype}):
            for o in n.out_edges:
                if (n.stage_id != o.stage_id):
                    msg = f'invalid output type at partition boundary {n.stage_id}=>{o.stage_id}'
                    msg += f'''
output is {n.scope} of type {n.value_type}, weight {edge_weight_function(n, o)}'''
                    # Stop at the first violation.
                    return (False, msg)
    return (True, '')
|
def print_all_problematic_outputs_between_partitions(graph: Graph, edge_weight_function):
    """Print every non-tensor value that crosses a partition boundary.

    Like is_valid_partitioning, but collects and prints ALL violations
    instead of stopping at the first one. Returns None (print-only helper).
    """
    problems = []
    valid_state = True
    for n in graph.nodes:
        # Same non-communicable value types as in is_valid_partitioning.
        if (n.value_type in {type(None), list, tuple, dict, set, int, bool, float, str, slice, torch.Size, torch.dtype}):
            for o in n.out_edges:
                if (n.stage_id != o.stage_id):
                    msg = f'invalid output type at partition boundary {n.stage_id}=>{o.stage_id}'
                    msg += f'''
output is {n.scope} of type {n.value_type}, weight {edge_weight_function(n, o)}'''
                    valid_state = False
                    problems.append(msg)
    s = ((f'''Valid outputs states = {valid_state}
''' + 'problems:\n') + '\n'.join(problems))
    print(s)
|
def greedy_best_fit(graph: Graph, P, node_weight_function, node_mem_estimator: NodeMemoryEstimator):
    """Greedily assign graph nodes to P bins, balancing compute weight under
    a per-bin memory budget.

    Each node goes to the bin with the smallest accumulated compute weight
    that can still hold it memory-wise. If that fails but total memory
    suggests an assignment may exist, falls back to
    largest_memory_first_greedy_best_fit_v1.

    Returns a dict bin_id -> list of nodes.
    Raises RuntimeError when no memory-feasible assignment is found.
    """
    bins = {i: list() for i in range(P)}
    # Min-heap-like dicts keyed per bin: accumulated compute weight / memory.
    bin_weights = heapdict({i: 0 for i in range(P)})
    bin_memory = heapdict({i: 0 for i in range(P)})
    node_to_weight = {n: node_weight_function(n) for n in graph.non_input_nodes}
    # NOTE(review): sorted descending, but dict.popitem() in the loop below
    # pops the LAST entry, so nodes are actually assigned lightest-first —
    # confirm this ordering is intended.
    node_to_weight = dict(sorted(node_to_weight.items(), key=(lambda item: item[1]), reverse=True))
    gpu_mem_threshold_bytes = {i: node_mem_estimator.THRESHOLD for i in bins}
    node_to_mem = {n: node_mem_estimator(n) for n in graph.non_input_nodes}
    def check_memory_fit(candidate, bin_id):
        # True iff candidate fits within bin_id's remaining memory budget.
        if ((node_to_mem[candidate] + bin_memory[bin_id]) > gpu_mem_threshold_bytes[bin_id]):
            print(f'-v- failed to add candidate to GPU {bin_id}')
            return False
        return True
    def choose_bin(node):
        # Try bins from lightest compute weight upward; bins that do not fit
        # are temporarily popped into tmp and restored on success.
        tmp = []
        while bin_weights:
            (bin_id, w) = bin_weights.peekitem()
            if (not check_memory_fit(node, bin_id)):
                tmp.append(bin_weights.popitem())
                continue
            for (i, v) in tmp:
                warnings.warn('it is improbable we got here.')
                bin_weights[i] = v
            return bin_id
        # All bins exhausted: dump diagnostics, then raise.
        print('Could not find an assignment which fits memory')
        print(f'node: {node}')
        print('bins:')
        for x in tmp:
            print(x)
        print('node to mem:')
        pprint(node_to_mem)
        print(f'sum(node_to_mem.values()): {(sum(node_to_mem.values()) * 1e-09)} GB')
        raise RuntimeError('Could not find an assignment which fits memory')
    while node_to_weight:
        (node, node_weight) = node_to_weight.popitem()
        try:
            bin_id = choose_bin(node)
        except RuntimeError as e:
            # If total memory would fit in total capacity, retry with the
            # largest-memory-first heuristic before giving up.
            if (sum(node_to_mem.values()) < sum(gpu_mem_threshold_bytes.values())):
                warnings.warn('Can find assignment using largest memory job first v1')
                try:
                    bins = largest_memory_first_greedy_best_fit_v1(graph, P, node_weight_function, node_mem_estimator)
                    return bins
                except Exception as ee:
                    print(f'-v- largest_memory_first_greedy_best_fit_v1 Failed: {str(ee)}')
            raise e
        bins[bin_id].append(node)
        bin_weights[bin_id] += node_weight
        bin_memory[bin_id] += node_to_mem[node]
    print('bin_memory after greedy assignment:')
    pprint(str(bin_memory))
    print(f'sum(node_to_mem.values()): {(sum(node_to_mem.values()) * 1e-09)} GB')
    return bins
|
def largest_memory_first_greedy_best_fit_v1(graph: Graph, P, node_weight_function, node_mem_estimator: NodeMemoryEstimator):
    """Greedy best-fit fallback that places the most memory-hungry nodes first.

    Used when the weight-ordered greedy assignment fails to fit memory: nodes
    are placed in descending memory order onto the currently lightest
    (by accumulated compute weight) bin that can still hold them.

    Parameters
    ----------
    graph: coarse graph whose non-input nodes are assigned.
    P: number of bins (GPUs).
    node_weight_function: compute-weight estimate per node.
    node_mem_estimator: memory estimate per node; .THRESHOLD is the per-bin cap.

    Returns a dict bin_id -> list of nodes.
    Raises RuntimeError when some node cannot fit on any bin.
    """
    bins = {i: list() for i in range(P)}
    bin_weights = heapdict({i: 0 for i in range(P)})
    bin_memory = heapdict({i: 0 for i in range(P)})
    node_to_weight = {n: node_weight_function(n) for n in graph.non_input_nodes}
    node_to_weight = dict(sorted(node_to_weight.items(), key=(lambda item: item[1]), reverse=True))
    gpu_mem_threshold_bytes = {i: node_mem_estimator.THRESHOLD for i in bins}
    node_to_mem = {n: node_mem_estimator(n) for n in graph.non_input_nodes}
    node_to_mem = dict(sorted(node_to_mem.items(), key=(lambda item: item[1]), reverse=True))
    # BUGFIX: dict.popitem() pops the *last* entry. The pop-copy is therefore
    # kept in ascending memory order so popitem() yields the largest-memory
    # node first, as the function name promises (previously the copy was
    # descending, so the smallest-memory node was processed first).
    node_to_mem_copy = dict(sorted(node_to_mem.items(), key=(lambda item: item[1])))
    def check_memory_fit(candidate, bin_id):
        # True iff candidate fits within bin_id's remaining memory budget.
        if ((node_to_mem[candidate] + bin_memory[bin_id]) > gpu_mem_threshold_bytes[bin_id]):
            print(f'-v- failed to add candidate to GPU {bin_id}')
            return False
        return True
    def choose_bin(node):
        # Try bins from lightest compute weight upward; bins that do not fit
        # are temporarily popped into tmp and restored on success.
        tmp = []
        while bin_weights:
            (bin_id, w) = bin_weights.peekitem()
            if (not check_memory_fit(node, bin_id)):
                tmp.append(bin_weights.popitem())
                continue
            for (i, v) in tmp:
                warnings.warn('it is improbable we got here.')
                bin_weights[i] = v
            return bin_id
        print('Could not find an assignment which fits memory')
        print(f'node: {node}')
        print('bins:')
        for x in tmp:
            print(x)
        print('node to mem:')
        pprint(node_to_mem)
        print(f'sum(node_to_mem.values()): {(sum(node_to_mem.values()) * 1e-09)} GB')
        if (sum(node_to_mem.values()) < sum(gpu_mem_threshold_bytes.values())):
            warnings.warn('Can find assignment using largest memory job first')
        raise RuntimeError('Could not find an assignment which fits memory')
    while node_to_weight:
        # Largest remaining memory consumer first.
        (node, node_mem) = node_to_mem_copy.popitem()
        node_weight = node_to_weight.pop(node)
        bin_id = choose_bin(node)
        bins[bin_id].append(node)
        bin_weights[bin_id] += node_weight
        bin_memory[bin_id] += node_to_mem[node]
    print('bin_memory after greedy assignment:')
    pprint(str(bin_memory))
    print(f'sum(node_to_mem.values()): {(sum(node_to_mem.values()) * 1e-09)} GB')
    return bins
|
def algorithm_u(ns, m):
    """Generate all partitions of the list *ns* into exactly *m* non-empty blocks.

    Knuth's Algorithm U (TAOCP Vol. 4A, 7.2.1.5) via mutually recursive
    forward/backward generators over restricted-growth strings; taken from
    https://codereview.stackexchange.com/questions/1526/finding-all-k-subset-partitions

    Yields lists of m lists; each element of ns appears in exactly one block.
    The number of partitions yielded is the Stirling number S(len(ns), m).
    """
    def visit(n, a):
        # Materialize the current restricted-growth string a[1..n] as blocks:
        # element j goes to block a[j + 1].
        ps = [[] for i in range(m)]
        for j in range(n):
            ps[a[(j + 1)]].append(ns[j])
        return ps
    def f(mu, nu, sigma, n, a):
        # Forward step of Algorithm U (see Knuth for the mu/nu/sigma roles).
        if (mu == 2):
            (yield visit(n, a))
        else:
            for v in f((mu - 1), (nu - 1), ((mu + sigma) % 2), n, a):
                (yield v)
        if (nu == (mu + 1)):
            a[mu] = (mu - 1)
            (yield visit(n, a))
            while (a[nu] > 0):
                a[nu] = (a[nu] - 1)
                (yield visit(n, a))
        elif (nu > (mu + 1)):
            if (((mu + sigma) % 2) == 1):
                a[(nu - 1)] = (mu - 1)
            else:
                a[mu] = (mu - 1)
            if (((a[nu] + sigma) % 2) == 1):
                for v in b(mu, (nu - 1), 0, n, a):
                    (yield v)
            else:
                for v in f(mu, (nu - 1), 0, n, a):
                    (yield v)
            while (a[nu] > 0):
                a[nu] = (a[nu] - 1)
                if (((a[nu] + sigma) % 2) == 1):
                    for v in b(mu, (nu - 1), 0, n, a):
                        (yield v)
                else:
                    for v in f(mu, (nu - 1), 0, n, a):
                        (yield v)
    def b(mu, nu, sigma, n, a):
        # Backward step of Algorithm U (mirror of f).
        if (nu == (mu + 1)):
            while (a[nu] < (mu - 1)):
                (yield visit(n, a))
                a[nu] = (a[nu] + 1)
            (yield visit(n, a))
            a[mu] = 0
        elif (nu > (mu + 1)):
            if (((a[nu] + sigma) % 2) == 1):
                for v in f(mu, (nu - 1), 0, n, a):
                    (yield v)
            else:
                for v in b(mu, (nu - 1), 0, n, a):
                    (yield v)
            while (a[nu] < (mu - 1)):
                a[nu] = (a[nu] + 1)
                if (((a[nu] + sigma) % 2) == 1):
                    for v in f(mu, (nu - 1), 0, n, a):
                        (yield v)
                else:
                    for v in b(mu, (nu - 1), 0, n, a):
                        (yield v)
            if (((mu + sigma) % 2) == 1):
                a[(nu - 1)] = 0
            else:
                a[mu] = 0
        if (mu == 2):
            (yield visit(n, a))
        else:
            for v in b((mu - 1), (nu - 1), ((mu + sigma) % 2), n, a):
                (yield v)
    n = len(ns)
    # Initial restricted-growth string: last m-1 positions pre-filled so the
    # first visit already uses all m blocks.
    a = ([0] * (n + 1))
    for j in range(1, (m + 1)):
        a[((n - m) + j)] = (j - 1)
    return f(m, n, 0, n, a)
|
def exhustive_search(graph: Graph, P, node_weight_function, node_mem_estimator: NodeMemoryEstimator, L):
    """Exhaustively enumerate all partitions of the nodes into P bins and
    return the memory-feasible one that minimizes the heaviest bin's compute.

    Exponential in the number of nodes — only sensible for tiny coarse graphs.
    (The L parameter is accepted for interface compatibility but unused.)

    Returns a dict bin_id -> list of nodes.
    Raises RuntimeError when no assignment satisfies the memory threshold
    (previously this fell through and crashed with a TypeError when indexing
    a None best_solution).
    """
    all_nodes = list(graph.non_input_nodes)
    all_weights = np.array([node_weight_function(x) for x in all_nodes])
    all_mems = np.array([node_mem_estimator(x) for x in all_nodes])
    L_tag = len(all_nodes)
    homogenous_threshold = node_mem_estimator.THRESHOLD
    print(f'Doing exhaustive search ')
    best_m_comp = np.inf
    best_solution = None
    for m in tqdm(algorithm_u(list(range(L_tag)), P), desc='exhustive_search'):
        # Skip assignments whose heaviest bin exceeds the memory budget.
        top_mem = max((np.sum(all_mems[b]) for b in m))
        if (top_mem > homogenous_threshold):
            continue
        # Objective: minimize the heaviest bin's compute weight.
        top_comp = max((np.sum(all_weights[b]) for b in m))
        if (top_comp < best_m_comp):
            best_m_comp = top_comp
            best_solution = deepcopy(m)
    if best_solution is None:
        raise RuntimeError('exhaustive search found no memory-feasible assignment')
    all_nodes = np.array(all_nodes)
    bins = {i: all_nodes[b].tolist() for (i, b) in enumerate(best_solution)}
    return bins
|
def coarsen_prefixes(model: Module, graph: Graph, node_weight_function, edge_weight_function, uf: UnionFind, basic_blocks, special_blocks, depth):
    """Collapse every special block (identified by scope prefix) into one node.

    Within each special block, repeatedly merges a node into a same-block
    successor (cheapest topo-first sweep) until a single node per block
    remains. Modifies *graph* in place.

    Returns (prev_graph, None, graph, uf, uf2, sb_names) where prev_graph is a
    pre-coarsening copy, uf2 tracks only this pass's merges and sb_names lists
    the processed block scopes.

    Raises NotImplementedError when a block cannot be fully collapsed (every
    remaining merge would create a cycle).
    """
    prev_graph = Graph.from_other(graph)
    # Per-pass union-find (uf accumulates across all passes).
    uf2 = UnionFind(elements=graph._nodes.keys())
    nodes = list(graph.non_input_nodes)
    sb_scope_to_nodes = get_marked_nodes_for_prefix_coarsening(module=model, nodes=nodes, basic_blocks=basic_blocks, special_blocks=special_blocks, depth=depth)
    for (sb_scope, sb_nodes) in sb_scope_to_nodes.items():
        set_sb_nodes = set(sb_nodes)
        sb_nodes.sort(key=(lambda n: n.topo_sort_id))
        did_something = True
        # Keep sweeping until nothing merged or one node remains.
        while (did_something and (len(set_sb_nodes) > 1)):
            did_something = False
            for u in sb_nodes:
                if (u not in set_sb_nodes):
                    # Already merged away in an earlier sweep.
                    continue
                for v in sorted(u.out_edges, key=(lambda n: n.topo_sort_id)):
                    if (v not in set_sb_nodes):
                        continue
                    if check_cycle2(graph, u, v):
                        continue
                    graph.merge(uid=u.id, vid=v.id, edge_weight_function=edge_weight_function, uf=uf)
                    uf.union(u.id, v.id)
                    uf2.union(u.id, v.id)
                    set_sb_nodes.discard(v)
                    did_something = True
        if (len(set_sb_nodes) > 1):
            warnings.warn(f'failed to fully coarsen special block. remaining ({len(set_sb_nodes)}): {set_sb_nodes}')
            raise NotImplementedError()
    matching = None
    sb_names = list(sb_scope_to_nodes.keys())
    return (prev_graph, matching, graph, uf, uf2, sb_names)
|
def get_marked_nodes_for_prefix_coarsening(module, nodes, basic_blocks, special_blocks, depth):
    """Map each special-block scope to the graph nodes that belong to it.

    A node belongs to a scope when its own scope — or the scope it was pinned
    to via scope_to_hold_to — starts with the special block's scope prefix.

    Returns an insertion-ordered dict: scope -> list of matching nodes.
    (Cleanup vs. previous version: removed a dead `l = []` assignment and the
    unused unpacked traversal fields.)
    """
    scope_to_nodes = dict()
    # packed = (sub_layer, scope, parent, terminal, sb_id); sb_id is not None
    # exactly for the sub-modules marked as special blocks.
    marked = [packed for packed in special_traverse_model(module, depth=depth, basic_blocks=basic_blocks, special_blocks=special_blocks, full=True, mark=True)
              if packed[4] is not None]
    for packed in marked:
        scope = packed[1]
        scope_to_nodes[scope] = [node for node in nodes
                                 if (node.scope.startswith(scope)
                                     or (node.scope_to_hold_to and node.scope_to_hold_to.startswith(scope)))]
    return scope_to_nodes
|
def annotate_special_blocks_to_hold_to(model, graph, special_blocks, basic_blocks, depth):
    """Pin every node living under a special-block scope to that scope.

    Sets node.scope_to_hold_to for all nodes whose scope begins with one of
    the discovered special-block scopes. Raises NotImplementedError when a
    node would need two different pinned scopes (nesting is unsupported).
    """
    candidates = list(graph.non_input_nodes)
    sb_scope_to_nodes = get_marked_nodes_for_prefix_coarsening(module=model, nodes=candidates, basic_blocks=basic_blocks, special_blocks=special_blocks, depth=depth)
    hold_scopes = list(sb_scope_to_nodes.keys())
    for node in graph.nodes:
        for scope_to_hold_to in hold_scopes:
            if not node.scope.startswith(scope_to_hold_to):
                continue
            already = node.scope_to_hold_to
            if already is not None and already != scope_to_hold_to:
                print(f'need to assign a scope to hold to for node:{node.scope}')
                print(f'but node already has {node.scope_to_hold_to}')
                raise NotImplementedError('nested by prefix coarsening not supported')
            node.scope_to_hold_to = scope_to_hold_to
|
def stochastic_centers_matching(graph: Graph, node_weight_function: NodeWeightFunction, edge_weight_function: EdgeWeightFunction, L, P, uf: UnionFind, verbose=False, record_history=False, special_blocks=None, sb_names=None):
    """Coarsen *graph* (in place) down to ~L nodes around chosen "centers".

    Centers are spread across special blocks proportionally to their node
    counts; any remaining quota is filled with random centers. Non-center
    nodes are then merged forward into centers; if that stalls, centers are
    merged backward into their predecessors.

    Returns (prev_graph_copy, None, graph, uf, uf2) where uf2 tracks only
    this pass's merges.

    Fixes vs. previous version: the explicit *sb_names* argument is honored
    instead of being overwritten; a warning message was missing its f-prefix;
    random.sample() now receives a list (sampling a set raises TypeError on
    Python >= 3.11); division-by-zero guards for total_found == 0 and for a
    zero per-block center quota.
    """
    print('stochastic_centers_matching')
    prev_graph = Graph.from_other(graph)
    uf2 = UnionFind(elements=graph._nodes.keys())
    all_nodes = {n for n in graph.non_input_nodes}
    if (special_blocks is None):
        special_blocks = ()
    if (sb_names is None):
        # BUGFIX: previously sb_names was overwritten unconditionally.
        sb_names = [c.__name__ for c in special_blocks]
    found_nodes = {b: list() for b in sb_names}
    total_found = 0
    for n in graph.non_input_nodes:
        for b in sb_names:
            if ((b in n.scope) or (n.scope_to_hold_to and (b in n.scope_to_hold_to))):
                found_nodes[b].append(n)
                total_found += 1
    print(f'-I- Found {total_found} special blocks')
    pprint(found_nodes)
    if (total_found < L):
        warnings.warn(f'There are only {total_found} special blocks, but need to find {L} centers')
        # BUGFIX: was a plain string, so {L-total_found} never interpolated.
        warnings.warn(f'Finding {L-total_found} more random centers, all found special block centers will be centers')
    print('-I- assigning centers from special blocks')
    if total_found:
        # Spread the L centers across blocks proportionally to node counts.
        lengths = {b: math.floor((L * (len(nodes) / total_found))) for (b, nodes) in found_nodes.items()}
    else:
        lengths = {b: 0 for b in found_nodes}
    total_basic_block_centers = sum(lengths.values())
    print(f'-I- total_basic_block_centers: {total_basic_block_centers}')
    print(f'-I- centers to assign in each basic block: {lengths}')
    hd = deque()
    centers = set()
    to_assign = L
    # Smallest blocks first, so they are not starved of centers.
    sorted_iter = sorted(list(found_nodes.items()), key=(lambda x: len(x[1])))
    for (b_name, nodes) in sorted_iter:
        print(f'-I- Assigning centers in block {b_name}')
        L_tag = len(nodes)
        L_prop_int = lengths[b_name]
        if (L_prop_int <= 0):
            # BUGFIX: a zero quota previously caused ZeroDivisionError below.
            continue
        # Pick every jump-th node of the block as a center.
        jump = math.ceil((L_tag / L_prop_int))
        if (jump <= 0):
            continue
        for i in range(0, L_tag, jump):
            center = nodes[i]
            hd.append(center)
            centers.add(center)
            to_assign -= 1
            if (to_assign == 0):
                break
        if (to_assign == 0):
            break
    print(f'-I- Assigned total of {len(centers)} centers:')
    pprint(centers)
    if (to_assign > 0):
        print(f'-I- Now, choosing {to_assign} more random centers')
        # BUGFIX: random.sample requires a sequence since Python 3.11.
        additional_centers = random.sample(list(all_nodes - centers), to_assign)
        for x in additional_centers:
            centers.add(x)
            hd.append(x)
        to_assign -= len(additional_centers)
    assert (to_assign == 0)
    print('-I- final centers:')
    print(hd)
    def inner_loop():
        # Forward pass: merge one non-center successor into some center,
        # rotating through the center queue. Returns True iff a merge happened.
        for i in range(len(hd)):
            u = hd.popleft()
            for v in sorted(u.out_edges, key=(lambda n: n.topo_sort_id)):
                if (v in centers):
                    continue
                if check_cycle2(graph, u, v):
                    continue
                graph.merge(uid=u.id, vid=v.id, edge_weight_function=edge_weight_function, uf=uf)
                uf.union(u.id, v.id)
                uf2.union(u.id, v.id)
                all_nodes.discard(v)
                hd.append(u)
                return True
            hd.append(u)
        return False
    history_sizes = []
    history_weights = []
    while (len(all_nodes) > L):
        merged_something = inner_loop()
        if (not merged_something):
            break
        if record_history:
            history_sizes.append((len(all_nodes) + 1))
        if verbose:
            print(f'Nodes: {len(all_nodes)} Centers: {len(hd)}')
    if (len(all_nodes) > L):
        print(f'Merged until {len(all_nodes)} Merging more, until {L} left')
    def inner_loop():
        # Backward pass: fold a center into one of its predecessors, which
        # then takes over as the center.
        for i in range(len(hd)):
            v = hd.popleft()
            v: Node
            for u in sorted(v.in_edges, key=(lambda n: (- n.topo_sort_id))):
                if (u not in all_nodes):
                    continue
                if (u in centers):
                    continue
                if check_cycle2(graph, u, v):
                    continue
                graph.merge(uid=u.id, vid=v.id, edge_weight_function=edge_weight_function, uf=uf)
                uf.union(u.id, v.id)
                uf2.union(u.id, v.id)
                all_nodes.discard(v)
                centers.discard(v)
                centers.add(u)
                hd.append(u)
                return True
            hd.append(v)
        return False
    while (len(all_nodes) > L):
        merged_something = inner_loop()
        if (not merged_something):
            break
        if record_history:
            history_sizes.append((len(all_nodes) + 1))
        if verbose:
            print(f'Nodes: {len(all_nodes)} Centers: {len(hd)}')
    matching = None
    return (prev_graph, matching, graph, uf, uf2)
|
def check_cycle2(g: Graph, a: Node, b: Node, nms=NodeMemoryEstimator()):
    """Return True when contracting (merging) edge (a, b) must be rejected.

    Primarily a cycle check: builds a dummy node whose out-edges are the union
    of a's and b's (minus a and b themselves) and forward-DFSes from it;
    reaching a or b means the contraction would create a cycle in *g*.

    Despite the name, this also vetoes (returns True for) contractions whose
    combined memory estimate exceeds nms.THRESHOLD.

    NOTE(review): the default estimator is created once at definition time and
    shared by all calls — fine if NodeMemoryEstimator is stateless; confirm.

    Args:
        g: topo-sorted graph
        a: first node of edge (a, b)
        b: second node of edge (a, b)

    Returns:
        True if contracting would create a cycle (or blow the memory budget).
    """
    # Dummy merged node; the scope string only needs to be non-None.
    ab = Node(None, None, 'dummy_not_None_scope')
    ab.out_edges = sorted((set((a.out_edges + b.out_edges)) - {a, b}), key=(lambda x: x.id))
    creates_a_cycle = g.forward_dfs_and_check_if_in_set(source=ab, set_to_check={a, b}, depth_limit=None)
    if (not creates_a_cycle):
        # Memory guard: reject merges exceeding the per-GPU budget.
        if ((nms(a) + nms(b)) > nms.THRESHOLD):
            return True
    return creates_a_cycle
|
def check_cycle_given_topo_sort(g: Graph, a: Node, b: Node):
    """Check whether merging edge (a, b) would break topological order.

    # TODO: Requires topological order. (e.g dyanamic topo order)

    Builds a small auxiliary digraph over the nodes whose topo ids lie
    strictly between a and b: node 0 stands for a, node 1 for b, and interior
    nodes are shifted by +2. If there is a path 0 -> ... -> 1 then another
    route a -> ... -> b exists through the missing nodes, so merging (a, b)
    would create a cycle.

    Args:
        g: topo-sorted graph
        a: first node of edge (a, b)
        b: second node of edge (a, b)
    Returns:
        True if merging breaks topo order.
    """
    src_ids = a.topo_sort_id
    dst_ids = b.topo_sort_id
    A = {a}
    B = {b}
    # Topo ids strictly after a, up to and including b's position.
    missing_ids = set(range((src_ids + 1), (dst_ids + 1)))
    set_inputs = set(g.inputs)
    missing_nodes_in_work_graph = [g[i] for i in missing_ids if ((i in g) and (g[i] not in set_inputs))]
    edge_nodes: Set[Node] = set(missing_nodes_in_work_graph)
    edges = []
    # Edges from a (node 0) into the interior.
    for a in A:
        for c in a.out_edges:
            if (c in edge_nodes):
                edges.append((0, (c.topo_sort_id + 2)))
    # Interior-to-interior and interior-to-b (node 1) edges.
    for c in edge_nodes:
        for nc in c.out_edges:
            if (nc in edge_nodes):
                edges.append(((c.topo_sort_id + 2), (nc.topo_sort_id + 2)))
            elif (nc in B):
                edges.append(((c.topo_sort_id + 2), 1))
    G = nx.DiGraph(incoming_graph_data=edges)
    G.add_node(0)
    G.add_node(1)
    has_path = nx.algorithms.shortest_paths.generic.has_path(G, 0, 1)
    is_ok = (not has_path)
    has_path_via_missing_nodes = (not is_ok)
    if (not has_path_via_missing_nodes):
        # NOTE(review): even with no interior path, the merge is refused when
        # b has a predecessor later (in topo order) than a — a conservative
        # guard; confirm it is intended rather than redundant.
        for nn in b.in_edges:
            if (nn.topo_sort_id > a.topo_sort_id):
                return True
    return has_path_via_missing_nodes
|
def coarsening(model, graph, edge_weight_function: EdgeWeightFunction, node_weight_function: NodeWeightFunction, L, P, basic_blocks, special_blocks, depth) -> List[Tuple[(Graph, List[List[Node]], Graph, UnionFind)]]:
    """Run the standard coarsening pipeline and return its hierarchy.

    Pipeline order: prefix collapsing, penalty-edge merging, zero-weight node
    merging, comm/comp-ratio blocks, stochastic centers, smallest-node merging.
    """
    print(f'-I- Coarsening: got graph with {graph.num_nodes} nodes')
    mgr = CoarseningMgr(model, graph, edge_weight_function, node_weight_function, L, P, basic_blocks, special_blocks, depth)
    for step in ('prefixes', 'forbidden_edges', 'node_weight_0', 'cco', 'stochastic_centers', 'smallest_nodes'):
        mgr.add_method(step)
    mgr.execute()
    return mgr.hierarchy
|
class CoarseningMgr():
    """Orchestrates a configurable pipeline of graph-coarsening passes.

    Each pass shrinks self.p (the current working graph) and appends a tuple
    (pre-pass graph, per-pass matching/union-find, post-pass graph,
    cumulative union-find) to self.hierarchy so the coarsening can later be
    undone level by level.
    """
    def __init__(self, model, graph: Graph, edge_weight_function: EdgeWeightFunction, node_weight_function: NodeWeightFunction, L, P, basic_blocks, special_blocks, depth):
        self.model = model
        self.graph = graph
        self.edge_weight_function = edge_weight_function
        self.node_weight_function = node_weight_function
        self.L = L
        self.P = P
        self.basic_blocks = basic_blocks
        self.special_blocks = special_blocks
        self.depth = depth
        print(f'-I- Coarsening: got graph with {graph.num_nodes} nodes')
        # Cumulative union-find across all passes.
        self.uf = UnionFind(elements=[n.id for n in graph.non_input_nodes])
        # Current working graph; replaced after every pass.
        self.p = graph
        self.pipeline = []
        self.kwargs_pipeline = []
        self.hierarchy = []
    def add_method(self, name, *method_args, **method_kwargs):
        """Queue a coarsening pass by name (executed in insertion order)."""
        self.pipeline.append(name)
        self.kwargs_pipeline.append((method_args, method_kwargs))
    def execute(self):
        """Run every queued pass in order, dispatching by name."""
        if ('prefixes' in self.pipeline):
            # Prefix coarsening needs nodes pinned to their special-block scope.
            annotate_special_blocks_to_hold_to(model=self.model, graph=self.graph, special_blocks=self.special_blocks, basic_blocks=self.basic_blocks, depth=self.depth)
        for (i, (method, (method_args, method_kwargs))) in enumerate(zip(self.pipeline, self.kwargs_pipeline)):
            is_last_op_in_pipe = (i == (len(self.pipeline) - 1))
            if (method == 'prefixes'):
                self.coarsen_prefixes(is_last_op_in_pipe=is_last_op_in_pipe)
            elif (method == 'forbidden_edges'):
                self.forbidden_edges(is_last_op_in_pipe=is_last_op_in_pipe)
            elif (method == 'node_weight_0'):
                self.node_weight_0(is_last_op_in_pipe=is_last_op_in_pipe)
            elif (method == 'heavy_edges'):
                self.heavy_edges(*method_args, **method_kwargs, is_last_op_in_pipe=is_last_op_in_pipe)
            elif (method == 'cco'):
                self.cco(is_last_op_in_pipe=is_last_op_in_pipe)
            elif (method == 'stochastic_centers'):
                self.stochastic_centers(is_last_op_in_pipe=is_last_op_in_pipe)
            elif (method == 'smallest_nodes'):
                self.smallest_nodes(is_last_op_in_pipe=is_last_op_in_pipe)
            else:
                raise NotImplementedError(method)
    def append_to_hierarchy(self, p, uf2, g, uf, is_last_op_in_pipe=False):
        """Record one pass; deep-copy the union-find except on the last pass,
        where sharing the live object is safe (it will not mutate further)."""
        hierarchy = self.hierarchy
        if (not is_last_op_in_pipe):
            hierarchy.append((p, uf2, g, deepcopy(uf)))
        else:
            hierarchy.append((p, uf2, g, uf))
    def coarsen_prefixes(self, is_last_op_in_pipe=False):
        """Collapse special blocks identified by scope prefix."""
        (p, _, g, uf, uf2, sb_names) = coarsen_prefixes(model=self.model, graph=self.p, node_weight_function=self.node_weight_function, edge_weight_function=self.edge_weight_function, uf=self.uf, basic_blocks=self.basic_blocks, special_blocks=self.special_blocks, depth=self.depth)
        print(sb_names)
        print(f'merged {(len(p) - len(g))} nodes with common prefixes')
        self.append_to_hierarchy(p, uf2, g, uf, is_last_op_in_pipe=is_last_op_in_pipe)
        self.p = g
    def forbidden_edges(self, is_last_op_in_pipe=False):
        """Contract edges whose values cannot be communicated between stages."""
        matching = penalty_edges_matching(graph=self.p, edge_weight_function=self.edge_weight_function)
        g = contract(self.p, matching, self.edge_weight_function, uf=self.uf)
        print(f'merged {len(matching)} nodes with penalty edges')
        self.append_to_hierarchy(self.p, matching, g, self.uf, is_last_op_in_pipe=is_last_op_in_pipe)
        self.p = g
    def node_weight_0(self, is_last_op_in_pipe=False):
        """Merge away nodes with zero (or negative) compute weight."""
        print(f'merging nodes with weight <=0')
        (p, _, g, uf, uf2) = nodes_leq_threshold_matching(self.p, self.node_weight_function, self.edge_weight_function, self.L, self.uf, verbose=False, record_history=True, threshold=0)
        self.append_to_hierarchy(p, uf2, g, self.uf, is_last_op_in_pipe=is_last_op_in_pipe)
        self.p = g
    def heavy_edges(self, percentile_to_filter=0.95, is_last_op_in_pipe=False):
        """Contract the heaviest communication edges."""
        (p, _, g, uf, uf2) = online_heavy_edge_matching(self.p, self.node_weight_function, self.edge_weight_function, self.L, self.uf, verbose=True, record_history=True, pecentile_to_filter=percentile_to_filter)
        self.append_to_hierarchy(p, uf2, g, self.uf, is_last_op_in_pipe=is_last_op_in_pipe)
        self.p = g
    def cco(self, is_last_op_in_pipe=False):
        """Communication/computation-ratio block coarsening."""
        (p, _, g, uf, uf2) = systematic_comm_comp_ratio_matching(self.p, self.node_weight_function, self.edge_weight_function, self.L, self.uf, verbose=True)
        if (uf2 is None):
            warnings.warn("can't restore single step of systematic max blocks")
        self.append_to_hierarchy(p, uf2, g, self.uf, is_last_op_in_pipe=is_last_op_in_pipe)
        self.p = g
    def stochastic_centers(self, is_last_op_in_pipe=False):
        """Coarsen around stochastically chosen centers."""
        (p, _, g, uf, uf2) = stochastic_centers_matching(self.p, self.node_weight_function, self.edge_weight_function, self.L, self.P, self.uf, verbose=True, record_history=False, special_blocks=self.special_blocks)
        self.append_to_hierarchy(p, uf2, g, self.uf, is_last_op_in_pipe=is_last_op_in_pipe)
        self.p = g
    def smallest_nodes(self, is_last_op_in_pipe=False):
        """Repeatedly merge the lightest remaining node into a neighbor."""
        (p, _, g, uf, uf2) = online_smallest_comp_node_matching(self.p, self.node_weight_function, self.edge_weight_function, self.L, self.uf, verbose=True, record_history=True)
        self.append_to_hierarchy(p, uf2, g, self.uf, is_last_op_in_pipe=is_last_op_in_pipe)
        self.p = g
|
def contract(graph: Graph, matching: List[List[Node]], edge_weight_function: EdgeWeightFunction, uf: Optional[UnionFind]=None) -> Graph:
    """Contract each matched group of nodes into its first member.

    Groups are processed in descending id of their representative (first
    element). Returns a new graph; *graph* itself is left untouched. When
    *uf* is given, every merge is also recorded in the union-find.
    """
    contracted = Graph.from_other(graph)
    ordered_groups = sorted(matching, key=(lambda group: group[0].id), reverse=True)
    for group in ordered_groups:
        representative, *rest = group
        for member in rest:
            contracted.merge(representative.id, member.id, edge_weight_function=edge_weight_function, uf=uf)
            if uf is not None:
                uf.union(x=representative.id, y=member.id)
    return contracted
|
def penalty_edges_matching(graph: Graph, edge_weight_function: EdgeWeightFunction):
    """Match the endpoints of every penalized edge so they end up merged.

    Penalized edges carry values which MPI and the like can't handle, so
    they must stay inside one stage.
    # TODO: if this creates a cycle we have nothing to do, but manually wrap it and disallow communication of weird stuff

    Returns a list of [node, out] pairs to be contracted.
    Raises NotImplementedError when the collected pairs suggest a potential
    cycle (see the tail check below).
    """
    matching = []
    for node in graph.non_input_nodes:
        check = False
        for out in node.out_edges:
            if (edge_weight_function(node, out) >= edge_weight_function.penalty):
                if check_cycle2(graph, node, out):
                    warnings.warn(f"can't compress edge with penalty (node,out)={(node, out)}")
                    continue
                matching.append([node, out])
                check = True
        if check:
            # Sanity check: unless edge weights are compounded, once one out
            # edge is penalized they all should be.
            if (not node.compound_edge_weights):
                try:
                    for out in node.out_edges:
                        assert (edge_weight_function(node, out) >= edge_weight_function.penalty)
                except AssertionError:
                    for out in node.out_edges:
                        print(edge_weight_function(node, out))
                    print('PENATLY', edge_weight_function.penalty)
            # Verify the last `count` appended pairs all originate from this
            # node; a foreign pair in that tail indicates a potential cycle.
            count = 0
            for v in node.out_edges:
                if (v.id in graph.output_ids):
                    continue
                count += 1
            for e in matching[(- count):]:
                if (e[0] is not node):
                    print('matching')
                    for tup in matching:
                        print(tup)
                    print('out edges')
                    for v in node.out_edges:
                        print(v)
                    raise NotImplementedError(f'potential cycle in edge {e}. (count={count}) Should probably duplicate node, or check topo order.')
    return matching
|
def code_analysis_matching(graph: Graph):
    """Placeholder for a code-analysis based matching strategy (not implemented)."""
    pass
|
def adjacent_and_same_size_matching(graph: Graph):
    """Placeholder for matching adjacent equal-size nodes (not implemented)."""
    pass
|
def systematic_comm_comp_ratio_matching(graph: Graph, node_weight_function, edge_weight_function, L, uf: UnionFind, verbose=False):
    """Coarsen *graph* in place via RatioBlockCreator down to ~L nodes.

    Returns (pre-pass copy, None, graph, uf, None); this pass produces no
    per-step matching or union-find, hence the two Nones.
    """
    snapshot = Graph.from_other(graph)
    creator = RatioBlockCreator(graph, edge_weight_function=edge_weight_function, node_weight_function=node_weight_function, uf=uf)
    creator.apply(L, verbose=verbose)
    return (snapshot, None, graph, uf, None)
|
def online_smallest_comp_node_matching(graph: Graph, node_weight_function, edge_weight_function, L, uf: UnionFind, verbose=False, record_history=False):
    """Coarsen *graph* in place until at most L nodes remain by repeatedly
    merging the lightest node into its lightest mergeable successor.

    hd is a weight-sorted dict of live nodes; after every successful merge the
    inner loop returns so iteration restarts on the (mutated) dict.

    Returns (pre-pass copy, None, graph, uf, uf2) where uf2 tracks only this
    pass's merges.
    """
    prev_graph = Graph.from_other(graph)
    uf2 = UnionFind(elements=graph._nodes.keys())
    hd = ValueSortedDict({n: node_weight_function(n) for n in graph.non_input_nodes})
    def inner_loop():
        # One merge per call: lightest node u, lightest cycle-free successor v.
        for (u, weight_of_u) in hd.items():
            for v in sorted(u.out_edges, key=(lambda n: node_weight_function(n))):
                if check_cycle2(graph, u, v):
                    continue
                graph.merge(uid=u.id, vid=v.id, edge_weight_function=edge_weight_function, uf=uf)
                uf.union(u.id, v.id)
                uf2.union(u.id, v.id)
                # Re-insert u with its post-merge weight.
                hd.pop(u)
                hd.pop(v)
                hd[u] = node_weight_function(u)
                return (True, weight_of_u)
        return (False, None)
    history_sizes = []
    history_weights = []
    while (len(hd) > L):
        (merged_something, weight_of_u) = inner_loop()
        if (not merged_something):
            break
        if record_history:
            history_sizes.append((len(hd) + 1))
            history_weights.append(weight_of_u)
        if verbose:
            print(f'Nodes: {len(hd)}, Smallest: {weight_of_u}')
    matching = None
    return (prev_graph, matching, graph, uf, uf2)
|
def nodes_leq_threshold_matching(graph: Graph, node_weight_function, edge_weight_function, L, uf: UnionFind, verbose=False, record_history=False, threshold=0):
    """Merge away all nodes whose compute weight is <= *threshold*.

    Each such node is preferably merged into its lightest cycle-free
    predecessor (keeping the predecessor's identity); when no predecessor
    works it is merged forward into a successor (losing the successor's
    name, with a warning). Stops when only L nodes remain or every remaining
    node is above the threshold.

    Returns (pre-pass copy, None, graph, uf, uf2) where uf2 tracks only this
    pass's merges.
    """
    prev_graph = Graph.from_other(graph)
    uf2 = UnionFind(elements=graph._nodes.keys())
    hd = ValueSortedDict({n: node_weight_function(n) for n in graph.non_input_nodes})
    total_merged = 0
    def inner_loop():
        # Tri-state return: (merged?, weight, done-with-threshold?).
        for (u, weight_of_u) in hd.items():
            if (weight_of_u > threshold):
                # hd is weight-sorted: nothing lighter remains.
                print(f'done with nodes <= threshold {threshold}, breaking (last weight: {weight_of_u}). merged {total_merged}')
                return (False, None, True)
            u: Node
            # Preferred direction: fold u backward into a predecessor.
            for v in sorted(u.in_edges, key=(lambda n: node_weight_function(n))):
                if (v in graph.inputs):
                    continue
                if check_cycle2(graph, v, u):
                    continue
                graph.merge(uid=v.id, vid=u.id, edge_weight_function=edge_weight_function, uf=uf)
                uf.union(v.id, u.id)
                uf2.union(v.id, u.id)
                hd.pop(v)
                hd.pop(u)
                hd[v] = node_weight_function(v)
                return (True, weight_of_u, False)
            # Fallback: merge forward into a successor.
            for v in sorted(u.out_edges, key=(lambda n: node_weight_function(n))):
                if check_cycle2(graph, u, v):
                    continue
                warnings.warn(f"can't merge small node {u} backward, will merge forward and lose the name of {v}.")
                graph.merge(uid=u.id, vid=v.id, edge_weight_function=edge_weight_function, uf=uf)
                uf.union(u.id, v.id)
                uf2.union(u.id, v.id)
                hd.pop(u)
                hd.pop(v)
                hd[u] = node_weight_function(u)
                return (True, weight_of_u, False)
        return (False, None, False)
    history_sizes = []
    history_weights = []
    while (len(hd) > L):
        (merged_something, weight_of_u, threshold_cond) = inner_loop()
        if threshold_cond:
            break
        if (not merged_something):
            break
        total_merged += 1
        if record_history:
            history_sizes.append((len(hd) + 1))
            history_weights.append(weight_of_u)
        if verbose:
            print(f'Nodes: {len(hd)}, Smallest: {weight_of_u}')
    matching = None
    return (prev_graph, matching, graph, uf, uf2)
|
def ofline_smallest_comp_node_matching(graph: Graph, node_weight_function):
    """Offline matching: pair each unmatched node, lightest first, with its
    lightest unmatched, cycle-free out-neighbor.

    Returns a list of (u, v) pairs forming a proper matching — every node
    appears in at most one pair.
    """
    matching = []
    matched = set()
    for u in sorted(graph.non_input_nodes, key=(lambda n: node_weight_function(n))):
        if (u in matched):
            continue
        for v in sorted(u.out_edges, key=(lambda n: node_weight_function(n))):
            if (v in matched):
                continue
            if check_cycle2(graph, u, v):
                continue
            matched.add(u)
            matched.add(v)
            matching.append((u, v))
            # BUGFIX: stop after the first partner — without this break, u was
            # paired with several out-neighbors, which is not a matching.
            break
    return matching
|
def online_heavy_edge_matching(graph: Graph, node_weight_function, edge_weight_function, L, uf: UnionFind, verbose=False, record_history=False, pecentile_to_filter=0.9):
    """Coarsen *graph* in place by contracting its heaviest communication
    edges first.

    Edges are visited heaviest-first; after every merge the sorted edge map
    is updated via RatioBlockCreator. When pecentile_to_filter is given, the
    pass stops once that fraction of edges has been removed; otherwise it
    runs until only L edges remain.

    Returns (pre-pass copy, None, graph, uf, uf2) where uf2 tracks only this
    pass's merges.

    NOTE(review): unlike the sibling matchings, uf.union(u.id, v.id) is not
    called here — graph.merge receives uf, confirm it performs the union
    internally, otherwise uf misses these merges.
    """
    prev_graph = Graph.from_other(graph)
    uf2 = UnionFind(elements=graph._nodes.keys())
    rbc = RatioBlockCreator(graph, edge_weight_function=edge_weight_function, node_weight_function=node_weight_function, uf=uf)
    # (uid, vid) -> edge weight, heaviest first.
    hd = rbc.sorted_graph_forward_edges(descending=True)
    def inner_loop():
        # One merge per call: heaviest cycle-free edge.
        for ((uid, vid), weight_of_u_v) in hd.items():
            u = graph[uid]
            v = graph[vid]
            if check_cycle2(graph, u, v):
                continue
            graph.merge(uid=u.id, vid=v.id, edge_weight_function=edge_weight_function, uf=uf)
            uf2.union(u.id, v.id)
            rbc.update_sorted_edges_on_merges(edges_to_value=hd, merges=[(u.id, v.id)], allow_poped_outside=True)
            return (True, weight_of_u_v)
        return (False, None)
    history_sizes = []
    history_weights = []
    # pandas is used here only to print edge-weight statistics.
    import pandas as pd
    s = pd.Series(list(hd.values()))
    description = s.describe(percentiles=[0.5, 0.75, 0.8, 0.9, 0.95, 0.99])
    print(description)
    if (pecentile_to_filter is not None):
        dest_length = (len(hd) * pecentile_to_filter)
        print(f'Filtering hte {pecentile_to_filter} percentile')
    else:
        dest_length = L
    while (len(hd) > dest_length):
        (merged_something, weight_of_merged) = inner_loop()
        if (not merged_something):
            break
        if record_history:
            history_sizes.append((len(hd) + 1))
            history_weights.append(weight_of_merged)
        if verbose:
            print(f'Edges: {len(hd)}, Largest edge: {weight_of_merged}')
    matching = None
    return (prev_graph, matching, graph, uf, uf2)
|
def full_alg(graph, P, L, node_weight_function, edge_weight_function, uf, rtol=0.002):
    """Record the repetition-analysis history of a smallest-node coarsening
    run, persist it to 'history.tmp', and print the adjacent-repetition
    analysis for L stages over P devices."""
    analysis_history = get_rep_analysis_history_with_online_smallest_comp_node_matching(graph, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, L=L, uf=uf, rtol=rtol, verbose=False)
    torch.save(analysis_history, 'history.tmp')
    print('Saved history to file history.tmp')
    repetitive_adjacent_analysis(analysis_history, L, P)
|
def repetitive_adjacent_analysis(history: List[List[Set[Node]]], L, P):
    """Print, for every recorded merge step, which repeated-weight sets split
    evenly into P groups (candidates for symmetric placement). The L
    parameter is accepted for interface compatibility."""
    for (i, found_sets) in enumerate(history):
        lengths = list(map(len, found_sets))
        print(f'-I- merge {i} Found set lengths {lengths}')
        for l in lengths:
            if (l % P) != 0:
                if l > P:
                    print(f' Found set of size {l}, currently ignoring')
                continue
            k = l // P
            print(f' Found set of size {l} splitting it to {k}*{P} groups')
|
def record_repetitive_adjacent(graph, node_weight_function, rtol=0.002, do_topo_sort=True):
    """Group topologically-adjacent nodes with (relatively) equal weights.

    Walks the non-input nodes in topological order and collects maximal runs
    of consecutive nodes whose weight stays within ``rtol`` of the running
    mean weight of the current run.

    Args:
        graph: graph whose ``non_input_nodes`` carry a ``topo_sort_id``.
        node_weight_function: callable mapping a node to a numeric weight.
        rtol: relative tolerance used to decide two weights are "equal".
        do_topo_sort: refresh topo ids first (without mutating the graph).

    Returns:
        List of sets of ``topo_sort_id``s, one set per detected run.
    """
    if do_topo_sort:
        graph.topo_sort(change_graph=False)
    # Only ordered iteration is needed, so a plain sorted list of
    # (topo_sort_id, weight) pairs replaces the former SortedDict.
    topo_sorted_nodes_to_weight = sorted(
        (n.topo_sort_id, node_weight_function(n)) for n in graph.non_input_nodes
    )
    found_sets = []
    cur = None      # running mean weight of the current run
    rsum = 0        # running sum of weights in the current run
    cur_set = set()
    for (node, weight) in topo_sorted_nodes_to_weight:
        if (cur is None):
            # First node opens the first run.
            cur = weight
            cur_set.add(node)
            rsum = weight
        elif np.allclose(weight, cur, rtol):
            rsum += weight
            cur_set.add(node)
            cur = (rsum / len(cur_set))
        else:
            # Close the current run and start a new one seeded with this node
            # (previously the run-breaking node was dropped entirely).
            if cur_set:
                found_sets.append(cur_set)
            cur = weight
            rsum = weight
            cur_set = {node}
    # Flush the trailing run (previously it was never appended).
    if cur_set:
        found_sets.append(cur_set)
    return found_sets
|
def get_rep_analysis_history_with_online_smallest_comp_node_matching(graph: Graph, node_weight_function, edge_weight_function, L, uf: UnionFind, verbose=True, rtol=0.002):
    """Repeatedly merge the lightest mergeable node into a neighbor, recording
    the repetitive-adjacent analysis after every merge.

    Works on a copy of ``graph`` (note the swap below: the local name `graph`
    ends up holding the copy) and on a deep copy of ``uf``, so the caller's
    structures are untouched. Merging stops when at most ``L`` nodes remain or
    no cycle-free merge exists.

    Returns:
        List with one entry per performed merge: the ``found_sets`` produced
        by ``record_repetitive_adjacent`` right after that merge.
    """
    prev_graph = Graph.from_other(graph)
    # Swap: `graph` becomes the working copy, `prev_graph` the original.
    (graph, prev_graph) = (prev_graph, graph)
    uf = deepcopy(uf)
    # Nodes keyed to their weight; ValueSortedDict iterates lightest-first.
    hd = ValueSortedDict({n: node_weight_function(n) for n in graph.non_input_nodes})
    def inner_loop():
        # Perform at most ONE merge: the lightest node u into its lightest
        # out-neighbor v such that merging does not create a cycle.
        # Returns (merged?, weight u had before the merge).
        for (u, weight_of_u) in hd.items():
            for v in sorted(u.out_edges, key=(lambda n: node_weight_function(n))):
                if check_cycle2(graph, u, v):
                    continue
                graph.merge(uid=u.id, vid=v.id, edge_weight_function=edge_weight_function, uf=uf)
                uf.union(u.id, v.id)
                # v was absorbed into u: drop both stale entries, re-insert u
                # with its refreshed (merged) weight.
                hd.pop(u)
                hd.pop(v)
                hd[u] = node_weight_function(u)
                return (True, weight_of_u)
        return (False, None)
    rep_analysis_history = []
    while (len(hd) > L):
        (merged_something, weight_of_u) = inner_loop()
        if (not merged_something):
            # No legal (cycle-free) merge remains; stop before reaching L.
            break
        found_sets = record_repetitive_adjacent(graph, node_weight_function, rtol=rtol, do_topo_sort=True)
        rep_analysis_history.append(found_sets)
        if verbose:
            print(f'Nodes: {len(hd)}, Smallest: {weight_of_u}')
    return rep_analysis_history
|
def doc(s):
    """Decorator factory that assigns ``__doc__`` on the decorated object.

    ``s`` may be a docstring itself, or a callable whose docstring is borrowed.
    """
    text = s.__doc__ if hasattr(s, '__call__') else s

    def decorator(target):
        target.__doc__ = text
        return target

    return decorator
|
class heapdict(MutableMapping):
    """Mutable mapping whose ``popitem``/``peekitem`` return the (key, value)
    pair with the LOWEST value.

    Each entry is a ``wrapper = [value, key, heap_index]`` list stored both in
    ``self.d`` (keyed by ``key``) and in the binary min-heap ``self.heap``;
    ``wrapper[2]`` is kept in sync with the wrapper's heap position so an
    arbitrary entry can be located for deletion in O(log n).
    """
    __marker = object()

    def __init__(self, *args, **kw):
        self.heap = []  # binary min-heap of wrappers, ordered by value
        self.d = {}     # key -> wrapper
        self.update(*args, **kw)

    @doc(dict.clear)
    def clear(self):
        del self.heap[:]
        self.d.clear()

    @doc(dict.__setitem__)
    def __setitem__(self, key, value):
        if (key in self.d):
            self.pop(key)
        # len(self) equals the index the wrapper will occupy after append.
        wrapper = [value, key, len(self)]
        self.d[key] = wrapper
        self.heap.append(wrapper)
        # Restore the heap invariant by bubbling the new entry up.
        self._decrease_key((len(self.heap) - 1))

    def _min_heapify(self, i):
        # Sift the entry at index i down until both children are >= it.
        n = len(self.heap)
        h = self.heap
        while True:
            l = ((i << 1) + 1)   # left child
            r = ((i + 1) << 1)   # right child
            if ((l < n) and (h[l][0] < h[i][0])):
                low = l
            else:
                low = i
            if ((r < n) and (h[r][0] < h[low][0])):
                low = r
            if (low == i):
                break
            self._swap(i, low)
            i = low

    def _decrease_key(self, i):
        # Bubble the entry at index i up while its parent is not smaller.
        while i:
            parent = ((i - 1) >> 1)
            if (self.heap[parent][0] < self.heap[i][0]):
                break
            self._swap(i, parent)
            i = parent

    def _swap(self, i, j):
        h = self.heap
        (h[i], h[j]) = (h[j], h[i])
        # Keep the back-pointers (wrapper[2]) consistent with the new slots.
        h[i][2] = i
        h[j][2] = j

    @doc(dict.__delitem__)
    def __delitem__(self, key):
        wrapper = self.d[key]
        # Force the entry up to the root (ignoring order), then pop the root.
        while wrapper[2]:
            parentpos = ((wrapper[2] - 1) >> 1)
            parent = self.heap[parentpos]
            self._swap(wrapper[2], parent[2])
        self.popitem()

    @doc(dict.__getitem__)
    def __getitem__(self, key):
        return self.d[key][0]

    @doc(dict.__iter__)
    def __iter__(self):
        return iter(self.d)

    def popitem(self):
        'D.popitem() -> (k, v), remove and return the (key, value) pair with lowest\nvalue; but raise KeyError if D is empty.'
        if (not self.heap):
            # Previously raised IndexError, contradicting the documented
            # contract above (and dict.popitem's convention).
            raise KeyError('popitem(): heapdict is empty')
        wrapper = self.heap[0]
        if (len(self.heap) == 1):
            self.heap.pop()
        else:
            # Move the last leaf to the root and sift it down.
            self.heap[0] = self.heap.pop()
            self.heap[0][2] = 0
            self._min_heapify(0)
        del self.d[wrapper[1]]
        return (wrapper[1], wrapper[0])

    @doc(dict.__len__)
    def __len__(self):
        return len(self.d)

    def peekitem(self):
        'D.peekitem() -> (k, v), return the (key, value) pair with lowest value;\n but raise KeyError if D is empty.'
        if (not self.heap):
            # Previously raised IndexError; docstring promises KeyError.
            raise KeyError('peekitem(): heapdict is empty')
        return (self.heap[0][1], self.heap[0][0])

    def __str__(self):
        return str(self.d)
|
class ReminderPolicy(Enum):
    """Where leftover ("reminder", i.e. remainder) nodes go when a cluster's
    size is not divisible by the number of partitions K."""
    ToLast = 'last'  # append the remainder to the last sub-split
    ToMin = 'min'  # append the remainder to the minimum-weight sub-split
|
class SecondAndOnClusterPolicy(Enum):
    """How sub-splits of the second and subsequent clusters are assigned to bins
    (the first processed cluster always seeds bin i with sub-split i)."""
    BestFitBinPacking = 'best_fit'  # heaviest sub-splits first, into the lightest bin
    InOrder = 'order'  # sub-split i goes to bin i
    Reversed = 'reversed'  # like InOrder, but right-to-left on odd clusters
|
def maketree(n, iterable):
    """Split *iterable* into consecutive groups of ``n`` items.

    Previously an iterable whose length was not a multiple of ``n`` crashed
    with IndexError while building the final group; the trailing shorter
    group is now returned as-is (behavior for divisible lengths unchanged).

    >>> maketree(2, [1, 2, 3])
    [[1, 2], [3]]
    """
    d = deque(iterable)
    res = []
    while d:
        # min() guards the last, possibly shorter, group.
        res.append([d.popleft() for _ in range(min(n, len(d)))])
    return res
|
def flatten_subsplit(subsplit):
    """Flatten one level of nesting: list elements are spliced in, scalars kept."""
    flat = []
    for element in subsplit:
        if isinstance(element, list):
            flat += element
        else:
            flat.append(element)
    return flat
|
def sum_subsplit_weight(subsplit):
    """Total ``weight`` over every (possibly nested) item of *subsplit*."""
    total = 0
    for item in flatten_subsplit(subsplit):
        total += item.weight
    return total
|
def get_all_splits(K: int, clusters, id_to_node: Dict[(int, Node)], to_unify: Dict[(int, List[Union[(List, Any)]])], C: int, reminder_policy: ReminderPolicy=ReminderPolicy.ToLast):
    """Split every cluster into K sub-splits of (roughly) equal element count.

    Each cluster (rows of the per-node DataFrame, ordered by id) is chopped
    into K consecutive groups of ``n_i // K`` elements; leftover elements
    ("reminder", i.e. remainder) are placed according to ``reminder_policy``.
    A cluster smaller than K is treated as all-remainder: every element
    becomes its own sub-split.

    Returns:
        List with one entry per cluster: a list of sub-splits (lists of row
        tuples, some possibly pre-merged into nested lists by ``to_unify``).
    """
    all_splits = []
    assert (len(clusters.cluster.unique()) == C)
    # Materialize each cluster as a list of row tuples, ordered by node id.
    clusters = [list(clusters.groupby('cluster').get_group(c).sort_values('id').itertuples()) for c in range(C)]
    clusters_lengths = {i: len(clusters[i]) for i in range(len(clusters))}
    print('cluster_lengths, before compressing', clusters_lengths)
    # Fold below-threshold rows into their designated hosts (see make_clusters).
    new_clusters = get_unified_clusters(clusters, to_unify)
    clusters = new_clusters
    clusters_lengths = {i: len(clusters[i]) for i in range(len(clusters))}
    print('cluster_lengths, after compressing', clusters_lengths)
    for (c_i, cluster) in enumerate(clusters):
        n_i = len(cluster)
        reminder = (n_i % K)
        only_reminder = False
        if (n_i < K):
            only_reminder = True
            warnings.warn(f'small number of items in cluster {c_i}: {n_i} need at least {K}. will treat as reminder with policy {reminder_policy}')
        N = (n_i // K)
        if (not only_reminder):
            # Strip the remainder tail, then chop the rest into K groups of N.
            cluster_for_split = (cluster if (not reminder) else cluster[:(- reminder)])
            split = list(maketree(n=N, iterable=cluster_for_split))
        else:
            # Too few items: each item becomes its own sub-split; no remainder.
            cluster_for_split = cluster
            split = [[x] for x in cluster_for_split]
            reminder = 0
        if (reminder > 0):
            if (reminder_policy == ReminderPolicy.ToLast):
                warnings.warn(f'cluster {c_i} is problematic, {n_i}%{K}!=0, will put reminding {reminder} nodes in last partition')
                split[(- 1)].extend(cluster[(- reminder):])
            elif (reminder_policy == ReminderPolicy.ToMin):
                warnings.warn(f'cluster {c_i} is problematic {c_i}, {n_i}%{K}!=0, will put reminding {reminder} nodes in min weight partition')
                min_idx = np.argmin([sum_subsplit_weight(subsplit) for subsplit in split])
                split[min_idx].extend(cluster[(- reminder):])
            else:
                raise NotImplementedError(f'reminder_policy:{reminder_policy}')
        all_splits.append(split)
    return all_splits
|
def get_unified_clusters(clusters, to_unify):
    """Merge rows inside each cluster according to ``to_unify``.

    ``to_unify[c_i]`` holds ``[z, x]`` pairs produced by ``make_clusters``:
    the row indexed ``z`` is absorbed into (a list stored under) ``x``.
    When ``z`` was already absorbed elsewhere, the pair is re-targeted to
    ``z``'s current holder and retried in a next generation. A leaf-set
    assertion at the end guarantees no row was lost or duplicated.

    Returns:
        New list of clusters where each element is either a row tuple or a
        sorted list of merged row tuples, ordered by remaining index.
    """
    def to_set(v, s):
        # Collect every leaf (non-list) element of a nested structure into s.
        if (not isinstance(v, list)):
            s.add(v)
            return
        for x in v:
            to_set(x, s)
    (A, B) = (set(), set())
    to_set(clusters, A)
    new_clusters = []
    for (c_i, cluster) in enumerate(clusters):
        to_unify_for_cluster = to_unify[c_i]
        cluster_D = {i.Index: i for i in cluster}
        deleted_from_cluster = {}  # absorbed index -> index it was merged into
        next_gen = []  # re-targeted pairs to retry in the next sweep
        first_time = True
        while (first_time or next_gen):
            first_time = False
            if next_gen:
                to_unify_for_cluster = next_gen
                next_gen = []
            for l in to_unify_for_cluster:
                z = l[0]
                x = l[1]
                if isinstance(cluster_D[x], list):
                    # x already holds a merged group: fold z's content into it.
                    v = cluster_D[x]
                    zz = cluster_D[z]
                    if isinstance(zz, list):
                        v.extend(zz)
                    else:
                        v.append(zz)
                    v.sort(key=(lambda y: y.Index))
                else:
                    v = [cluster_D[x]]
                    try:
                        zz = cluster_D[z]
                    except KeyError as e:
                        if (not (z in deleted_from_cluster)):
                            raise e
                        else:
                            # z already merged elsewhere: retry against its
                            # current holder next generation.
                            warnings.warn(f'found a double: {l}, I already deleted {z} and unified it with {deleted_from_cluster[z]}, will unify now to {x}')
                            next_gen.append(sorted([x, deleted_from_cluster[z]]))
                            continue
                    if isinstance(zz, list):
                        v.extend(zz)
                    else:
                        v.append(zz)
                    v.sort(key=(lambda y: y.Index))
                    cluster_D[x] = v
                deleted_from_cluster[z] = x
                del cluster_D[z]
        cluster = []
        for i in sorted(cluster_D.keys()):
            cluster.append(cluster_D[i])
        new_clusters.append(cluster)
    to_set(new_clusters, B)
    # Merging must preserve the exact set of leaf rows.
    assert (A == B), (A, B)
    return new_clusters
|
def make_clusters(graph: Graph, nodes: List[Node], node_weight_function, C: int, THRESHOLD=0):
    """Cluster nodes into C groups by weight via KMeans.

    Nodes whose weight is <= THRESHOLD are not clustered directly: each one is
    attached ("unified") to the cluster of a nearby above-threshold neighbor,
    trying out-edges first, then in-edges, at increasing nesting depth.
    Cluster labels are finally renumbered by first appearance in ascending
    node-id order.

    Returns:
        (clusters, to_unify): per-node DataFrame (indexed by id, with 'weight'
        and 'cluster' columns), and a dict cluster_id -> list of
        [kept_id, absorbed_id] pairs consumed by ``get_unified_clusters``.
    """
    def node_to_record(node):
        return {'id': node.id, 'weight': node_weight_function(node)}
    records = [node_to_record(node) for node in nodes]
    X = pd.DataFrame.from_records(data=records, index='id')
    nodes_below_thresholds = X[(X['weight'] <= THRESHOLD)]
    nodes_above_thresholds = X[(X['weight'] > THRESHOLD)]
    # Only above-threshold nodes participate in the clustering itself.
    kmeans = KMeans(n_clusters=C, max_iter=1000, copy_x=True).fit(nodes_above_thresholds)
    X.loc[(nodes_above_thresholds.index, 'cluster')] = kmeans.labels_
    to_unify = defaultdict(list)
    set_idx = set(nodes_below_thresholds.index)
    def _basic_nest_forward(y: Node, X, set_idx, node_id, to_unify, nesting_to_take=0, curr_nesting=0):
        # At the requested depth, adopt the cluster of the first
        # above-threshold successor found.
        # NOTE(review): when recursing deeper, only the FIRST out-edge's
        # subtree is explored (the else-branch returns unconditionally) —
        # confirm this is intended.
        for y in y.out_edges:
            if (curr_nesting == nesting_to_take):
                if (y.id not in set_idx):
                    dst_cluster = X.loc[y.id]['cluster']
                    X.loc[((X.index == node_id), 'cluster')] = dst_cluster
                    to_unify[dst_cluster].append([node_id, y.id])
                    print(f'-V- unify node: {node.id} to dst: {y.id}, cluster: {dst_cluster} (forward)')
                    return True
            else:
                return _basic_nest_forward(y, X, set_idx, node_id, to_unify, nesting_to_take=nesting_to_take, curr_nesting=(curr_nesting + 1))
        return False
    def _basic_nest_backward(y: Node, X, set_idx, node_id, to_unify, nesting_to_take=0, curr_nesting=0):
        # Same idea over in-edges; tolerates predecessors missing from X.
        for y in y.in_edges:
            if (curr_nesting == nesting_to_take):
                if (y.id not in set_idx):
                    try:
                        dst_cluster = X.loc[y.id]['cluster']
                    except KeyError:
                        continue
                    X.loc[((X.index == node_id), 'cluster')] = dst_cluster
                    to_unify[dst_cluster].append([y.id, node_id])
                    print(f'-V- unify node: {node.id} to dst: {y.id}, cluster: {dst_cluster} (backward)')
                    return True
            else:
                return _basic_nest_backward(y, X, set_idx, node_id, to_unify, nesting_to_take=nesting_to_take, curr_nesting=(curr_nesting + 1))
        return False
    for node_id in nodes_below_thresholds.index:
        node = graph[node_id]
        nesting_to_take = 0
        NESTING_LIMIT = (len(graph) + 1)
        while True:
            # Try to attach at the current depth: forward first, then backward.
            broke = (_basic_nest_forward(node, X, set_idx, node_id, to_unify, nesting_to_take=nesting_to_take, curr_nesting=0) or _basic_nest_backward(node, X, set_idx, node_id, to_unify, nesting_to_take=nesting_to_take, curr_nesting=0))
            if broke:
                break
            nesting_to_take += 1
            print(f'Going {(nesting_to_take + 1)} more nesting level for node:{node_id} because all outputs are below threshold {set_idx}')
            if (nesting_to_take >= NESTING_LIMIT):
                raise NotImplementedError(f'did not find node with above THRESHOLD={THRESHOLD} weight to unify')
    for n in graph.nodes:
        n.out_edges.sort(key=(lambda x: x.id))
    # Renumber cluster labels by first appearance in ascending id order.
    cluster_to_min_node_id = {i: X.query(f'cluster == {i}').first_valid_index() for i in range(C)}
    min_node_id_to_cluster = {v: i for (i, v) in cluster_to_min_node_id.items()}
    Y = X.copy()
    for (i, (min_node_id, cluster_id)) in enumerate(min_node_id_to_cluster.items()):
        Y.loc[((X['cluster'] == cluster_id), 'cluster')] = i
    X = Y
    print(X)
    Y = X.copy()
    Y['scope'] = [node.scope for node in nodes]
    print(Y)
    cluster_sums = X.groupby('cluster')['weight'].describe().transpose()
    print('cluster_sums_statistics', cluster_sums)
    clusters = X
    return (clusters, to_unify)
|
def best_Fit_cluster(K: int, clusters, id_to_node: Dict[(int, Node)], to_unify: Dict[(int, List[Union[(List, Any)]])], C: int, second_and_on_cluster_policy: SecondAndOnClusterPolicy=SecondAndOnClusterPolicy.BestFitBinPacking, reminder_policy: ReminderPolicy=ReminderPolicy.ToLast):
    """Assign cluster sub-splits to K bins (one bin per GPU).

    Clusters are processed in REVERSE order. The first processed cluster seeds
    bin i with sub-split i; subsequent clusters follow
    ``second_and_on_cluster_policy`` (best-fit packs heaviest sub-splits into
    the currently lightest bin).

    Returns:
        dict bin_id -> list of row tuples assigned to that bin (len == K).
    """
    bins = defaultdict(list)
    # heapdict: peekitem() always yields the lightest bin.
    bin_weights = heapdict({i: 0 for i in range(K)})
    # NOTE(review): bin_memory is never read or updated, and
    # check_memory_fit below always returns True — memory constraints are
    # effectively disabled placeholders.
    bin_memory = heapdict({i: 0 for i in range(K)})
    all_splits = get_all_splits(K, clusters, id_to_node=id_to_node, to_unify=to_unify, C=C, reminder_policy=reminder_policy)
    def check_memory_fit(candidate, bin_id):
        # Placeholder: memory feasibility not implemented yet.
        return True
    def choose_bin(subsplit, subsplit_idx, cluster_idx):
        # Decide which bin receives this sub-split, according to the policy.
        if ((cluster_idx == 0) and (subsplit_idx >= K)):
            warnings.warn(f'not fully implemented behavior for 1st cluster subsplit_idx >= K (subsplit_idx:{subsplit_idx},K:{K}), will do FirstFitBinPacking')
        if ((cluster_idx == 0) and (subsplit_idx < K)):
            # First processed cluster seeds the bins one-to-one.
            return subsplit_idx
        elif ((second_and_on_cluster_policy == SecondAndOnClusterPolicy.BestFitBinPacking) or ((cluster_idx == 0) and (subsplit_idx >= K))):
            # Pop bins until one fits memory-wise, then restore the popped ones.
            saved = []
            while True:
                if (len(bin_weights) == 0):
                    raise RuntimeError('no bin can fit (memory-wise)')
                (emptiest_bin_id, current_bin_weight) = bin_weights.peekitem()
                fits_memory = check_memory_fit(subsplit, emptiest_bin_id)
                if fits_memory:
                    break
                saved.append(bin_weights.popitem())
            for (k, v) in saved:
                bin_weights[k] = v
            return emptiest_bin_id
        elif (second_and_on_cluster_policy == SecondAndOnClusterPolicy.InOrder):
            if (subsplit_idx < K):
                return subsplit_idx
            else:
                raise NotImplementedError('probably 1st cluster >= K came here')
        elif (second_and_on_cluster_policy == SecondAndOnClusterPolicy.Reversed):
            if (subsplit_idx < K):
                if ((cluster_idx % 2) != 0):
                    # Odd clusters are laid out right-to-left.
                    return ((K - subsplit_idx) - 1)
                else:
                    return subsplit_idx
            else:
                raise NotImplementedError('probably 1st cluster >= K came here')
    for (cluster_idx, split) in enumerate(reversed(all_splits)):
        if ((len(split) > K) and (cluster_idx == 0)):
            raise NotImplementedError()
        if ((len(split) < K) and (cluster_idx == 0)):
            warnings.warn(f'got only reminder in 1st and largest cluster: {split}')
        if ((cluster_idx > 0) and (second_and_on_cluster_policy == SecondAndOnClusterPolicy.BestFitBinPacking)):
            # Heaviest-first improves best-fit packing balance.
            split = sorted(split, key=sum_subsplit_weight, reverse=True)
        for (subsplit_idx, subsplit) in enumerate(split):
            bin_idx = choose_bin(subsplit, subsplit_idx, cluster_idx)
            bin = bins[bin_idx]
            to_add = flatten_subsplit(subsplit)
            bin.extend(to_add)
            bin_weights[bin_idx] += sum((i.weight for i in to_add))
    assert (len(bins) == K)
    return bins
|
def stages_from_bins(graph: Graph, bins: Dict[(int, List[Node])], id_to_node_worked_on: Dict[(int, Node)], verbose=False, assert_missing_in_bins=False, convert_id_to_node_to_topo=False):
    """Turn the per-GPU bin assignment into contiguous stage ids on the nodes.

    Each bin's nodes are grouped into same-GPU connected components, the
    components are broken into topologically-contiguous stages, and finally
    the stages from all GPUs are sorted by their first topo id and numbered
    0..n-1, written into each node's ``stage_id``.

    Returns:
        Total number of stages created.
    """
    if convert_id_to_node_to_topo:
        # Re-key the lookup by topo_sort_id instead of node id.
        tmp = dict()
        for (i, v) in id_to_node_worked_on.items():
            tmp[v.topo_sort_id] = v
        id_to_node_worked_on = tmp
        del tmp
    bins_to_id = {p: set((n.topo_sort_id for n in v if (n.topo_sort_id in id_to_node_worked_on))) for (p, v) in bins.items()}
    nodes_with_out_edges_to_different_gpu = defaultdict(set)
    nodes_with_in_edges_from_different_gpu = defaultdict(set)
    all_broken_stages = []
    for (gpu_id, nodes_in_bin) in bins.items():
        unbroken_stages = get_ccs_on_same_gpu(bins_to_id, gpu_id, nodes_in_bin, nodes_with_in_edges_from_different_gpu, nodes_with_out_edges_to_different_gpu)
        broken_stages = break_ccs_on_same_gpu_to_stages(graph, id_to_node_worked_on, unbroken_stages, bins_to_id, assert_missing_in_bins=assert_missing_in_bins)
        if verbose:
            print('unbroken_stages')
            print(unbroken_stages)
            print('broken_stages')
            print(broken_stages)
        broken_stages: List[List[int]]
        all_broken_stages.extend(broken_stages)
    # Stage numbering follows the topological order of each stage's first node.
    all_broken_stages.sort(key=(lambda topo_sorted_list: topo_sorted_list[0]))
    for (i, topo_sorted_list) in enumerate(all_broken_stages):
        for nid in topo_sorted_list:
            n = id_to_node_worked_on[nid]
            n.stage_id = i
    return len(all_broken_stages)
|
def break_ccs_on_same_gpu_to_stages(graph, id_to_node_worked_on, unbroken_stages, bins_to_id, assert_missing_in_bins=False):
    """Break each same-GPU connected component into topologically-contiguous stages.

    A component (set of topo ids) is scanned in ascending order; a new stage
    starts whenever the gap between consecutive members hides a directed path
    that leaves and re-enters the component (see
    ``ccs_on_same_gpu_has_path_via_missing_nodes``). Ids absent from
    ``id_to_node_worked_on`` (graph inputs) are skipped with a printout.

    Returns:
        List of stages (lists of topo ids), sorted by their first id.
    """
    broken_stages = []
    for unbroken_stage in unbroken_stages:
        broken_stages_for_unbroken_stage = []
        cur_set = list()
        unbroken_stage = deque(sorted(unbroken_stage))
        # Seed cur_set with the first id that is a real worked-on node.
        while unbroken_stage:
            prev_topo_sort_id = unbroken_stage.popleft()
            if (prev_topo_sort_id in id_to_node_worked_on):
                cur_set.append(prev_topo_sort_id)
                break
            else:
                print(f'skipping input_v0: {prev_topo_sort_id}')
        while unbroken_stage:
            topo_sort_id = unbroken_stage.popleft()
            if (topo_sort_id == (prev_topo_sort_id + 1)):
                # Directly consecutive: same stage, nothing to check.
                pass
            elif (topo_sort_id not in id_to_node_worked_on):
                # NOTE(review): message prints prev_topo_sort_id, not the id
                # actually being skipped — looks like a logging slip; confirm.
                print(f'skipping input_v1: {prev_topo_sort_id}')
            else:
                has_path_via_missing_nodes = ccs_on_same_gpu_has_path_via_missing_nodes(cur_set, graph, id_to_node_worked_on, prev_topo_sort_id, topo_sort_id, unbroken_stage)
                if has_path_via_missing_nodes:
                    # The gap hides a dependency: close the current stage here.
                    broken_stages_for_unbroken_stage.append(cur_set)
                    cur_set = list()
                if assert_missing_in_bins:
                    missing_topo_sort_ids = list(range((prev_topo_sort_id + 1), topo_sort_id))
                    for mid in missing_topo_sort_ids:
                        in_bins = any(map((lambda v: (mid in v)), bins_to_id.values()))
                        if (not in_bins):
                            print(f'missing_topo_sort_ids are not in bins {missing_topo_sort_ids}')
                            raise ValueError(f'missing_topo_sort_ids are not in bins {missing_topo_sort_ids}')
            if (topo_sort_id in id_to_node_worked_on):
                cur_set.append(topo_sort_id)
                prev_topo_sort_id = topo_sort_id
            else:
                print(f'skipping input_v2: {prev_topo_sort_id}')
                # Re-seed from the next worked-on id after a skipped input.
                while unbroken_stage:
                    prev_topo_sort_id = unbroken_stage.popleft()
                    if (prev_topo_sort_id in id_to_node_worked_on):
                        cur_set.append(prev_topo_sort_id)
                        break
                    else:
                        print(f'skipping input_v3: {prev_topo_sort_id}')
        if cur_set:
            broken_stages_for_unbroken_stage.append(cur_set)
        broken_stages.extend(broken_stages_for_unbroken_stage)
    broken_stages.sort(key=(lambda topo_sorted_list: topo_sorted_list[0]))
    return broken_stages
|
def ccs_on_same_gpu_has_path_via_missing_nodes(cur_set, graph, id_to_node_worked_on, prev_topo_sort_id, topo_sort_id, unbroken_stage):
    """Check whether the topo-id gap between two stage members hides a dependency.

    Returns True iff some node with topo id in (prev_topo_sort_id,
    topo_sort_id) exists in the work graph AND there is a directed path from
    the current stage prefix (``cur_set``) to the remaining stage members
    that runs through such "missing" nodes. Such a path forbids fusing the two
    members into one contiguous stage.

    The reachability computation does not depend on WHICH missing id
    triggered it (every structure below derives only from the full gap
    range), so it is performed at most once — the previous revision rebuilt
    the identical auxiliary graph for every qualifying missing id.
    """
    missing_topo_sort_ids = list(range((prev_topo_sort_id + 1), topo_sort_id))
    # Fast exit: no missing id is a live node of the work graph.
    any_relevant_missing = any(
        ((mid in id_to_node_worked_on) and (id_to_node_worked_on[mid].id in graph))
        for mid in missing_topo_sort_ids
    )
    if (not any_relevant_missing):
        return False
    cur_nodes = [id_to_node_worked_on[x] for x in cur_set]
    scs = set(cur_set)
    missing_nodes_in_work_graph = [id_to_node_worked_on[x] for x in missing_topo_sort_ids if ((x not in scs) and (x in id_to_node_worked_on))]
    nodes_left_in_unborken_stage = set((id_to_node_worked_on[x] for x in unbroken_stage))
    nodes_left_in_unborken_stage.add(id_to_node_worked_on[topo_sort_id])
    A: Set[Node] = set(cur_nodes)
    B: Set[Node] = set(nodes_left_in_unborken_stage)
    edge_nodes: Set[Node] = set(missing_nodes_in_work_graph)
    # Auxiliary digraph: 0 = super-source (stage prefix), 1 = super-sink
    # (remaining stage members); missing-node ids are shifted by +2.
    edges = []
    for a in A:
        for c in a.out_edges:
            if (c in edge_nodes):
                edges.append((0, (c.id + 2)))
    for c in edge_nodes:
        for nc in c.out_edges:
            if (nc in edge_nodes):
                edges.append(((c.id + 2), (nc.id + 2)))
            elif (nc in B):
                edges.append(((c.id + 2), 1))
    G = nx.DiGraph(incoming_graph_data=edges)
    G.add_node(0)
    G.add_node(1)
    return nx.algorithms.shortest_paths.generic.has_path(G, 0, 1)
|
def get_ccs_on_same_gpu(bins_to_id, gpu_id, nodes_in_bin, nodes_with_in_edges_from_different_gpu, nodes_with_out_edges_to_different_gpu):
    """Union topo-adjacent nodes living on the same GPU into connected components.

    Side effect: records, per GPU, which nodes have edges crossing to a
    different GPU (both directions). Returns the sorted components.
    """
    components = UnionFind(elements=bins_to_id[gpu_id])
    # `pending` (instead of the original `open`) avoids shadowing the builtin.
    pending = deque(sorted(nodes_in_bin, key=(lambda node: node.topo_sort_id)))
    while pending:
        node = pending.popleft()
        for succ in node.out_edges:
            if (succ.topo_sort_id in components):
                components.union(node.topo_sort_id, succ.topo_sort_id)
            else:
                # Successor lives on another GPU: remember both edge endpoints.
                nodes_with_out_edges_to_different_gpu[gpu_id].add(node)
                nodes_with_in_edges_from_different_gpu[succ.gpu_id].add(succ)
    return components.sorted_components()
|
def analyze_n_clusters(nodes: List[Node], node_weight_function, max_k=10, THRESHOLD=0, manual_choose_n_clusters=True):
    """Utility to help determine the number of clusters for partition_2dbin_pack.

    Builds the per-node weight table, fits KMeans for k=1..max_k on the
    above-threshold nodes, and shows the SSE (inertia) elbow plot.

    Returns:
        The user-chosen number of clusters when ``manual_choose_n_clusters``
        is True, otherwise None (plot/printout only).
    """
    def node_to_record(node):
        return {'id': node.id, 'weight': node_weight_function(node)}
    # Build the table once (the previous revision built `records` twice and
    # the DataFrame three times, all identical).
    records = [node_to_record(node) for node in nodes]
    X = pd.DataFrame.from_records(data=records, index='id')
    nodes_above_thresholds = X[(X['weight'] > THRESHOLD)]
    print(X)
    Y = X.copy()
    Y['scope'] = [node.scope for node in nodes]
    print(Y)
    sse = {}
    for k in range(1, (max_k + 1)):
        kmeans = KMeans(n_clusters=k, max_iter=1000, copy_x=True).fit(nodes_above_thresholds)
        X.loc[(nodes_above_thresholds.index, 'cluster')] = kmeans.labels_
        sse[k] = kmeans.inertia_
    # Elbow plot: SSE as a function of k.
    plt.figure()
    plt.plot(list(sse.keys()), list(sse.values()))
    plt.xlabel('Number of clusters')
    plt.ylabel('SSE')
    plt.show(block=False)
    if manual_choose_n_clusters:
        n_clusters = input('-I- choose desired number of clusters to continue...')
        n_clusters = int(n_clusters)
        return n_clusters
|
def partition_2dbin_pack(graph: Graph, num_gpus: int, n_clusters: int, node_weight_function: Optional[NodeWeightFunction]=None, use_layers_graph: bool=True, THRESHOLD=0, second_and_on_cluster_policy: SecondAndOnClusterPolicy=SecondAndOnClusterPolicy.BestFitBinPacking, reminder_policy: ReminderPolicy=ReminderPolicy.ToLast, display_cluster_sse_plot=False, **kwargs):
    """Partition ``graph`` onto ``num_gpus`` GPUs via weight clustering + bin packing.

    Pipeline: KMeans-cluster nodes by weight (make_clusters), pack cluster
    sub-splits into GPU bins (best_Fit_cluster), convert the bins into
    contiguous stage ids (stages_from_bins), post-process, and finally map the
    partition back onto the full graph if a layers-only graph was used.

    Returns:
        (graph, stage_to_gpu_map)
    """
    # Both policies may arrive as their raw string value; coerce to enums.
    if isinstance(second_and_on_cluster_policy, type(next(iter(ReminderPolicy._value2member_map_.keys())))):
        second_and_on_cluster_policy = SecondAndOnClusterPolicy._value2member_map_[second_and_on_cluster_policy]
    if isinstance(reminder_policy, type(next(iter(ReminderPolicy._value2member_map_.keys())))):
        reminder_policy = ReminderPolicy._value2member_map_[reminder_policy]
    graph.topo_sort()
    if use_layers_graph:
        (work_graph, lookup) = graph.new_graph_without_constants()
    else:
        (work_graph, lookup) = (graph, None)
    nodes = [n for n in work_graph.nodes if (n not in work_graph.inputs)]
    K = num_gpus
    if (('analyze_n_clusters' in kwargs) and kwargs['analyze_n_clusters']):
        # Interactive flow: show the elbow plot and let the user pick.
        n_clusters = analyze_n_clusters(nodes, node_weight_function, max_k=10, THRESHOLD=THRESHOLD, manual_choose_n_clusters=True)
        print(f'-I- Will use n_clusters={n_clusters}')
    elif display_cluster_sse_plot:
        print('-V- displaying info about n_clusters')
        analyze_n_clusters(nodes, node_weight_function, max_k=10, THRESHOLD=THRESHOLD, manual_choose_n_clusters=False)
    id_to_node = {node.id: node for node in nodes}
    C = n_clusters
    (clusters, to_unify) = make_clusters(work_graph, nodes, node_weight_function, C=C, THRESHOLD=THRESHOLD)
    bins = best_Fit_cluster(K, clusters, id_to_node=id_to_node, to_unify=to_unify, C=C, second_and_on_cluster_policy=second_and_on_cluster_policy, reminder_policy=reminder_policy)
    for v in bins.values():
        v.sort(key=(lambda x: x.Index))
    pprint(bins)
    def node_list(iterable):
        # DataFrame row tuples -> actual graph nodes.
        return [id_to_node[i.Index] for i in iterable]
    bins = {i: node_list(bins[i]) for i in bins}
    times = {i: sum((node_weight_function(x) for x in bins[i])) for i in bins}
    print('times:')
    pprint(times)
    for (i, bin_nodes) in bins.items():
        for n in bin_nodes:
            n.gpu_id = i
    work_graph.topo_sort(change_graph=False)
    stages_from_bins(work_graph, bins, id_to_node_worked_on=id_to_node, convert_id_to_node_to_topo=True)
    work_graph = post_process_partition(work_graph)
    if use_layers_graph:
        # Copy the stage assignment from the layers graph back to the full one.
        graph.induce_layer_partition(work_graph, lookup)
    stage_to_gpu_map = convert_handle_missing_print(bins, graph)
    return (graph, stage_to_gpu_map)
|
# NOTE: trailing dataset-viewer boilerplate ("Subsets and Splits" / community
# query placeholders) is extraction residue, not part of this module.