| python_code | repo_name | file_path |
|---|---|---|
from .permutation_utilities import *
################################################################################################################
# Greedy Channel Swaps - iterative, deterministic, can be parallelized
# 1. Build a map of the magnitude improvement of involved stripes for all pairs of channel swaps
# 2. Sort the map, march through by decreasing improvement, skipping entries whose stripes have been modified
# 3. Repeat until there's no entry with positive improvement (convergence)
################################################################################################################
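## Worked example (illustrative, not from the original file): for the single row
##   [9, 1, 1, 1 | 8, 7, 6, 1]
## naive 2:4 pruning keeps the two largest-magnitude values in each stripe of four columns,
## retaining (9+1) + (8+7) = 25. Swapping column 1 (value 1) with column 5 (value 7) gives
##   [9, 7, 1, 1 | 8, 1, 6, 1]
## which retains (9+7) + (8+6) = 30, an improvement of 5. The greedy search below finds and
## applies such swaps in decreasing order of benefit until no positive-improvement swap remains.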
## try swapping columns and tracking magnitude after pruning
def try_swap(matrix, dst, src):
src_base = sum_after_2_to_4(matrix[...,int(src/4)*4:int(src/4)*4+4])
dst_base = sum_after_2_to_4(matrix[...,int(dst/4)*4:int(dst/4)*4+4])
# swap
matrix[...,[src,dst]] = matrix[...,[dst,src]]
# check the Nx4 slices of the swapped columns
src_sum = sum_after_2_to_4(matrix[...,int(src/4)*4:int(src/4)*4+4])
dst_sum = sum_after_2_to_4(matrix[...,int(dst/4)*4:int(dst/4)*4+4])
# swap back
matrix[...,[src,dst]] = matrix[...,[dst,src]]
return src_sum + dst_sum, (src_sum + dst_sum) - (src_base + dst_base)
## convert a pair of stripes and a swap index to the corresponding pair of column indices
def stripes_and_swap_idx_to_columns(stripe0, stripe1, idx):
i = 0
for c0 in range(4):
for c1 in range(4):
if i == idx:
return stripe0*4+c0, stripe1*4+c1
i += 1
return None
## convert columns to stripe and swap indices
def columns_to_stripes_and_swap_idx(col0, col1):
stripe0 = int(col0/4)
col0 %= 4
stripe1 = int(col1/4)
col1 %= 4
idx = 0
for c0 in range(4):
for c1 in range(4):
if c0 == col0 and c1 == col1:
return stripe0, stripe1, idx
idx += 1
return None
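## Note (illustrative, not from the original file): the swap index enumerates the 4x4 column
## combinations of the two stripes in row-major order, so idx = c0*4 + c1; equivalently
## col0 = stripe0*4 + idx//4 and col1 = stripe1*4 + idx%4. For example, stripes (0, 2) with
## idx 6 map to columns (1, 10), and columns_to_stripes_and_swap_idx(1, 10) recovers (0, 2, 6).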
## build a list of stripe pairs that need their benefits recomputed because one stripe was modified
def build_stripe_pairs(matrix, used_stripes):
stripe_pairs = []
total_stripes = int(matrix.shape[1]/4)
used_stripes = np.sort(used_stripes)
for stripe0 in range(total_stripes-1):
        for stripe1 in range(stripe0+1, total_stripes): # start at stripe0+1: swapping within a stripe does nothing
if stripe0 in used_stripes or stripe1 in used_stripes:
stripe_pairs.append([stripe0,stripe1])
return np.asarray(stripe_pairs)
## compute the benefit of swapping each pair of columns in the matrix using the GPU
## only update stripes' columns that appear in used_stripes to avoid unnecessary computations
def compute_swap_map(matrix, used_stripes):
do_gpu = use_gpu()
assert(do_gpu)
stripe_pairs = build_stripe_pairs(matrix, used_stripes).astype(np.uint32)
matrix_view = matrix.astype(np.float32).flatten()
stripe_pairs_view = stripe_pairs.flatten()
output = np.zeros((len(stripe_pairs)*16), dtype=np.float32).flatten()
result = permutation_search_cuda_kernels.build_swap_map(matrix_view, matrix.shape[0], matrix.shape[1], stripe_pairs_view, output)
# translate the flat array from the GPU to a map
pair_improvement_map = {}
for i,pair in enumerate(stripe_pairs):
for swap_idx in range(16):
col0, col1 = stripes_and_swap_idx_to_columns(pair[0], pair[1], swap_idx)
pair_improvement_map[(col0, col1)] = output[i*16+swap_idx]
return pair_improvement_map
## build the full swap map
def build_swap_map(matrix, swap_map, swap_ids, used_stripes, verbosity):
improvements = None
# if we have a GPU and built kernels, pre-compute the needed values
do_gpu = use_gpu()
if do_gpu:
if len(swap_map) == 0:
used_stripes = [s for s in range(int(matrix.shape[1]/4))]
improvements = compute_swap_map(matrix, used_stripes)
idx = 0
updates = 0
for src in range(matrix.shape[1]-1): # parallelize these loops
for dst in range(src+1, matrix.shape[1]):
# swapping within a stripe does nothing
if int(src/4) == int(dst/4):
continue
# if we touched this stripe last time, update it
if (int(src/4) in used_stripes) or (int(dst/4) in used_stripes) or len(swap_map) <= idx:
tmp_improvement = 0.0
# use the pre-computed values from the GPU if possible, otherwise compute on the CPU
if do_gpu:
tmp_improvement = improvements[(src,dst)]
else:
tmp_mag, tmp_improvement = try_swap(matrix, src, dst)
updates += 1
if len(swap_map) <= idx:
swap_map.append(tmp_improvement)
swap_ids.append((src,dst))
else:
swap_map[idx] = tmp_improvement
swap_ids[idx] = (src,dst)
idx += 1
if verbosity > 15:
print(f"\tupdated {updates} map entries")
return swap_map, swap_ids
def use_swap_map(matrix, swap_map, swap_ids, threshold, used_escape_attempts, escape_attempts, permutation, verbosity):
used_stripes = []
swaps = 0
improvement = 0.0
# set the traversal order and threshold
ix = np.flip(np.argsort(swap_map)) # small to large -> large to small
threshold = min(max(swap_map[ix[0]] * threshold, 0.0001),1.0)
# iterate through the potential swaps in benefit order
for swap in range(len(ix)):
swap_id = ix[swap]
src = swap_ids[swap_id][0]
dst = swap_ids[swap_id][1]
# early-out of swaps that are below the threshold (don't be so greedy)
if swap_map[ix[swap]] < threshold:
# see if an arbitrary swap helps things if we've converged
if len(used_stripes) == 0 and used_escape_attempts < escape_attempts:
swap_id = np.random.randint(len(swap_ids))
if verbosity > 15:
print(F"converged, attempt #{used_escape_attempts+1} to jiggle out, using index {swap_id} into the sorted list={ix[swap_id]}")
swap_id =ix[swap_id]
src = swap_ids[swap_id][0]
dst = swap_ids[swap_id][1]
used_escape_attempts += 1
else:
break
# skip swaps that include a stripe we've already modified
if int(src/4) in used_stripes or int(dst/4) in used_stripes:
continue
# we'll need to update these stripes later
used_stripes.append(int(src/4))
used_stripes.append(int(dst/4))
# make the swap
if verbosity > 20:
print(F"\t{swap}\t{src},{dst} {swap_map[swap_id]:.4f}")
matrix[...,[src,dst]] = matrix[...,[dst,src]]
permutation[src],permutation[dst] = permutation[dst],permutation[src]
improvement += swap_map[swap_id]
swaps += 1
return matrix, swaps, swap_map, swap_ids, used_stripes, improvement, used_escape_attempts, permutation
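## Note on the threshold above (illustrative, not from the original file): with Channel_Swap's
## default threshold of 1e-5, the per-pass cutoff computed at the top of use_swap_map is
## min(max(best_improvement * 1e-5, 0.0001), 1.0); e.g. if the best entry in swap_map is 12.5,
## the cutoff is 1.25e-4, so only swaps improving the retained magnitude by at least that much
## are applied in this pass.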
def Channel_Swap(matrix, escape_attempts=0, verbosity=0, permutation=None):
threshold = 0.00001
used_escape_attempts = 0
# initialize
if permutation is None:
permutation = [c for c in range(matrix.shape[1])]
swap_map = []
swap_ids = []
used_stripes = []
swap_count = 0
iterations = 0
agg_improvement = 0.
cur_total_sum = sum_after_2_to_4(matrix)
start_time = time.perf_counter()
# do the work
swapped = 1 # just start with nonzero value to fall into the loop
while swapped > 0:
swap_map, swap_ids = build_swap_map(matrix, swap_map, swap_ids, used_stripes, verbosity)
matrix, swapped, swap_map, swap_ids, used_stripes, improvement, used_escape_attempts, permutation = use_swap_map(matrix, swap_map, swap_ids, threshold, used_escape_attempts, escape_attempts, permutation, verbosity)
agg_improvement += improvement
# keep track of statistics, print occasionally
swap_count += swapped
if verbosity > 10:
iterations += 1
cur_total_sum += agg_improvement
duration = time.perf_counter() - start_time
print(F"\t{iterations:8} {cur_total_sum:7.2f} {agg_improvement:7.2f} {swap_count:4} {agg_improvement/max(swap_count,1):5.2f} {duration:7.2f}")
agg_improvement = 0.
swap_count = 0
# final status
seconds = time.perf_counter() - start_time
return matrix, seconds, permutation
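## Illustrative usage sketch (an addition for clarity, not part of the library API): permute a
## random matrix and compare the magnitude retained by 2:4 pruning before and after the greedy
## search. The helper name and arguments are hypothetical; it is defined but never called here.
def _channel_swap_example(rows=64, cols=32, seed=0):
    rng = np.random.RandomState(seed)
    matrix = rng.randn(rows, cols).astype(np.float32)
    before = sum_after_2_to_4(np.copy(matrix))
    # with escape_attempts=0 only strictly beneficial swaps are applied,
    # so the retained magnitude can only increase
    permuted, _seconds, permutation = Channel_Swap(np.copy(matrix), escape_attempts=0, verbosity=0)
    after = sum_after_2_to_4(permuted)
    return before, after, permutation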
| apex-master | apex/contrib/sparsity/permutation_search_kernels/channel_swap.py |
import numpy as np
from .permutation_utilities import *
from .exhaustive_search import Exhaustive_Search
def accelerated_search_for_good_permutation(matrix_group, options=None, verbosity=0):
"""This function is used to call the permutation search CUDA kernels.
    Users can select a preferred search strategy by providing a valid 'options' dictionary,
    or they can implement their own customized 'accelerated_search_for_good_permutation' function.
"""
input_matrix = matrix_group.cpu().detach().numpy()
if verbosity > 1:
print("\n[accelerated_search_for_good_permutation] input matrix shape: \'{:}\'.".format(input_matrix.shape))
result = np.copy(input_matrix)
# init a sequential permutation search sequence
input_channel_num = matrix_group.size(1)
permutation_sequence = [n for n in range(input_channel_num)]
duration = 0.0
if options == None:
options = {}
if 'strategy' not in options: # right now, the default permutation search strategy is: 'exhaustive' search
options['strategy'] = 'exhaustive'
if verbosity > 1:
print("[accelerated_search_for_good_permutation] the permutation strategy is: \'{:} search\'.".format(options['strategy']))
# define sub options for each search strategy
if options['strategy'] == 'exhaustive':
        # right now, the default options for 'exhaustive' search are: 'exhaustive,8,100'
if 'stripe_group_size' not in options:
options['stripe_group_size'] = 8
if 'escape_attempts' not in options:
options['escape_attempts'] = 100
elif options['strategy'] == 'progressive channel swap':
        # randomly proposes cross-stripe channel swaps and keeps only the beneficial ones, until the search time limit expires.
if 'progressive_search_time_limit' not in options:
options['progressive_search_time_limit'] = 60
if 'improvement_threshold' not in options:
options['improvement_threshold'] = 1e-9
# execute the requested strategy
if options['strategy'] == 'exhaustive':
result, duration, permutation_sequence = Exhaustive_Search(result, stripe_group_size=options['stripe_group_size'], escape_attempts=options['escape_attempts'])
elif options['strategy'] == 'progressive channel swap':
real_swap_num = 0
start_time = time.perf_counter()
while time.perf_counter() - start_time < options['progressive_search_time_limit']:
src = np.random.randint(result.shape[1])
dst = np.random.randint(result.shape[1])
src_group = int(src/4)
dst_group = int(dst/4)
if src_group == dst_group: # channel swapping within a stripe does nothing
continue
new_sum, improvement = try_swap(result, dst, src)
if improvement > options['improvement_threshold']:
result[...,[src,dst]] = result[...,[dst,src]]
permutation_sequence[src], permutation_sequence[dst] = permutation_sequence[dst], permutation_sequence[src]
real_swap_num += 1
duration = time.perf_counter() - start_time
if verbosity > 1:
            print("\tFinally swapped {} channel pairs before the search time limit expired.".format(real_swap_num))
elif options['strategy'] == 'user defined': # need to get the permutated matrix (result) by applying customized permutation search function
if verbosity > 1:
print("[accelerated_search_for_good_permutation] Use the user customized permutation search function!")
else:
if verbosity >= 0:
print("[accelerated_search_for_good_permutation] Cannot find the implementation of the required strategy!")
if verbosity > 1:
print("[accelerated_search_for_good_permutation] Take {:.4f} seconds to search the permutation sequence.".format(duration))
return permutation_sequence
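# Illustrative call sketches (additions for clarity, not part of the library API); 'matrix_group'
# is a 2D torch weight tensor, e.g. torch.randn(64, 32):
#   permutation = accelerated_search_for_good_permutation(matrix_group)  # default: exhaustive, 8, 100
#   permutation = accelerated_search_for_good_permutation(
#       matrix_group,
#       options={'strategy': 'progressive channel swap',
#                'progressive_search_time_limit': 10,
#                'improvement_threshold': 1e-9})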
| apex-master | apex/contrib/sparsity/permutation_search_kernels/call_permutation_search_kernels.py |
from .call_permutation_search_kernels import accelerated_search_for_good_permutation
from .permutation_utilities import sum_after_2_to_4
| apex-master | apex/contrib/sparsity/permutation_search_kernels/__init__.py |
import numpy as np
import time
import subprocess
import math
gpus_tested = False
gpus_found = 0
kernels_found = True
try:
import permutation_search_cuda as permutation_search_cuda_kernels
print(f"Found permutation search CUDA kernels")
except ImportError:
try:
from . import permutation_search_cuda as permutation_search_cuda_kernels
print(f"Found permutation search CUDA kernels for standalone testing")
except ImportError:
print(f"Could not find permutation search CUDA kernels, falling back to CPU path")
kernels_found = False
def use_gpu(initial_override = True):
global gpus_tested, gpus_found, kernels_found
if not gpus_tested:
if not initial_override:
gpus_tested = True
return False
try:
gpus_found = str(subprocess.check_output(["nvidia-smi", "-L"])).count('UUID')
print(f"Found {gpus_found} gpus")
except:
gpus_found = 0
print(f"Could not find nvidia-smi, please check your cuda installation")
gpus_tested = True
return gpus_found > 0 and kernels_found
##############################################################################################
# pruning utilities
##############################################################################################
## apply 2:4 to some matrix
def apply_2_to_4(matrix):
for row in range(matrix.shape[0]):
for col in range(0,matrix.shape[1],4):
ix = np.argsort(np.abs(matrix[row,col:col+4]))
matrix[row,col+ix[0]] = 0.0
matrix[row,col+ix[1]] = 0.0
return matrix
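## Example (illustrative, not from the original file): apply_2_to_4 zeroes the two
## smallest-magnitude entries in every group of four consecutive columns of each row, so the
## row [0.9, -0.1, 0.5, 0.2] becomes [0.9, 0.0, 0.5, 0.0].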
## find the sum of magnitudes if 2:4 were applied to a matrix
def sum_after_2_to_4(matrix):
cur_sum = 0.0
use_cuda = use_gpu()
if not use_cuda:
for row in range(matrix.shape[0]):
for col in range(0,matrix.shape[1],4):
ix = np.argsort(np.abs(matrix[row,col:col+4]))
cur_sum += abs(matrix[row,col+ix[2]])
cur_sum += abs(matrix[row,col+ix[3]])
else:
matrix = matrix.astype(np.float32)
cuda_sum = np.zeros((1), dtype=np.float32)
matrix_view = np.copy(matrix).flatten()
sum_view = cuda_sum.flatten()
blocks = max(int(matrix.shape[1]/4/2), 1)
threads = min(max(math.ceil(matrix.shape[0]/4), 1), 1024)
result = permutation_search_cuda_kernels.sum_after_2_to_4(matrix_view,
matrix.shape[0],
matrix.shape[1],
0,
matrix.shape[1],
blocks,
threads,
sum_view)
cur_sum = sum_view[0]
return cur_sum
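## Example (illustrative, not from the original file): for the single row [9, 1, 1, 1, 8, 7, 6, 1],
## sum_after_2_to_4 keeps the two largest magnitudes of each group of four and returns
## (9+1) + (8+7) = 25.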
# perform unstructured pruning on some matrix
def unstructured_prune(matrix, sparsity):
shp = matrix.shape
matrix = matrix.flatten()
ix = np.argsort(matrix)
ix = ix[:int(len(ix)*sparsity)]
matrix[ix] = 0.0
matrix = np.reshape(matrix, shp)
return matrix
## try swapping columns and tracking magnitude after pruning
def try_swap(matrix, dst, src):
src_base = sum_after_2_to_4(matrix[...,int(src/4)*4:int(src/4)*4+4])
dst_base = sum_after_2_to_4(matrix[...,int(dst/4)*4:int(dst/4)*4+4])
# swap
matrix[...,[src,dst]] = matrix[...,[dst,src]]
# check the Nx4 slices of the swapped columns
src_sum = sum_after_2_to_4(matrix[...,int(src/4)*4:int(src/4)*4+4])
dst_sum = sum_after_2_to_4(matrix[...,int(dst/4)*4:int(dst/4)*4+4])
# swap back
matrix[...,[src,dst]] = matrix[...,[dst,src]]
return src_sum + dst_sum, (src_sum + dst_sum) - (src_base + dst_base)
## efficacy = (magnitude recovered relative to naive 2:4) / (magnitude naive 2:4 lost relative to the optimal permutation)
def efficacy(optimal_lost_magnitude, base_lost_magnitude, cur_lost_magnitude):
if base_lost_magnitude == optimal_lost_magnitude:
eff = 1.0
else:
eff = (base_lost_magnitude - cur_lost_magnitude) / (base_lost_magnitude - optimal_lost_magnitude)
return eff
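## Example (illustrative, not from the original file): if naive 2:4 loses 10.0 of magnitude, the
## best known permutation loses only 4.0, and the current permutation loses 6.0, then
## efficacy(4.0, 10.0, 6.0) = (10.0 - 6.0) / (10.0 - 4.0) = 4/6, i.e. about two thirds of the
## recoverable magnitude has been recovered.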
## find the magnitude if the rows of a matrix were pruned independently, without structure
def magnitude_after_pruning_rows(matrix, rate=0.5):
magnitude = 0.
cols = matrix.shape[1]
for r in range(matrix.shape[0]):
rowVals = matrix[r]
rowVals = np.sort(np.abs(rowVals))
magnitude += np.sum(rowVals[int(cols*rate):])
return magnitude
##############################################################################################
# permutation utilities
##############################################################################################
## exhaustively search an entire matrix on the GPU
def try_permutations_on_matrix(matrix, permutations):
use_cuda = use_gpu()
assert(use_cuda) # caller should have checked
matrix = np.copy(matrix)
matrix = matrix.astype(np.float32)
matrix_view = np.copy(matrix).flatten()
permutations_view = np.copy(np.asarray(permutations)).astype(np.uint32).flatten()
stripe_groups = np.asarray([[s for s in range(int(matrix.shape[1]/4))]]).astype(np.uint32)
stripe_groups_view = stripe_groups.flatten()
improvement = np.zeros((1), dtype=np.float32).flatten()
permutation = np.zeros((1), dtype=np.uint32).flatten()
result = permutation_search_cuda_kernels.check_permutations(matrix_view,
matrix.shape[0],
matrix.shape[1],
stripe_groups_view,
len(stripe_groups[0]),
len(stripe_groups),
permutations_view,
len(permutations),
improvement,
permutation)
return improvement[0], permutations[permutation[0]]
## find the permutation needed to make matrix A look like matrix B
def find_permutation(A, B):
permutation = []
for col in range(A.shape[1]):
Avals = A[...,col]
for bcol in range(B.shape[1]):
if np.all(Avals - B[...,bcol] == np.zeros(Avals.shape)):
permutation.append(bcol)
break
return permutation
########################################
# reasonable method to find distance between permutations
# this is used to generate permutations "between" two other permutations to divide efficacy space
#######################################
## separate a flat permutation array into its groups of four and sort within each group to put the
## output into a canonical order: if two permutations have the same values in the same groups, those groups will appear identical
def make_grouped(A):
groups = []
for x in range(0,len(A),4):
group = []
for c in range(4):
group.append(A[x+c])
group = np.sort(group)
groups.append(group)
return groups
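## Example (illustrative, not from the original file): make_grouped([3, 1, 0, 2, 7, 5, 6, 4])
## returns two sorted groups, [0, 1, 2, 3] and [4, 5, 6, 7] (as numpy arrays), keeping the groups
## themselves in their original positions.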
## given two permutations, find the groups they have in common
def common_groups(A, B):
Ag = make_grouped(A)
Bg = make_grouped(B)
# convert to sets to take the intersection
As = set(tuple(Ag[g]) for g in range(len(Ag)))
Bs = set(tuple(Bg[g]) for g in range(len(Bg)))
common = As.intersection(Bs)
# flatten
C = []
for s in common:
for v in s:
C.append(v)
# group
return make_grouped(C)
## given two permutations, remove the groups that are common between them
def remove_common_groups(A, B):
Ag = make_grouped(A)
Bg = make_grouped(B)
# convert to sets to take set difference
As = set(tuple(Ag[g]) for g in range(len(Ag)))
Bs = set(tuple(Bg[g]) for g in range(len(Bg)))
Ad = As - Bs
Bd = Bs - As
# turn the differences back into flat arrays
A = []
for s in Ad:
for v in s:
A.append(v)
B = []
for s in Bd:
for v in s:
B.append(v)
# group to put into canonical order, re-flatten
A = make_grouped(A)
B = make_grouped(B)
A = [item for sublist in A for item in sublist]
B = [item for sublist in B for item in sublist]
return A,B
## given two permutations, find which elements in B need to go where to look like A
def group_differences(A, B):
Ag = make_grouped(A)
Bg = make_grouped(B)
wrong_entries = []
#for g,group in enumerate(Bg):
for g in range(len(Bg)):
group = Bg[g]
for i in range(len(group)):
val = group[i]
if val not in Ag[g]:
group_in_a = int(np.where(A == val)[0][0] / 4)
wrong_entries.append((val, g, group_in_a))
return wrong_entries
## (val, cur_group, desired_group) ==> dict[(cur_group, desired_group)] = [vals]
def dictify(wrong_entries):
result = {}
for entry in wrong_entries:
key = (entry[1], entry[2])
if key in result:
result[key].append(entry[0])
else:
result[key] = [entry[0]]
return result
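## Example (illustrative, not from the original file): with permutations A = [0,1,2,3, 4,5,6,7]
## and B = [0,1,6,7, 4,5,2,3] (as numpy arrays), group_differences(A, B) reports the misplaced
## values as [(6, 0, 1), (7, 0, 1), (2, 1, 0), (3, 1, 0)], i.e. (value, current group, desired
## group), and dictify(...) collapses them to {(0, 1): [6, 7], (1, 0): [2, 3]}.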
## move groups of B to where they best match A's groups
def move_groups_to_match(B, A, debug=False):
Ag = make_grouped(A)
Bg = make_grouped(B)
new_Bg = [[] for g in range(len(Ag))]
wrong_entry_dict = dictify(group_differences(A, B))
if debug:
print(f"MGTM:\n\tAg: {Ag}\n\tBg: {Bg}\n\tWED: {wrong_entry_dict}")
moved_groups = []
keys_to_del = []
# move triples to the right spot
for k in wrong_entry_dict.keys():
if k[0] in moved_groups:
keys_to_del.append(k)
continue
if len(wrong_entry_dict[k]) == 3:
new_Bg[k[1]] = Bg[k[0]]
moved_groups.append(k[0])
keys_to_del.append(k)
if debug:
print(f"MGTM: moved triple {wrong_entry_dict[k]} from group {k[0]} to group {k[1]}")
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
# move doubles
for k in wrong_entry_dict.keys():
# if we've already moved the group to which this key belongs, remove it
if k[0] in moved_groups:
keys_to_del.append(k)
continue
if len(wrong_entry_dict[k]) == 2:
if len(new_Bg[k[1]]) == 0: # move it to its requested destination if possible
new_Bg[k[1]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: moved double {wrong_entry_dict[k]} from group {k[0]} to its preferred group {k[1]}")
elif len(new_Bg[k[0]]) == 0: # otherwise leave it where it is (if possible)
new_Bg[k[0]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: left double {wrong_entry_dict[k]} where it was in group {k[0]}")
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
# move singles
# try to leave things where they are to prevent oscillating
for k in wrong_entry_dict.keys():
if k[0] in moved_groups:
keys_to_del.append(k)
continue
if len(new_Bg[k[1]]) == 0: # requested destination
new_Bg[k[1]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: moved single {wrong_entry_dict[k]} from group {k[0]} to its preferred group {k[1]}")
elif len(new_Bg[k[0]]) == 0:
new_Bg[k[0]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: left group {wrong_entry_dict[k]} where it was in group {k[0]}")
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
# put what's left where it'll fit
for k in wrong_entry_dict.keys():
if k[0] in moved_groups:
keys_to_del.append(k)
continue
for dst in range(len(new_Bg)):
if len(new_Bg[dst]) == 0:
new_Bg[dst] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: put group {wrong_entry_dict[k]} where it found a spot in group {dst}")
break
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
assert(len(wrong_entry_dict) == 0)
Agsize = sum( [ len(group) for group in Ag] )
Bgsize = sum( [ len(group) for group in new_Bg] )
assert(Agsize == Bgsize)
new_B = [item for sublist in new_Bg for item in sublist]
return new_B
## swap two permutation entries and put the permutation into unique order
def swap_and_correct(permutation, src, tgt):
permutation[src],permutation[tgt] = permutation[tgt],permutation[src]
grouped = make_grouped(permutation)
grouped = [item for sublist in grouped for item in sublist]
return grouped
## make a swap that will move B in the direction of A
num_diffs = 0
def move_permutation_towards(B, A, debug=False):
global num_diffs
B = move_groups_to_match(B, A, debug)
wrong_entries = group_differences(A, B)
num_diffs = len(wrong_entries)
# nothing to do, early out
if len(wrong_entries) == 0:
if debug:
print("MPT: early out")
return B
if debug:
print(f"MPT: checking {len(wrong_entries)} diffs: {wrong_entries}")
# look for a group of three wrong entries that want to do the same thing
entry_dict = dictify(wrong_entries)
for k in entry_dict.keys():
entry = entry_dict[k]
if len(entry) == 3:
if debug:
print(f"MPT: found a triple swap at {k}: {entry_dict[k]}")
(src, dst) = k
# find the index of the one needed to complete the group
# the value is the value in A[dst] that's not in B[src]
# it's already in the destination group and may or may not need to move
group_id = dst
Ag = make_grouped(np.copy(A))
Bg = make_grouped(np.copy(B))
value = -1
for c in range(4):
if Ag[dst][c] not in Bg[src]:
value = Ag[dst][c]
if debug:
print(f"\tMPT: found the missing value {value} in A group {dst} offset {c}")
break
assert(value != -1)
# now find that value in B
idx0 = np.where(B == value)[0][0]
# find the index of the one this group doesn't need
# it's a member of the group but not in the dict entry
group_id = src
for c in range(4):
if B[group_id*4+c] not in entry_dict[k]:
if debug:
print(f"\tMPT: swapping {idx0} and {group_id*4+c}")
return swap_and_correct(B, idx0, group_id*4+c)
# look for a group of two entries that are heading to the same place as another wrong entry
victim_loner_pair = None
for k in entry_dict.keys():
entry = entry_dict[k]
if len(entry) == 2:
if debug:
print(f"MPT: found a double swap at {k}: {entry_dict[k]}")
(src, dst) = k
# find a wrong entry whose dst is the same
for k2 in entry_dict.keys():
if k2 == k:
continue
# k2 is a key whose value also belongs in stripe k2[1] (dst2)
if dst == k2[1]:
if debug:
print(f"\tMPT: found a loner going in the same direction at {k2}: {entry_dict[k2][0]}")
# instead of moving these three to where they're headed, start merging them by moving the loner into the double
# look for a complement: something moving from src to src2
(src2, dst2) = k2
complement_key = (src, src2)
if complement_key in entry_dict:
complement = entry_dict[complement_key][0]
if debug:
print(f"\t\tMPT: found a complement to the loner:{complement}")
return swap_and_correct(B, np.where(B == entry_dict[k2][0])[0][0], np.where(B == complement)[0][0])
# didn't find a complement, choose one of the two in the src group that don't belong
elif victim_loner_pair is None:
for k3 in entry_dict.keys():
if k3 == k:
continue
if k3[0] == src: # found the victim
victim = entry_dict[k3][0]
if debug:
print(f"\t\tMPT: found a victim for the double swap:{k3} -> {victim}")
victim_loner_pair = (victim, entry_dict[k2][0])
#return swap_and_correct(B, np.where(B == entry_dict[k2][0])[0][0], np.where(B == victim)[0][0])
if victim_loner_pair is not None:
if debug:
print(f"\t\tMPT: couldn't find any complements for double swaps, so going with a loner to make a triple: {victim_loner_pair}")
return swap_and_correct(B, np.where(B == victim_loner_pair[0])[0][0], np.where(B == victim_loner_pair[1])[0][0])
# look for one swap that will correct two entries
candidate_second = None
for we in range(len(wrong_entries)):
cur_entry = wrong_entries[we]
#if debug:
# print(f"\tMPT: checking {cur_entry} for complement")
for we2 in range(0,len(wrong_entries)):
pos_swap = wrong_entries[we2]
#if debug:
# print(f"\t\tMPT: is {pos_swap}?")
if cur_entry[1] == pos_swap[2] and cur_entry[2] == pos_swap[1]:
if debug:
print(f"\t\tfound complements: swapping {cur_entry} and {pos_swap}")
return swap_and_correct(B, np.where(B == cur_entry[0])[0][0], np.where(B == pos_swap[0])[0][0])
elif wrong_entries[0][2] == pos_swap[1]: # if pos_swap is currently where we[0] wants to go, keep it in mind
candidate_second = pos_swap
# fall back on picking the first one we come across
assert(candidate_second is not None)
if debug:
print(f"No complement, swapping two entries: {wrong_entries[0]} {candidate_second}")
return swap_and_correct(B, np.where(B == wrong_entries[0][0])[0][0], np.where(B == candidate_second[0])[0][0])
## find a shortest path from permutation A to B
def permutation_distance(A, B, matrix=None, magnitude_targets=None, debug=False, verbosity=0):
global num_diffs
swaps = 0
debug = False
swap_limit = int(math.pow(2,int(len(A)/4)-1))
num_diffs = swap_limit
common = []
target_results = None
if magnitude_targets is not None:
assert matrix is not None
cur_mag = sum_after_2_to_4(matrix[:,A])
target_results = [(cur_mag, A) for i in range(len(magnitude_targets))]
if verbosity > 0 and matrix is not None:
print(f"swap {'0':>4} {sum_after_2_to_4(matrix[:, B]):>15.3f}")
if verbosity > 5:
print(f"swap {0:>4}, {make_grouped(A)} {make_grouped(B)}")
while not np.all(np.array(A)-np.array(B) == np.zeros(np.array(A).shape)):
cGroups = common_groups(A, B)
for g in cGroups:
common.append(g)
A, B = remove_common_groups(A, B)
if len(A) == 0:
break
B = move_permutation_towards(np.array(B), np.array(A), debug=debug)
swaps += 1
if matrix is not None:
total_cur_permute = [c for c in B]
for c in [item for sublist in common for item in sublist]:
total_cur_permute.append(c)
if verbosity > 0 or magnitude_targets is not None:
cur_mag = sum_after_2_to_4(matrix[:,total_cur_permute])
for i in range(len(target_results)):
result = target_results[i]
if abs(magnitude_targets[i] - result[0]) > abs(magnitude_targets[i] - cur_mag):
target_results[i] = (cur_mag, total_cur_permute)
if verbosity > 0:
print(f"swap {swaps:>4} {cur_mag:>15.3f}")
if verbosity > 5 or swaps > swap_limit:
print(f"swap {swaps:>4}, {A} {B}, {num_diffs} diffs remain")
# safety net
if swaps > swap_limit+3:
return swaps, target_results
return swaps, target_results
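## Example (illustrative, not from the original file): permutation_distance(list(range(8)),
## [0, 1, 2, 4, 3, 5, 6, 7]) needs a single swap (exchanging channels 3 and 4 regroups B to match
## A), so it returns swaps == 1; target_results stays None when no matrix or magnitude_targets
## are supplied.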
| apex-master | apex/contrib/sparsity/permutation_search_kernels/permutation_utilities.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(args):
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
# only prune linear layers, even though we also support conv1d, conv2d and conv3d
ASP.init_model_for_pruning(model, "m4n2_1d", whitelist=[torch.nn.Linear], allow_recompute_mask=True)
ASP.init_optimizer_for_pruning(optimizer)
step = 0
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps)
# simulate sparsity by inserting zeros into existing dense weights
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps)
# recompute sparse masks
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps_2)
# turn off sparsity
print("SPARSE :: ",one_ll)
ASP.restore_pruned_weights()
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps_2)
if __name__ == '__main__':
class Args:
batch_size = 32
input_features = 16
output_features = 8
hidden_features = 40
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
num_dense_steps_2 = 1500
args = Args()
main(args)
| apex-master | apex/contrib/sparsity/test/toy_problem.py |
import torch
import torch.onnx
from apex.contrib.sparsity.permutation_lib import Permutation
"""
Functional and behavioral correctness checking for network permutations
Each test class is a torch.nn.Module with three required members:
- self.input_shape is used to populate a dummy input
- self.expected_C_params indicates how many parameters are expected to be permuted in the C dimension
- self.expected_K_params indicates how many parameters are expected to be permuted in the K dimension
A test is successful if and only if:
1. The output of the un-permuted module matches (within a tolerance) the output of the permuted module
2. The number of parameters permuted in C, as reported by the Permutation class, matches the expected value in the test module
3. The number of parameters permuted in K, as reported by the Permutation class, matches the expected value in the test module
This file has all the test modules defined first, followed by the common test routine to check each module's correctness, and finally the main/entry point.
"""
class simple_convs(torch.nn.Module):
"""Stack of 2d convolutions with different normalization and activation functions"""
def __init__(
self,
num_convs: int,
channels: int,
normalization: str = 'none',
activation: str = 'ReLU',
):
super().__init__()
self.num_convs = num_convs
self.channels = channels
self.normalization = normalization
self.activation = activation
self.input_shape = [4, channels, 7, 7]
# we'll permute all convs' weights along C except the first
self.expected_C_params = -1
self.expected_K_params = 0
self.conv_stack = torch.nn.Sequential()
for c in range(self.num_convs-1):
self.conv_stack.add_module(f"conv_{c}", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1))
self.expected_C_params += 1
self.expected_K_params += 2
if self.normalization == 'BatchNorm2d':
self.conv_stack.add_module(f"norm_{c}", torch.nn.BatchNorm2d(self.channels, track_running_stats=False))
self.expected_K_params += 2
elif self.normalization == 'LazyBatchNorm2d':
self.conv_stack.add_module(f"norm_{c}", torch.nn.LazyBatchNorm2d(track_running_stats=False))
self.expected_K_params += 2
elif self.normalization == 'GroupNorm':
self.conv_stack.add_module(f"norm_{c}", torch.nn.GroupNorm(4, self.channels, affine=True))
self.expected_C_params -= 1 # GN prevents permutations of the neighboring convs
self.expected_K_params -= 2
elif self.normalization == 'InstanceNorm2d':
self.conv_stack.add_module(f"norm_{c}", torch.nn.InstanceNorm2d(self.channels, affine=True, track_running_stats=False))
self.expected_K_params += 2
elif self.normalization == 'LocalResponseNorm':
self.conv_stack.add_module(f"norm_{c}", torch.nn.LocalResponseNorm(16))
elif self.normalization == 'LayerNorm1':
self.conv_stack.add_module(f"norm_{c}", torch.nn.LayerNorm(7))
elif self.normalization == 'LayerNorm2':
self.conv_stack.add_module(f"norm_{c}", torch.nn.LayerNorm([7, 7]))
elif self.normalization == 'LayerNorm3':
self.conv_stack.add_module(f"norm_{c}", torch.nn.LayerNorm([self.channels, 7, 7]))
self.expected_K_params += 2
elif self.normalization == 'SyncBatchNorm':
self.conv_stack.add_module(f"norm_{c}", torch.nn.SyncBatchNorm(self.channels, track_running_stats=False))
self.expected_K_params += 2
self.conv_stack.add_module(f"act_{c}", torch.nn.ReLU())
self.conv_stack.add_module("conv_out", torch.nn.Conv2d(self.channels, 8, kernel_size=(1,1)))
self.expected_C_params += 1
def forward(self, x: torch.Tensor):
x = self.conv_stack(x)
return x
class conv_1d(torch.nn.Module):
"""1D convolutions in isolation and with siblings"""
def __init__(
self,
with_2d = False,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.with_2d = with_2d
self.input_conv = torch.nn.Conv2d(self.input_shape[1], 32, kernel_size=(3,3), padding=1)
self.expected_K_params += 2
self.branch_a_1D = torch.nn.Conv1d(32, 32, kernel_size=3, padding=1)
self.expected_C_params += 1
self.expected_K_params += 2
if self.with_2d:
self.branch_b_2D = torch.nn.Conv2d(32, 32, kernel_size=(3,3), padding=1)
self.expected_C_params += 1
self.expected_K_params += 2
self.out_conv = torch.nn.Conv2d(32, 8, kernel_size=(1,1))
self.expected_C_params += 1
def forward(self, x: torch.Tensor):
step0 = self.input_conv(x)
s0shape = step0.shape
step1 = self.branch_a_1D(step0.view(s0shape[0], s0shape[1], s0shape[2]*s0shape[3])).view(s0shape)
if self.with_2d:
step1 = step1 + self.branch_b_2D(step0)
return self.out_conv(step1)
class grouped_convs(torch.nn.Module):
"""Stack of 2d convolutions with different types of grouped convolutions"""
def __init__(
self,
):
super().__init__()
self.channels = 128
self.input_shape = [4, self.channels, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.conv_stack = torch.nn.Sequential()
self.conv_stack.add_module("conv_in", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1))
# dw conv will let previous and this layers' weights and biases permute along K
self.expected_K_params += 4
self.conv_stack.add_module("conv_dw", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1, groups=self.channels))
# regular conv permutes both
self.expected_C_params += 1
self.expected_K_params += 2
self.conv_stack.add_module("conv_0", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1, groups=1)) # explicit '1' groups for extra coverage
# only 2 groups should allow permutations only in C
self.expected_C_params += 1
self.conv_stack.add_module("conv_gr2", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1, groups=2))
# another regular conv, this one can't do anything
self.conv_stack.add_module("conv_1", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1))
# finally, grouped conv with small groups
self.conv_stack.add_module("conv_gr64", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1, groups=self.channels//2))
def forward(self, input: torch.Tensor):
return self.conv_stack(input)
class simple_forks_joins(torch.nn.Module):
"""Some simple residual connections to test collecting parameters into a single group. Four sections: input, blocka + residual, blockb + blockc, output"""
def __init__(
self,
):
super().__init__()
self.channels = 64
self.input_shape = [4, self.channels, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_convs = torch.nn.Sequential()
# input conv can only permute along K
self.expected_K_params += 2
self.input_convs.add_module("conv_in0", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1))
# the next conv can permute along both C and K
self.expected_C_params += 1
self.expected_K_params += 2
self.input_convs.add_module("conv_in1", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1))
# BN will permute 2 more along K
self.expected_K_params += 2
self.input_convs.add_module("bn_in1", torch.nn.BatchNorm2d(self.channels, track_running_stats=False))
self.block_a = torch.nn.Sequential()
# cut channels in half, then back to full, two fully permutable convs
self.expected_C_params += 2
self.expected_K_params += 4
self.block_a.add_module("conv_a0", torch.nn.Conv2d(self.channels, self.channels // 2, kernel_size=(3,3), padding=1))
self.block_a.add_module("conv_a1", torch.nn.Conv2d(self.channels // 2, self.channels, kernel_size=(3,3), padding=1))
self.block_b = torch.nn.Sequential()
# cut channels in half, then back to full, two fully permutable convs
self.expected_C_params += 2
self.expected_K_params += 4
self.block_b.add_module("conv_b0", torch.nn.Conv2d(self.channels, self.channels // 2, kernel_size=(3,3), padding=1))
self.block_b.add_module("conv_b1", torch.nn.Conv2d(self.channels // 2, self.channels, kernel_size=(3,3), padding=1))
self.block_c = torch.nn.Sequential()
# cut channels in half, then back to full, two fully permutable convs
self.expected_C_params += 2
self.expected_K_params += 4
self.block_c.add_module("conv_c0", torch.nn.Conv2d(self.channels, self.channels // 2, kernel_size=(3,3), padding=1))
self.block_c.add_module("conv_c1", torch.nn.Conv2d(self.channels // 2, self.channels, kernel_size=(3,3), padding=1))
self.output_conv = torch.nn.Sequential()
self.expected_C_params += 1
self.output_conv.add_module("conv_out", torch.nn.Conv2d(self.channels, 8, kernel_size=(3,3), padding=1))
def forward(self, input: torch.Tensor):
step0 = self.input_convs(input)
step1 = step0 + self.block_a(step0)
step2 = self.block_b(step1) + self.block_c(step1)
return self.output_conv(step2)
class different_grouped_convs(torch.nn.Module):
"""Convolutions with different group sizes need to use the GCD of the input channel counts if siblings"""
def __init__(
self,
):
super().__init__()
self.channels = 16
self.input_shape = [4, self.channels, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_conv = torch.nn.Sequential()
self.expected_K_params += 2
self.input_conv.add_module("input_conv", torch.nn.Conv2d(self.channels, 128, kernel_size=(3,3), padding=1))
self.expected_C_params += 4
# 4 parallel blocks with decreasing group size from "left" to "right"
self.block_a = torch.nn.Sequential()
self.block_a.add_module("conv_a", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_b = torch.nn.Sequential()
self.block_b.add_module("conv_b", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1, groups=2))
self.block_c = torch.nn.Sequential()
self.block_c.add_module("conv_c", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1, groups=4))
self.block_d = torch.nn.Sequential()
self.block_d.add_module("conv_d", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1, groups=8))
# output can't permute along C, disallowed by parents
self.output_conv = torch.nn.Sequential()
self.output_conv.add_module("output_conv", torch.nn.Conv2d(128, 8, kernel_size=(3,3), padding=1))
def forward(self, input: torch.Tensor):
step0 = self.input_conv(input)
step1 = self.block_a(step0) + self.block_b(step0) + self.block_c(step0) + self.block_d(step0)
return self.output_conv(step1)
class siblings_poison(torch.nn.Module):
"""A single sibling that cannot permute along C poisons all other siblings in its group"""
def __init__(
self,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_conv = torch.nn.Sequential()
self.input_conv.add_module("input_conv", torch.nn.Conv2d(self.input_shape[1], 128, kernel_size=(3,3), padding=1))
        # two parallel blocks: conv->flatten->linear | flatten->linear
self.expected_K_params += 4 # two linears will have their output channels permuted for the output layer
self.block_a = torch.nn.Sequential()
self.block_a.add_module("conv_a", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_a.add_module("flatten_a", torch.nn.Flatten(1))
self.block_a.add_module("linear_a", torch.nn.Linear(6272, 128))
self.block_b = torch.nn.Sequential()
self.block_b.add_module("flatten_b", torch.nn.Flatten(1))
self.block_b.add_module("linear_b", torch.nn.Linear(6272, 128))
self.output = torch.nn.Sequential()
self.expected_C_params += 1 # output layer will have its C dimension permuted
self.output.add_module("output", torch.nn.Linear(128, 8))
def forward(self, input: torch.Tensor):
step0 = self.input_conv(input)
step1 = self.block_a(step0) + self.block_b(step0)
return self.output(step1)
class coparent_poison(torch.nn.Module):
"""A single coparent that cannot permute along K poisons all other coparents in its group"""
def __init__(
self,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_conv = torch.nn.Sequential()
self.expected_K_params += 2
self.input_conv.add_module("input_conv", torch.nn.Conv2d(self.input_shape[1], 128, kernel_size=(3,3), padding=1))
        # two parallel blocks: conv | conv -> grouped conv
self.expected_C_params += 3 # all convs permute along C
self.expected_K_params += 2 # only conv_b0 permutes along K
self.block_a = torch.nn.Sequential()
self.block_a.add_module("conv_a", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_b = torch.nn.Sequential()
self.block_b.add_module("conv_b0", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_b.add_module("conv_b1", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1, groups=4))
self.output = torch.nn.Sequential()
self.output.add_module("output", torch.nn.Conv2d(128, 8, kernel_size=(1,1)))
def forward(self, input: torch.Tensor):
step0 = self.input_conv(input)
step1 = self.block_a(step0) + self.block_b(step0)
return self.output(step1)
class depthwise_child_is_sibling(torch.nn.Module):
"""The child of a depthwise convolution should act as a sibling"""
def __init__(
self,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_conv = torch.nn.Sequential()
self.expected_K_params += 2
self.input_conv.add_module("input_conv", torch.nn.Conv2d(self.input_shape[1], 128, kernel_size=(3,3), padding=1))
        # two parallel blocks: conv | depthwise->conv
self.expected_C_params += 2
self.expected_K_params += 4 + 2
self.block_a = torch.nn.Sequential()
self.block_a.add_module("conv_a", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_b = torch.nn.Sequential()
self.block_b.add_module("conv_b_dw", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1, groups=128))
self.block_b.add_module("conv_b_1", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.output_conv = torch.nn.Sequential()
self.expected_C_params += 1
self.output_conv.add_module("output_conv", torch.nn.Conv2d(128, 8, kernel_size=(1,1)))
def forward(self, input: torch.Tensor):
step0 = self.input_conv(input)
step1 = self.block_a(step0) + self.block_b(step0)
return self.output_conv(step1)
class module_attribute(torch.nn.Module):
"""Attributes of some module must be permuted if they feed some operation that is permuted"""
def __init__(
self,
complexity: int = 0,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.complexity = complexity
self.input_conv = torch.nn.Sequential()
self.expected_K_params += 3 # conv weight, conv bias, input_offset C (counts as K since it's acting as a parent)
self.input_offset = torch.nn.Parameter(torch.zeros(128,7,7))
torch.nn.init.normal_(self.input_offset.data, mean=0.0, std=2.0)
self.input_conv.add_module("conv_input", torch.nn.Conv2d(self.input_shape[1], 128, kernel_size=(3,3), padding=1))
# add a couple more layers, and let the same offset affect another layer, as well
if complexity == 1:
self.expected_C_params += 2
self.expected_K_params += 4
self.stack_a = torch.nn.Sequential()
self.stack_a.add_module("conv_a", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.stack_b = torch.nn.Sequential()
self.stack_b.add_module("conv_b", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.output_conv = torch.nn.Sequential()
self.expected_C_params += 1
self.output_conv.add_module("conv_output", torch.nn.Conv2d(128, 8, kernel_size=(3,3)))
def forward(self, input: torch.Tensor):
batch_input_offset = self.input_offset.expand(input.shape[0], -1, -1, -1)
x = self.input_conv(input) + batch_input_offset
if self.complexity == 1:
x = self.stack_a(x) + batch_input_offset
x = self.stack_b(x) + batch_input_offset
return self.output_conv(x)
class square_attribute(torch.nn.Module):
"""Attributes with multiple dimensions matching the permutation length should only be permuted along the correct dimension"""
# TODO: currently, such an attribute will disallow permutations around it, but with effort, it could be handled correctly.
def __init__(
self,
):
super().__init__()
self.input_shape = [4, 16, 16]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_linear = torch.nn.Sequential()
#self.expected_K_params += 2 # if handled correctly, the linear's K and the offset's K should both be permuted
self.input_linear.add_module("linear_input", torch.nn.Linear(self.input_shape[1], 16))
self.input_offset = torch.nn.Parameter(torch.zeros(16, 16))
torch.nn.init.normal_(self.input_offset.data, mean=0.0, std=2.0)
self.output_linear = torch.nn.Sequential()
#self.expected_C_params += 1 # if handled correctly, this should be permuted
self.output_linear.add_module("linear_output", torch.nn.Linear(16, 8))
def forward(self, input: torch.Tensor):
batch_input_offset = self.input_offset.expand(input.shape[0], -1, -1)
x = self.input_linear(input) + torch.permute(batch_input_offset, (0, 2, 1))
return self.output_linear(x)
class MHA_test(torch.nn.Module):
"""MultiheadAttention modules are unique, we need to check permutations for input and ouput projections"""
def __init__(
self,
hidden_dim: int = 256,
seq_len: int = 64,
num_heads: int = 16
):
super().__init__()
self.hidden_dim = hidden_dim
self.seq_len = seq_len
self.num_heads = num_heads
self.input_shape = [4, self.seq_len, self.hidden_dim]
self.expected_C_params = 1
self.expected_K_params = 2
self.MHA0 = torch.nn.MultiheadAttention(self.hidden_dim, self.num_heads, dropout=False, batch_first=True)
self.MHA1 = torch.nn.MultiheadAttention(self.hidden_dim, self.num_heads, dropout=False, batch_first=True)
def forward(self, input: torch.Tensor):
step0,_ = self.MHA0(input, input, input)
step1,_ = self.MHA1(step0, step0, step0)
return step1
class one_sparse_sibling(torch.nn.Module):
"""If only one of two siblings is sparse, both need to be permuted"""
def __init__(
self,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.in_conv = torch.nn.Sequential()
self.expected_K_params += 2
self.in_conv.add_module("conv_in", torch.nn.Conv2d(self.input_shape[1], 128, kernel_size=(3,3), padding=1))
self.block_a = torch.nn.Sequential()
self.expected_C_params += 1 # only conv_a0 will be permuted along C
self.expected_K_params += 2 # only conv_a1 will be permuted along K
self.block_a.add_module("conv_a0", torch.nn.Conv2d(128, 3, kernel_size=(1,1)))
self.block_a.add_module("conv_a1", torch.nn.Conv2d(3, 128, kernel_size=(3,3), padding=1))
self.block_b = torch.nn.Sequential()
self.expected_C_params += 2 # even though conv_a0 will not be sparse (only 3 output channels), conv_b0 can still be permuted along C
self.expected_K_params += 4
self.block_b.add_module("conv_b0", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_b.add_module("conv_b1", torch.nn.Conv2d(128, 128, kernel_size=(1,1)))
self.out_conv = torch.nn.Sequential()
self.expected_C_params += 1
self.out_conv.add_module("conv_out", torch.nn.Conv2d(128, 8, kernel_size=(1,1)))
def forward(self, input: torch.Tensor):
step0 = self.in_conv(input)
step1 = self.block_a(step0) + self.block_b(step0)
return self.out_conv(step1)
class test_concat(torch.nn.Module):
"""If concats are along the channel dimension (dim1 of NCHW), downstream layers can still be permuted despite C!=parentK"""
def __init__(
self,
ratio = 1, # ratio between # channels in either path to be concatenated
dim = 1, # dimension to concatenate, K by default
depth = 1, # number of concats to stack
):
super().__init__()
assert dim == 1 or ratio == 1 ,"can't concat along dimensions other than K if K's don't match"
self.dim = dim
self.depth = depth
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.in_conv = torch.nn.Sequential()
self.expected_K_params += 2
self.in_conv.add_module("conv_in", torch.nn.Conv2d(self.input_shape[1], 64, kernel_size=(1,1)))
self.left_paths = torch.nn.ModuleList([torch.nn.Conv2d(64, 64, kernel_size=(1,1))])
self.expected_C_params += 1
self.expected_K_params += 2
in_C = 64
out_C = 64
for d in range(1,depth,1):
self.expected_C_params += 1
self.expected_K_params += 2
if dim == 1:
out_C += 64
self.left_paths.append(torch.nn.Conv2d(in_C+64, out_C, kernel_size=(1,1)))
if dim == 1:
in_C += 64
self.right_path = torch.nn.Sequential()
self.expected_C_params += 1
self.expected_K_params += 2
self.right_path.add_module("conv_b", torch.nn.Conv2d(64, 64*ratio, kernel_size=(1,1)))
self.out_conv = torch.nn.Sequential()
self.expected_C_params += 1
if dim == 1:
out_C += 64*ratio
self.out_conv.add_module("conv_out", torch.nn.Conv2d(out_C, 16, kernel_size=(1,1)))
def forward(self, input: torch.Tensor):
step0 = self.in_conv(input)
step1 = step0
for d, layer in enumerate(self.left_paths):
if d == 0:
step1 = layer(step1)
else:
step1 = layer(torch.cat([step1, step0], 1))
step2 = torch.cat([step1, self.right_path(step0)], self.dim)
return self.out_conv(step2)
class test_flatten_op(torch.nn.Module):
"""flatten ops may change the effective channel count, typically by collapsing N,C,H,W into N,C*H*W before a classifier"""
def __init__(
self,
change_dims = True,
):
super().__init__()
self.change_dims = change_dims
self.input_shape = [4, 16, 3, 3]
self.expected_C_params = 0
self.expected_K_params = 0
if not self.change_dims:
self.input_shape = [4, 16, 1, 1]
self.expected_C_params = 1
self.expected_K_params = 2
self.flattened_C = self.input_shape[2] * self.input_shape[3] * 64
self.in_conv = torch.nn.Conv2d(self.input_shape[1], 64, kernel_size=(1,1))
self.out_gemm = torch.nn.Linear(self.flattened_C, 16)
def forward(self, input: torch.Tensor):
step0 = self.in_conv(input)
step1 = torch.flatten(step0, start_dim=1)
return self.out_gemm(step1)
class test_flatten_module(torch.nn.Module):
"""flatten modules may change the effective channel count, typically by collapsing N,C,H,W into N,C*H*W before a classifier"""
def __init__(
self,
change_dims = True,
):
super().__init__()
self.change_dims = change_dims
self.input_shape = [4, 16, 3, 3]
self.expected_C_params = 0
self.expected_K_params = 0
if not self.change_dims:
self.input_shape = [4, 16, 1, 1]
self.expected_C_params = 1
self.expected_K_params = 2
self.flattened_C = self.input_shape[2] * self.input_shape[3] * 64
self.stack = torch.nn.Sequential()
self.stack.add_module("conv_in", torch.nn.Conv2d(self.input_shape[1], 64, kernel_size=(1,1)))
self.stack.add_module("flatten", torch.nn.Flatten(1))
self.stack.add_module("gemm_out", torch.nn.Linear(self.flattened_C, 16))
def forward(self, input: torch.Tensor):
return self.stack(input)
class test_trace_failure(torch.nn.Module):
"""make sure tracing failures are handled gracefully"""
def __init__(
self
):
super().__init__()
self.input_shape = [4, 16, 1, 1]
self.expected_C_params = 0
self.expected_K_params = 0
self.in_conv = torch.nn.Conv2d(self.input_shape[1], 64, kernel_size=(1,1))
self.out_conv = torch.nn.Conv2d(64, 16, kernel_size=(1,1))
def forward(self, input: torch.Tensor):
step0 = self.in_conv(input)
#NCHW = 4,64,1,1
channels = step0.size(1)
channel_offset = torch.arange(channels, dtype=torch.long, device=step0.device)
channel_offset = channel_offset.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(step0)
step0.add_(channel_offset)
return self.out_conv(step0)
class already_sparse(torch.nn.Module):
"""if weights are already sparse, permutations should be skipped"""
def __init__(
self
):
super().__init__()
self.input_shape = [4, 16, 3, 3]
self.expected_C_params = 0
self.expected_K_params = 0
self.in_conv = torch.nn.Conv2d(self.input_shape[1], 64, kernel_size=(1,1))
self.out_conv = torch.nn.Conv2d(64, 16, kernel_size=(1,1))
# apply 2:4 to the output weights, it will not require a permutation
out_weights = torch.ones_like(self.out_conv.weight)
out_weights[:,0::2,...] = 0
assert torch.sum(out_weights) == torch.numel(out_weights)/2
self.out_conv.weight.data.copy_(out_weights)
def forward(self, input: torch.Tensor):
step0 = self.in_conv(input)
return self.out_conv(step0)
def test_model(model, tag, verbosity=0, save_onnx=False):
Permutation.set_identical_seed()
x = torch.rand(model.input_shape)
if save_onnx:
torch.onnx.export(model, x, f"{tag}.onnx", verbose=False)
base_out = model(x)
sparse_parameters = []
all_parameters = []
module_to_params = {}
module_to_params[torch.nn.MultiheadAttention] = ('q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight')
for module_name, module in model.named_modules():
module_type_str = str(type(module)).split("\'")[1]
if module_type_str == 'torch.nn.modules.container.Sequential' or module_type_str.startswith('torchvision.models'):
# filter out the 'torch.nn.modules.container.Sequential' type and the whole model, like 'torchvision.models.vgg.VGG'
continue
for p_name, p in module.named_parameters():
all_parameters.append((module_name, module, p_name, p))
if isinstance(module, (torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.MultiheadAttention, torch.nn.modules.linear.NonDynamicallyQuantizableLinear)):
allowed_names = ('weight',)
if type(module) in module_to_params.keys():
allowed_names = module_to_params[type(module)]
if p_name not in allowed_names:
continue
if len(p.size()) >= 2 and (p.size()[0] % 8) == 0 and (p.size()[1] % 16) == 0:
mask = torch.ones_like(p).bool()
buffname = p_name.split(".")[-1]
module.register_buffer('__%s_mma_mask' % buffname, mask)
sparse_parameters.append((module_name, module, p_name, p, mask, None))
if module_type_str == 'torch.nn.modules.batchnorm.BatchNorm2d':
# need to get the running_mean and running_var from model.state_dict(), as they are not the learnable parameters
module_mean_name = module_name + '.running_mean'
module_var_name = module_name + '.running_var'
for param_key in model.state_dict():
if module_mean_name == param_key or module_var_name == param_key:
all_parameters.append((module_name, module, param_key.split(".")[-1], model.state_dict()[param_key]))
if verbosity > 1:
sparse_param_names = [module_name+":"+p_name for (module_name, module, p_name, p, mask, pruned) in sparse_parameters]
all_param_names = [module_name+":"+p_name for (module_name, module, p_name, p) in all_parameters]
print(f"\tSparse parameter names: {sparse_param_names}\n\tAll parameter names: {all_param_names}")
Permutation.set_permutation_params_from_asp(model, sparse_parameters, all_parameters, verbosity)
Permutation.permute_model(model)
C_params, K_params, missed_dims = Permutation.get_permutation_stats()
success = True
fail_str = ""
succ_str = ""
if len(C_params) != model.expected_C_params:
success = False
fail_str = fail_str + f"\n\tC expected {model.expected_C_params}, got {len(C_params)} ({C_params})"
elif verbosity > 0:
succ_str = succ_str + f"\n\tC expected {model.expected_C_params}, got {len(C_params)} ({C_params})"
if len(K_params) != model.expected_K_params:
success = False
fail_str = fail_str + f"\n\tK expected {model.expected_K_params}, got {len(K_params)} ({K_params})"
elif verbosity > 0:
succ_str = succ_str + f"\n\tK expected {model.expected_K_params}, got {len(K_params)} ({K_params})"
if len(missed_dims) != 0:
success = False
fail_str = fail_str + f"\n\tMissed permutations along {len(missed_dims)} dimensions ({missed_dims})"
perm_out = model(x)
atol = 1e-5
rtol = 1e-4
outs_match = torch.allclose(base_out.data, perm_out.data, atol=atol, rtol=rtol)
if not outs_match:
fail_str = fail_str + f"\n\tOutputs matched: {outs_match}"
if success:
diffs = base_out - perm_out
diff_locs = (diffs >= atol).nonzero(as_tuple=True)
fail_str = fail_str + f"\n{diff_locs}\n{diffs[diff_locs]}"
success = False
if success:
print(f"{tag}: Success\t{succ_str}")
else:
print(f"{tag}: FAIL\t{fail_str}")
return success
def main():
global_success = True
global_success &= test_model(simple_convs(2,16), "smoke test")
global_success &= test_model(simple_convs(5, 64), "simple 5 64")
global_success &= test_model(simple_convs(10, 32), "simple 10 32")
# normalization
for norm in ['BatchNorm2d', 'LazyBatchNorm2d', 'InstanceNorm2d', 'LazyInstanceNorm2d', 'LayerNorm3', 'LocalResponseNorm']:
global_success &= test_model(simple_convs(4, 128, norm), norm)
# disallowed normalization
for norm in ['GroupNorm']:
global_success &= test_model(simple_convs(4, 128, norm), norm)
global_success &= test_model(conv_1d(), "conv1d")
global_success &= test_model(conv_1d(with_2d=True), "conv1d and conv2d")
global_success &= test_model(grouped_convs(), "grouped convs")
global_success &= test_model(simple_forks_joins(), "forks and joins")
global_success &= test_model(different_grouped_convs(), "GCD")
global_success &= test_model(siblings_poison(), "sibling poison")
global_success &= test_model(coparent_poison(), "coparent poison")
global_success &= test_model(depthwise_child_is_sibling(), "dw child is sibling")
global_success &= test_model(module_attribute(complexity=0), "single attribute")
global_success &= test_model(module_attribute(complexity=1), "single attribute thrice")
global_success &= test_model(MHA_test(hidden_dim=256, seq_len=64, num_heads=16), "stacked MHA")
global_success &= test_model(one_sparse_sibling(), "one sparse sibling")
global_success &= test_model(test_concat(), "simple concat") # concat along K
global_success &= test_model(test_concat(dim=0), "concat dim0") # concat along C
global_success &= test_model(test_concat(ratio=2), "concat ratio2") # concat along K with different K values
global_success &= test_model(test_concat(depth=2), "concat depth2") # concat along K multiple times
global_success &= test_model(test_concat(depth=3), "concat depth3")
global_success &= test_model(test_concat(ratio=3, depth=4), "concat ratio3 depth4")
global_success &= test_model(test_concat(dim=0, depth=3), "concat dim0 depth3")
global_success &= test_model(test_flatten_op(), "flatten op")
global_success &= test_model(test_flatten_op(change_dims=False), "useless flatten op")
global_success &= test_model(test_flatten_module(), "flatten module")
global_success &= test_model(test_flatten_module(change_dims=False), "useless flatten module")
global_success &= test_model(test_trace_failure(), "trace failure")
global_success &= test_model(already_sparse(), "skip already sparse")
global_success &= test_model(square_attribute(), "square attributes")
if global_success:
print("All tests completed successfully.")
else:
print("There was at least one failure.")
if __name__ == '__main__':
main()
|
apex-master
|
apex/contrib/sparsity/test/test_permutation_application.py
|
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(step, args, model_state_dict, optimizer_state_dict):
#
# PART2
#
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
ASP.init_model_for_pruning(model, args.pattern, verbosity=args.verbosity, whitelist=args.whitelist, allow_recompute_mask=args.allow_recompute_mask)
ASP.init_optimizer_for_pruning(optimizer)
torch.manual_seed(args.seed2)
model.load_state_dict(model_state_dict)
optimizer.load_state_dict(optimizer_state_dict)
print("Model sparsity is %s" % ("enabled" if ASP.is_sparsity_enabled() else "disabled"))
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps_2)
if __name__ == '__main__':
checkpoint = torch.load("part1.chkp")
class Args:
verbosity = checkpoint['verbosity']
seed = 4873
seed2 = checkpoint['seed2']
pattern = checkpoint['pattern']
whitelist = checkpoint['whitelist']
allow_recompute_mask = checkpoint['allow_recompute_mask']
batch_size = 32
input_features = 8
output_features = 8
hidden_features = 32
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
checkpoint_path = "part1.chkp"
args = Args()
main(checkpoint['step'], args, checkpoint['model_state_dict'], checkpoint['optimizer_state_dict'])
|
apex-master
|
apex/contrib/sparsity/test/checkpointing_test_part2.py
|
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(args):
#
# PART1
#
torch.manual_seed(args.seed)
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
ASP.init_model_for_pruning(model, args.pattern, verbosity=args.verbosity, whitelist=args.whitelist, allow_recompute_mask=args.allow_recompute_mask)
ASP.init_optimizer_for_pruning(optimizer)
step = 0
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps)
# simulate sparsity by inserting zeros into existing dense weights
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps)
torch.save({
'step': step,
'verbosity': args.verbosity,
'seed2': args.seed2,
'pattern': args.pattern,
'whitelist': args.whitelist,
'allow_recompute_mask': args.allow_recompute_mask,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, args.checkpoint_path)
if __name__ == '__main__':
class Args:
verbosity=3
seed = 4873
seed2 = 99875
pattern = "m4n2_2d_best"
whitelist = [torch.nn.Linear]
allow_recompute_mask = True
batch_size = 32
input_features = 8
output_features = 8
hidden_features = 32
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
checkpoint_path = "part1.chkp"
args = Args()
main(args)
|
apex-master
|
apex/contrib/sparsity/test/checkpointing_test_part1.py
|
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
#
# Reference run for checkpointing test (part1 + part2)
#
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(args):
#
# PART1
#
torch.manual_seed(args.seed)
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
ASP.init_model_for_pruning(model, args.pattern, whitelist=args.whitelist, allow_recompute_mask=args.allow_recompute_mask)
ASP.init_optimizer_for_pruning(optimizer)
step = 0
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps)
# simulate sparsity by inserting zeros into existing dense weights
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps)
#
# PART 2
#
torch.manual_seed(args.seed2)
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps_2)
if __name__ == '__main__':
class Args:
seed = 4873
seed2 = 99875
pattern = "m4n2_2d_best"
whitelist = [torch.nn.Linear]
allow_recompute_mask = True
batch_size = 32
input_features = 8
output_features = 8
hidden_features = 32
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
checkpoint_path = "part1.chkp"
args = Args()
main(args)
|
apex-master
|
apex/contrib/sparsity/test/checkpointing_test_reference.py
|
import numpy as np
import time
import sys
# permutation-specifics
sys.path.append("../")
from permutation_search_kernels.permutation_utilities import *
from permutation_search_kernels.exhaustive_search import Exhaustive_Search
from permutation_search_kernels.channel_swap import Channel_Swap
# Arguments
import argparse
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description='Test channel permutations')
parser.add_argument('--infile', default='random', type=str, help='input file or "random"')
parser.add_argument('--channels', default=384, type=int, help='random input channel count (C)')
parser.add_argument('--filters', default=96, type=int, help='random input filter count (K)')
parser.add_argument('--verbosity', default=0, type=int, help='print status updates')
parser.add_argument('--seed', default=1, type=int, help='random seed')
parser.add_argument('--pretty_print', default=True, type=str2bool, help='print the table for pretty viewing (as opposed to strict .csv)')
parser.add_argument('--unstructured', default=0.0, type=float, help='perform unstructured pruning to a target sparsity before processing, to emulate an unstructured sparse network. "-1" will find the minimum sparsity required to achieve a perfect permutation')
parser.add_argument('--gpu', default=True, type=str2bool, help='uses a gpu to accelerate the search if possible')
parser.add_argument('--check_permutation', default=False, type=str2bool, help='check that the tracked permutation matches the recovered permutation')
parser.add_argument('--intermediate_steps', default=0, type=int, help='find roughly evenly-spaced permutations in efficacy')
parser.add_argument('--print_permutation', default=False, type=str2bool, help='print the final permutation found by each strategy')
parser.add_argument('strategies', metavar='strategy', type=str, nargs='+', help='strategies to try')
## binary search for the minimum sparsity necessary to achieve a perfect permutation with some strategy
def find_minimum_sparsity(matrix, search_function, **kwargs):
duration = 0
min_sparsity = 50
max_sparsity = 100
sparsity = 75
verbosity = 0
if 'verbosity' in kwargs:
verbosity = kwargs['verbosity']
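# the loop below is a binary search over integer sparsity percentages in [50, 100]:
# test the midpoint, raise min_sparsity when 2:4 cannot be reached, lower max_sparsity
# when it can, and stop when the bounds meet.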
while min_sparsity < max_sparsity:
if verbosity > 5:
print(f"\tlooking now at {sparsity} (between {min_sparsity} and {max_sparsity})")
# prepare unstructured sparse matrix, get row sparsity magnitude
tmp_result = unstructured_prune(matrix, sparsity/100.0)
local_unpruned_magnitude = np.sum(np.abs(tmp_result))
local_unstructured_rows_magnitude = magnitude_after_pruning_rows(tmp_result, rate=0.5)
# quick check to see if this sparsity is trivially too low
if local_unstructured_rows_magnitude*1.0001 < local_unpruned_magnitude:
if verbosity > 5:
print(f"Skipping sparsity {sparsity} since there's no perfect permutation (unstructured mag {local_unpruned_magnitude} is larger than sparse rows {local_unstructured_rows_magnitude}).")
min_sparsity = sparsity+1
sparsity = int(min_sparsity + (max_sparsity - min_sparsity)/2.0)
continue
tmp_result, tmp_duration, found_permutation = search_function(tmp_result, **kwargs)
duration += tmp_duration
nonzeros = np.count_nonzero(tmp_result)
tmp_result = apply_2_to_4(tmp_result)
nonzeros_after_2to4 = np.count_nonzero(tmp_result)
if nonzeros == nonzeros_after_2to4: # found a winner, are we done?
if verbosity > 3:
print(f"Found an unstructured sparsity that we can turn into 2:4: {sparsity}")
max_sparsity = sparsity
if max_sparsity <= min_sparsity and verbosity > 0:
print(f"Found the minimum unstructured sparsity that we can turn into 2:4: {sparsity}")
break
else:
if verbosity > 5:
print(f"Unstructured sparsity {sparsity} was insufficient to produce 2:4 sparsity")
min_sparsity = sparsity+1
if max_sparsity <= min_sparsity and verbosity > 0:
print(f"Found the minimum unstructured sparsity that we can turn into 2:4: {max_sparsity}")
sparsity = max_sparsity
break
sparsity = int(min_sparsity + (max_sparsity - min_sparsity)/2.0)
return sparsity, duration
# Entry point
if __name__ == "__main__":
args = parser.parse_args()
verbosity = args.verbosity
np.random.seed(seed=args.seed)
use_gpu(initial_override=args.gpu)
# get or create the input matrix
input_vals = np.random.rand(args.filters, args.channels)
if args.infile != "random":
if 'npy' in args.infile:
input_vals = np.load(args.infile, 'r')
shp = input_vals.shape
shp_str = str(shp).replace(",","x")
newshp_str = ''
if len(shp) == 4: # K,C,R,S -> RSK,C
input_vals = np.transpose(input_vals,(2,3,0,1)).flatten().reshape((shp[2]*shp[3]*shp[0], shp[1]))
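# e.g. a (K=64, C=128, R=3, S=3) conv weight is transposed to R,S,K,C and reshaped to
# (3*3*64, 128) = (576, 128), so the permutation search sees the 128 input channels as columns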
newshp_str = str(input_vals.shape).replace(",","x")
print(f"{args.infile},{shp_str},{newshp_str}")
if input_vals.shape[1] % 4 != 0:
print(f"Unfriendly shape {input_vals.shape}, not pruning.")
sys.exit()
# unstructured prune if requested
if args.unstructured > 0.0:
args.unstructured = min(args.unstructured, 1.0)
input_vals = unstructured_prune(input_vals, args.unstructured)
print(f"{args.infile} pruned to {args.unstructured*100.:>.1f} sparsity, shape is {input_vals.shape}")
# calculate some early metrics
sorted_magnitudes = np.sort(np.abs(input_vals), axis=None)
unpruned_magnitude = np.sum(sorted_magnitudes)
num_weights = sorted_magnitudes.size
unstructured_magnitude = np.sum(sorted_magnitudes[int(num_weights/2):])
unstructured_rows_magnitude = magnitude_after_pruning_rows(input_vals, rate=0.5)
simple_2to4 = apply_2_to_4(np.copy(input_vals))
simple_2to4_magnitude = sum_after_2_to_4(input_vals)
tmp_time = time.perf_counter()
simple_2to4_magnitude = sum_after_2_to_4(input_vals)
default_duration = time.perf_counter() - tmp_time
best_magnitude = unstructured_rows_magnitude
best_lost_magnitude = unpruned_magnitude - best_magnitude
base_lost_magnitude = unpruned_magnitude - simple_2to4_magnitude
# prep results table
final_metric = 'efficacy'
if args.unstructured < 0.0:
final_metric = 'min_sparsity'
if args.pretty_print:
print(f"{'strategy':<35},{'magnitude':>15},{final_metric:>15},{'duration':>15}")
print(f"{'unpruned':<35},{unpruned_magnitude:>15.3f},{'-':^15},{'-':^15}")
print(f"{'unstructured':<35},{unstructured_magnitude:>15.3f},{'-':^15},{'-':^15}")
print(f"{'50% rows':<35},{unstructured_rows_magnitude:>15.3f},{'100.0':>15},{'-':^15}")
print(f"{'default 2:4':<35},{simple_2to4_magnitude:>15.3f},{'0.0':>15},{default_duration:>15.3f}")
else:
print(f"strategy,magnitude,{final_metric},duration")
print(f"unpruned,{unpruned_magnitude},-,-")
print(f"unstructured,{unstructured_magnitude},-,-")
print(f"50%_rows,{unstructured_rows_magnitude},100.0,-")
print(f"2:4,{simple_2to4_magnitude},0.0,{default_duration}")
# try the requested strategies
for i,strategy in enumerate(args.strategies):
result = np.copy(input_vals)
np.random.seed(seed=args.seed)
duration = 0.0
min_sparsity = 0.0
strat_split = strategy.split(",")
found_permutation = None
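# strategy strings are comma-separated, e.g. "optimize_stripe_groups,8,100" (stripe group
# width in columns, escape attempts), "channel_swap,100" (escape attempts) or "random,10"
# (permutations to try); trailing fields are optional and fall back to the defaults below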
# optimize stripe groups
if strat_split[0] == 'optimize_stripe_groups':
stripe_group_size_in_cols = 8
if len(strat_split) >= 2:
stripe_group_size_in_cols = int(strat_split[1])
escape_attempts = 100
if len(strat_split) >= 3:
escape_attempts = int(strat_split[2])
if args.unstructured >= 0.0: # just perform the search on the current matrix
result,duration,found_permutation = Exhaustive_Search(result, stripe_group_size=stripe_group_size_in_cols, escape_attempts=escape_attempts)
else: # find the minimum sparsity needed to transparently transform the input
min_sparsity,duration = find_minimum_sparsity(result, Exhaustive_Search, stripe_group_size=stripe_group_size_in_cols, escape_attempts=escape_attempts)
result = unstructured_prune(result, min_sparsity/100.0)
# channel swaps
elif strat_split[0] == 'channel_swap':
escape_attempts = 0
if len(strat_split) >= 2:
escape_attempts = int(strat_split[1])
if args.unstructured >= 0.0: # just perform the search on the current matrix
result,duration,found_permutation = Channel_Swap(result, escape_attempts=escape_attempts, verbosity=verbosity)
else: # find the minimum sparsity needed to transparently transform the input
min_sparsity,duration = find_minimum_sparsity(result, Channel_Swap, escape_attempts=escape_attempts, verbosity=verbosity)
result = unstructured_prune(result, min_sparsity/100.0)
# random permutations
elif strat_split[0] == 'random':
if args.unstructured < 0.0: # searching for minimum sparsity not supported for random permutations
continue
num_perms = 10
if len(strat_split) >= 2 and int(strat_split[1]) >= 1:
num_perms = int(strat_split[1])
# try the seeds/permutations
permutation = [c for c in range(result.shape[1])]
best_sum = sum_after_2_to_4(result)
best_perm = permutation.copy()
start_time = time.perf_counter()
for x in range(num_perms):
permutation = np.random.permutation(permutation)
cur_sum = sum_after_2_to_4(result[:,permutation])
if cur_sum > best_sum:
best_sum = cur_sum
best_perm = permutation.copy()
if verbosity > 0:
print(f"\tnew best permutation {x} found with magnitude {best_sum:>15.3f}")
elif verbosity > 5:
print(f"\tpermutation {x} magnitude too low: {cur_sum:>15.3f}")
duration = time.perf_counter() - start_time
result = result[:,best_perm]
found_permutation = best_perm
else:
print(f"Unknown strategy: {strategy}!")
sys.exit()
# report stats for this strategy
cur_mag = sum_after_2_to_4(result)
cur_eff = efficacy(best_lost_magnitude, base_lost_magnitude, unpruned_magnitude - cur_mag)*100.0
final_metric = cur_eff
if args.unstructured < 0.0:
final_metric = min_sparsity
perm_distance = ""
error = None
if args.check_permutation and found_permutation is not None:
recovered_perm = find_permutation(result, input_vals)
error = False
for c in range(len(recovered_perm)):
if recovered_perm[c] != found_permutation[c]:
if verbosity > 0:
print(f"tracked permutation at index {c} was {found_permutation[c]}, but the recovered permutation thought it was {recovered_perm[c]}")
error = True
# if requested, generate permutations that divide the efficacy space into equal steps
if args.intermediate_steps != 0:
magnitude_targets = None
if args.intermediate_steps != 0:
ratios = [step/float(args.intermediate_steps+1) for step in range(1,args.intermediate_steps+1)]
mag_diff = cur_mag - (unpruned_magnitude - base_lost_magnitude)
magnitude_targets = [(unpruned_magnitude - base_lost_magnitude) + mag_diff * ratio for ratio in ratios]
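# e.g. with intermediate_steps=3 the ratios are 0.25, 0.5 and 0.75, so the targets split
# the magnitude gap between the default 2:4 result and the permuted result into four equal steps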
perm_distance, target_permutations = permutation_distance(found_permutation, [c for c in range(result.shape[1])], matrix=input_vals, magnitude_targets=magnitude_targets, debug=False, verbosity=verbosity)
if target_permutations is not None:
for target_permutation in target_permutations:
print(target_permutation)
error_str = ""
if error is not None:
error_str = ", correct"
if error:
error_str = ", mismatch"
if args.pretty_print:
print(f"{strategy:35},{cur_mag:>15.3f},{final_metric:>15.1f},{duration:>15.3f}{error_str:>15}")
else:
strat_string = strategy.replace(",","_")
print(f"{strat_string},{cur_mag},{final_metric},{duration}{error_str}")
if args.print_permutation and found_permutation is not None:
print(found_permutation)
|
apex-master
|
apex/contrib/sparsity/permutation_tests/permutation_test.py
|
try:
import torch
import bnp
from .batch_norm import BatchNorm2d_NHWC
del torch
del bnp
del batch_norm
except ImportError as err:
print("apex was installed without --bnp flag, contrib.groupbn is not available")
|
apex-master
|
apex/contrib/groupbn/__init__.py
|
import torch
import numpy as np
from torch.nn.modules.batchnorm import _BatchNorm
import bnp
class bn_NHWC_impl(torch.autograd.Function):
@staticmethod
def forward(ctx, x, s, b, rm, riv, mini_m, mini_riv, ret_cta, mom, epsilon, fuse_relu, is_train, bn_group, my_data, pair_data, magic, pair_data2, pair_data3, fwd_occup, fwd_grid_x, bwd_occup, bwd_grid_x, multi_stream):
if is_train:
ctx.save_for_backward(x, s, b, rm, riv, mini_m, mini_riv)
ctx.epsilon = epsilon
ctx.momentum = mom
ctx.ret_cta = ret_cta
ctx.fuse_relu = fuse_relu
ctx.my_data = my_data
ctx.pair_data = pair_data
ctx.magic = magic
ctx.pair_data2 = pair_data2
ctx.pair_data3 = pair_data3
ctx.bn_group = bn_group
ctx.bwd_occup = bwd_occup
ctx.bwd_grid_x = bwd_grid_x
ctx.multi_stream = multi_stream
res = bnp.bn_fwd_nhwc(x, s, b, rm, riv, mini_m, mini_riv, ret_cta, mom, epsilon, fuse_relu, my_data, pair_data, pair_data2, pair_data3, bn_group, magic, fwd_occup, fwd_grid_x, multi_stream)
return res
else:
return bnp.bn_fwd_eval_nhwc(x, s, b, rm, riv, ret_cta, bn_group, mom, epsilon, fuse_relu)
@staticmethod
def backward(ctx, grad_y):
x, s, b, rm, riv, mini_m, mini_riv = ctx.saved_variables
epsilon = ctx.epsilon
mom = ctx.momentum
ret_cta = ctx.ret_cta
fuse_relu = ctx.fuse_relu
my_data = ctx.my_data
pair_data = ctx.pair_data
magic = ctx.magic
pair_data2 = ctx.pair_data2
pair_data3 = ctx.pair_data3
bn_group = ctx.bn_group
bwd_occup = ctx.bwd_occup
bwd_grid_x = ctx.bwd_grid_x
multi_stream = ctx.multi_stream
dx, dscale, dbias = bnp.bn_bwd_nhwc(x, grad_y, s, b, rm, riv, mini_m, mini_riv, ret_cta, mom, epsilon, fuse_relu, my_data, pair_data, pair_data2, pair_data3, bn_group, magic, bwd_occup, bwd_grid_x, multi_stream)
return dx, dscale, dbias, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
class bn_addrelu_NHWC_impl(torch.autograd.Function):
@staticmethod
def forward(ctx, x, z, s, b, rm, riv, mini_m, mini_riv, grid_dim_y, ret_cta, mom, epsilon, is_train, bn_group, my_data, pair_data, magic, pair_data2, pair_data3, fwd_occup, fwd_grid_x, bwd_occup, bwd_grid_x, multi_stream):
if is_train:
bitmask = torch.cuda.IntTensor(((x.numel()+31)//32) * 2 * grid_dim_y)
ctx.save_for_backward(x, s, b, rm, riv, mini_m, mini_riv, bitmask)
ctx.epsilon = epsilon
ctx.momentum = mom
ctx.ret_cta = ret_cta
ctx.my_data = my_data
ctx.pair_data = pair_data
ctx.magic = magic
ctx.pair_data2 = pair_data2
ctx.pair_data3 = pair_data3
ctx.bn_group = bn_group
ctx.bwd_occup = bwd_occup
ctx.bwd_grid_x = bwd_grid_x
ctx.multi_stream = multi_stream
res = bnp.bn_addrelu_fwd_nhwc(x, z, s, b, rm, riv, mini_m, mini_riv, bitmask, ret_cta, mom, epsilon, my_data, pair_data, pair_data2, pair_data3, bn_group, magic, fwd_occup, fwd_grid_x, multi_stream)
return res
else:
return bnp.bn_addrelu_fwd_eval_nhwc(x, z, s, b, rm, riv, ret_cta, bn_group, mom, epsilon)
@staticmethod
def backward(ctx, grad_y):
x, s, b, rm, riv, mini_m, mini_riv, bitmask = ctx.saved_variables
epsilon = ctx.epsilon
mom = ctx.momentum
ret_cta = ctx.ret_cta
my_data = ctx.my_data
pair_data = ctx.pair_data
magic = ctx.magic
pair_data2 = ctx.pair_data2
pair_data3 = ctx.pair_data3
bn_group = ctx.bn_group
bwd_occup = ctx.bwd_occup
bwd_grid_x = ctx.bwd_grid_x
multi_stream = ctx.multi_stream
dx, dz, dscale, dbias = bnp.bn_addrelu_bwd_nhwc(x, grad_y, s, b, rm, riv, mini_m, mini_riv, bitmask, ret_cta, mom, epsilon, my_data, pair_data, pair_data2, pair_data3, bn_group, magic, bwd_occup, bwd_grid_x, multi_stream)
return dx, dz, dscale, dbias, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
class BatchNorm2d_NHWC(_BatchNorm):
# if using BatchNorm2d_NHWC simultaneously with multiple streams set multi_stream to True
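# illustrative usage sketch (assumes a CUDA build with the bnp extension and, for
# bn_group > 1, an initialized torch.distributed process group):
#   bn = BatchNorm2d_NHWC(64, fuse_relu=True, bn_group=1, multi_stream=True)
#   y = bn(x)   # x is a CUDA tensor in the NHWC layout this module expects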
def __init__(self, num_features, fuse_relu=False, bn_group=1, max_cta_per_sm=2, cta_launch_margin=12, multi_stream=False):
super(BatchNorm2d_NHWC, self).__init__(num_features)
self.fuse_relu = fuse_relu
self.multi_stream = multi_stream
self.minibatch_mean = torch.cuda.FloatTensor(num_features)
self.minibatch_riv = torch.cuda.FloatTensor(num_features)
# default to distributed bn disabled
self.bn_group = bn_group
self.max_cta_per_sm = max_cta_per_sm #used only in training fwd and bwd
self.cta_launch_margin = cta_launch_margin #used only in training fwd and bwd
self.my_data = None
self.pair_data = None
self.pair_data2 = None
self.pair_data3 = None
self.local_rank = 0
self.magic = torch.IntTensor([0])
#calculate cta per sm occupancies
assert(max_cta_per_sm>0) # won't be able to do much with 0 CTAs :)
self.fwd_occupancy = min(bnp.bn_fwd_nhwc_occupancy(), max_cta_per_sm)
self.bwd_occupancy = min(bnp.bn_bwd_nhwc_occupancy(), max_cta_per_sm)
self.addrelu_fwd_occupancy = min(bnp.bn_addrelu_fwd_nhwc_occupancy(), max_cta_per_sm)
self.addrelu_bwd_occupancy = min(bnp.bn_addrelu_bwd_nhwc_occupancy(), max_cta_per_sm)
# calculate grid dimensions based on occupancy numbers
mp_count = torch.cuda.get_device_properties(None).multi_processor_count
self.fwd_grid_dim_x = max(mp_count*self.fwd_occupancy - cta_launch_margin , 1)
self.bwd_grid_dim_x = max(mp_count*self.bwd_occupancy - cta_launch_margin , 1)
self.addrelu_fwd_grid_dim_x = max(mp_count*self.addrelu_fwd_occupancy - cta_launch_margin , 1)
self.addrelu_bwd_grid_dim_x = max(mp_count*self.addrelu_bwd_occupancy - cta_launch_margin , 1)
self.grid_dim_y = (num_features + 63) // 64
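# one y-dimension grid block per group of 64 channels (ceiling division)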
# allocate scratch space used by implementation
# TODO: this scratch space should not be exposed to user code. It only needs one-time
# initialization and the same buffer could be reused across iterations; it is allocated
# here, rather than requested from the caching allocator, to avoid unnecessary
# re-initialization on future iterations.
self.ret_cta = torch.cuda.ByteTensor(8192).fill_(0)
#FIXME: turn pair handles into an array
if bn_group>1:
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
assert(world_size >= bn_group)
assert(world_size % bn_group == 0)
bn_sync_steps = 1
if (bn_group==4):
bn_sync_steps = 2
if (bn_group==8):
bn_sync_steps = 3
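# note: bn_sync_steps is log2(bn_group) -- 1 for groups of 2, 2 for groups of 4, 3 for groups of 8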
self.ipc_buffer = torch.cuda.ByteTensor(bnp.get_buffer_size(bn_sync_steps))
self.my_data = bnp.get_data_ptr(self.ipc_buffer)
# we are walking on very thin ice here by utilizing internal `_share_cuda_()`
self.storage = self.ipc_buffer.storage()
self.share_cuda = self.storage._share_cuda_()
internal_cuda_mem = self.share_cuda
# internal_cuda_mem[1]: ipc_mem_handle
my_handle = torch.cuda.ByteTensor(np.frombuffer(internal_cuda_mem[1], dtype=np.uint8))
# internal_cuda_mem[3]: offset
my_offset = torch.cuda.IntTensor([internal_cuda_mem[3]])
handles_all = torch.empty(world_size, my_handle.size(0), dtype=my_handle.dtype, device=my_handle.device)
handles_l = list(handles_all.unbind(0))
torch.distributed.all_gather(handles_l, my_handle)
offsets_all = torch.empty(world_size, my_offset.size(0), dtype=my_offset.dtype, device=my_offset.device)
offsets_l = list(offsets_all.unbind(0))
torch.distributed.all_gather(offsets_l, my_offset)
#whom do I actually care about? that would be local_rank XOR 1
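# e.g. with bn_group=4 and ranks 0..3, rank ^ 1 pairs (0,1) and (2,3), rank ^ 2 pairs
# (0,2) and (1,3); for bn_group=8, rank ^ 4 adds the (0,4), (1,5), (2,6), (3,7) pairs --
# a butterfly exchange over the handles gathered above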
self.pair_handle = handles_l[local_rank ^ 1].cpu().contiguous()
pair_offset = offsets_l[local_rank ^ 1].cpu()
self.pair_data = bnp.get_remote_data_ptr(self.pair_handle, pair_offset)
if bn_group>2:
self.pair_handle2 = handles_l[local_rank ^ 2].cpu().contiguous()
pair_offset2 = offsets_l[local_rank ^ 2].cpu()
self.pair_data2 = bnp.get_remote_data_ptr(self.pair_handle2, pair_offset2)
if bn_group>4:
self.pair_handle3 = handles_l[local_rank ^ 4].cpu().contiguous()
pair_offset3 = offsets_l[local_rank ^ 4].cpu()
self.pair_data3 = bnp.get_remote_data_ptr(self.pair_handle3, pair_offset3)
#FIXME: get magic value into C code and eliminate from here
self.magic = torch.IntTensor([2])
self.local_rank = local_rank
def forward(self, x, z=None):
if z is not None:
assert(self.fuse_relu==True)
return bn_addrelu_NHWC_impl.apply(x, z,
self.weight, self.bias,
self.running_mean, self.running_var,
self.minibatch_mean, self.minibatch_riv, self.grid_dim_y, self.ret_cta,
self.momentum,
self.eps, self.training, self.bn_group, self.my_data, self.pair_data, (self.magic), self.pair_data2, self.pair_data3,
self.addrelu_fwd_occupancy, self.addrelu_fwd_grid_dim_x,
self.addrelu_bwd_occupancy, self.addrelu_bwd_grid_dim_x,
self.multi_stream)
else:
return bn_NHWC_impl.apply(x,
self.weight, self.bias,
self.running_mean, self.running_var,
self.minibatch_mean, self.minibatch_riv, self.ret_cta,
self.momentum,
self.eps, self.fuse_relu, self.training, self.bn_group, self.my_data, self.pair_data, (self.magic), self.pair_data2, self.pair_data3,
self.fwd_occupancy, self.fwd_grid_dim_x,
self.bwd_occupancy, self.bwd_grid_dim_x,
self.multi_stream)
def __del__(self):
if self.bn_group>1:
bnp.close_remote_data(self.pair_handle)
if self.bn_group>2:
bnp.close_remote_data(self.pair_handle2)
if self.bn_group>4:
bnp.close_remote_data(self.pair_handle3)
|
apex-master
|
apex/contrib/groupbn/batch_norm.py
|
from .batch_norm import GroupBatchNorm2d
|
apex-master
|
apex/contrib/cudnn_gbn/__init__.py
|
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import functional as F
from torch import Tensor
import peer_memory_cuda as pm
import cudnn_gbn_lib
from torch.cuda.amp import custom_fwd, custom_bwd
class _GroupBatchNorm2d(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, input, weight, bias, running_mean, running_variance,
minibatch_mean, minibatch_inv_var, momentum, eps, group_size, group_rank, fwd_buffers, bwd_buffers):
ctx.save_for_backward(input, weight, minibatch_mean, minibatch_inv_var)
ctx.eps = eps
ctx.bn_group = group_size
ctx.rank_id = group_rank
ctx.peer_buffers = bwd_buffers
return cudnn_gbn_lib.forward(input, weight, bias, running_mean, running_variance,
minibatch_mean, minibatch_inv_var, momentum, eps, group_size, group_rank, fwd_buffers)
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
x, scale, minibatch_mean, minibatch_inv_var = ctx.saved_variables
eps = ctx.eps
bn_group = ctx.bn_group
rank_id = ctx.rank_id
peer_buffers = ctx.peer_buffers
dx, dscale, dbias = cudnn_gbn_lib.backward(x,
grad_output,
scale,
minibatch_mean,
minibatch_inv_var,
eps,
bn_group,
rank_id,
peer_buffers)
return dx, dscale, dbias, None, None, None, None, None, None, None, None, None, None
class GroupBatchNorm2d(_BatchNorm):
"""
Synchronized batch normalization module extended from ``torch.nn.BatchNormNd``
with added stats reduction across multiple processes.
When running in training mode, the layer reduces stats across process groups
to increase the effective batch size of the normalization layer. This is useful
in applications where the per-process batch size is small enough to
diminish the converged accuracy of the model.
When running in evaluation mode, the layer falls back to
``torch.nn.functional.batch_norm``.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Example::
>>> gbn = apex.contrib.cudnn_gbn.GroupBatchNorm2d(100, group_size=2).cuda()
>>> inp = torch.randn(10, 100, 14, 14).cuda().half().to(memory_format=torch.channels_last)
>>> out = gbn(inp)
"""
def __init__(self, num_features, group_size, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
super(GroupBatchNorm2d, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
self.group_size = group_size
rank = torch.distributed.get_rank()
self.group_id = rank // group_size
self.group_rank = rank % group_size
self.fwd_peer_buffers = self.get_peer_buffers(num_features)
self.bwd_peer_buffers = self.get_peer_buffers(num_features)
self.minibatch_mean = torch.cuda.FloatTensor(num_features)
self.minibatch_inv_var = torch.cuda.FloatTensor(num_features)
def get_peer_buffers(self, num_features):
# group_size * 2 (low-latency algo) * 2 (mean+var) * channels * 4 (float32)
peer_size = self.group_size * 4 * num_features * 4
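# e.g. with group_size=2 and num_features=64 this is 2 * 4 * 64 * 4 = 2048 bytes:
# group_size slots, times 2 for the low-latency algorithm, times 2 for mean+var,
# times num_features float32 values (4 bytes each)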
raw = pm.allocate_raw(peer_size)
# exchange peer pointers with nccl
world_size = torch.distributed.get_world_size()
raw_ipc = pm.get_raw_ipc_address(raw).cuda()
raw_ipcs = [torch.empty_like(raw_ipc) for _ in range(world_size)]
torch.distributed.all_gather(raw_ipcs, raw_ipc)
group_ipcs = [raw_ipcs[x] for x in range(self.group_id * self.group_size, (self.group_id * self.group_size) + self.group_size)]
peer_raw_ipcs = torch.stack(group_ipcs).cpu()
return pm.get_raw_peers(peer_raw_ipcs, self.group_rank, raw)
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError(
"expected 4D input (got {}D input)".format(input.dim())
)
def _check_input_channels(self, input):
if input.size(1) % 8 != 0:
raise ValueError(
"GroupBatchNorm2d number of input channels should be a multiple of 8"
)
def forward(self, input : Tensor) -> Tensor:
# currently only GPU input is supported
if not input.is_cuda:
raise ValueError("GroupBatchNorm2d expected input tensor to be on GPU")
if not input.is_contiguous(memory_format=torch.channels_last):
raise ValueError("GroupBatchNorm2d expected input tensor to be in channels last memory format")
if torch.is_autocast_enabled():
input = input.to(torch.get_autocast_gpu_dtype())
if input.dtype != torch.float16:
raise ValueError("GroupBatchNorm2d expected input tensor in float16")
self._check_input_dim(input)
self._check_input_channels(input)
if not self.training:
# fall back to pytorch implementation for inference
return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, self.momentum, self.eps)
return _GroupBatchNorm2d.apply(input,
self.weight, self.bias,
self.running_mean, self.running_var,
self.minibatch_mean, self.minibatch_inv_var,
self.momentum,
self.eps,
self.group_size,
self.group_rank,
self.fwd_peer_buffers,
self.bwd_peer_buffers)
|
apex-master
|
apex/contrib/cudnn_gbn/batch_norm.py
|
apex-master
|
apex/contrib/test/__init__.py
|
|
apex-master
|
apex/contrib/test/index_mul_2d/__init__.py
|
|
import random
import unittest
import torch
HAS_INDEX_MUL_2D_RELU = None
try:
from apex.contrib.index_mul_2d import index_mul_2d
except ImportError as e:
HAS_INDEX_MUL_2D_RELU = False
else:
HAS_INDEX_MUL_2D_RELU = True
@unittest.skipIf(not HAS_INDEX_MUL_2D_RELU, "`apex.contrib.index_mul_2d` is not found.")
class IndexMul2dTest(unittest.TestCase):
def setUp(self, seed=0):
torch.manual_seed(seed)
self.input1_size = random.randint(1, 1000)
self.input2_size = random.randint(1, 100000)
self.feature_size = random.randint(1, 256)
self.input1_float = torch.randn(size=(self.input1_size, self.feature_size),).cuda()
self.input2_float = torch.randn(size=(self.input2_size, self.feature_size),).cuda()
self.index1 = torch.randint(low=0, high=self.input1_size, size=(self.input2_size,)).cuda()
self.input1_float_ = self.input1_float.clone()
self.input2_float_ = self.input2_float.clone()
self.input1_float.requires_grad_()
self.input1_float_.requires_grad_()
self.input2_float.requires_grad_()
self.input2_float_.requires_grad_()
self.input1_half = torch.randn(size=(self.input1_size, self.feature_size),).cuda().half()
self.input2_half = torch.randn(size=(self.input2_size, self.feature_size),).cuda().half()
self.input1_half_ = self.input1_half.clone()
self.input2_half_ = self.input2_half.clone()
self.input1_half.requires_grad_()
self.input2_half.requires_grad_()
self.input1_half_.requires_grad_()
self.input2_half_.requires_grad_()
def test_index_mul_float(self):
out = index_mul_2d(self.input1_float, self.input2_float, self.index1)
energy = (out.float()**2).sum() / out.numel()
force = torch.autograd.grad(
energy,
self.input1_float,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
loss = (out.float()**2).sum() / out.numel() + (force.float()**2).sum()
loss.backward()
out_ = self.input1_float_[self.index1] * self.input2_float_
energy_ = (out_.float()**2).sum() / out.numel()
force_ = torch.autograd.grad(
energy_,
self.input1_float_,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
loss = (out_.float()**2).sum() / out_.numel() + (force_.float()**2).sum()
loss.backward()
torch.testing.assert_close(self.input1_float, self.input1_float_, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.input2_float, self.input2_float_, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.input1_float.grad, self.input1_float_.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.input2_float.grad, self.input2_float_.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
def test_index_mul_half(self):
out = index_mul_2d(self.input1_half, self.input2_half, self.index1)
energy = (out.float()**2).sum() / out.numel()
force = torch.autograd.grad(
energy,
self.input1_half,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
loss = (out.float()**2).sum() / out.numel() + (force.float()**2).sum()
loss.backward()
out_ = self.input1_half_[self.index1] * self.input2_half_
energy_ = (out_.float()**2).sum() / out.numel()
force_ = torch.autograd.grad(
energy_,
self.input1_half_,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
loss = (out_.float()**2).sum() / out_.numel() + (force_.float()**2).sum()
loss.backward()
torch.testing.assert_close(self.input1_half, self.input1_half_, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.input2_half, self.input2_half_, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.input1_half.grad, self.input1_half_.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.input2_half.grad, self.input2_half_.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
if __name__ == '__main__':
unittest.main()
|
apex-master
|
apex/contrib/test/index_mul_2d/test_index_mul_2d.py
|
import copy
import typing
import unittest
import torch
import torch.nn as nn
from torch.testing._internal import common_utils
SKIP_TEST = None
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
try:
from apex.contrib.cudnn_gbn import GroupBatchNorm2d as GBN
except ImportError as e:
SKIP_TEST = e
# Usage: python /path/to/cudnn_gbn/test_gbn_with_two_gpus.py
input_shapes = [
[1, 1024, 48, 72],
[1, 128, 192, 288],
[1, 128, 384, 576],
[1, 1536, 48, 72],
[1, 2048, 48, 72],
[1, 256, 1, 1],
[1, 256, 192, 288],
[1, 256, 384, 576],
[1, 256, 48, 72],
[1, 256, 96, 144],
[1, 32, 384, 576],
[1, 48, 192, 288],
[1, 64, 384, 576],
[1, 728, 48, 72],
[1, 728, 96, 144],
]
class BNModelRef(nn.Module):
def __init__(self, num_features, num_layers=1000):
super().__init__()
self.fwd = nn.Sequential(
*[
nn.BatchNorm2d(num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
for _ in range(num_layers)
]
)
def forward(self, x):
return self.fwd(x)
class BNModel(nn.Module):
def __init__(self, num_features, num_layers=1000):
super().__init__()
self.fwd = nn.Sequential(
*[
GBN(num_features, group_size=2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
for _ in range(num_layers)
]
)
def forward(self, x):
return self.fwd(x)
def get_rand_tensors(global_shape, device):
inp_t = torch.rand(global_shape, dtype=torch.float32, device=device).to(memory_format=torch.channels_last)
weight = torch.rand(global_shape[1], dtype=torch.float32, device=device)
bias = torch.rand(global_shape[1], dtype=torch.float32, device=device)
_grad_out = torch.rand(global_shape, dtype=torch.float32, device=device).to(memory_format=torch.channels_last)
return inp_t, weight, bias, _grad_out
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TestCudnnGBN(NcclDistributedTestBase):
def _prep(self):
torch.cuda.manual_seed(333)
torch.manual_seed(333)
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 2)
@torch.backends.cudnn.flags(enabled=True, benchmark=True)
def _test_cudnn_gbn(
self,
num_layers: int,
shape: typing.List[int],
*,
memory_format: torch.memory_format = torch.channels_last,
) -> None:
global_shape = copy.deepcopy(shape)
global_shape[0] = self.world_size
device = torch.device("cuda", self.rank)
cudnn_gbn_model = BNModel(
num_features=shape[1],
num_layers=num_layers,
).to(device=device, memory_format=memory_format)
ref_model = BNModelRef(
num_features=shape[1],
num_layers=num_layers,
).to(device=device, memory_format=memory_format)
input, weight, bias, grad_out = get_rand_tensors(global_shape, device)
with torch.no_grad():
ref_model.fwd[0].weight.copy_(weight)
ref_model.fwd[0].bias.copy_(bias)
cudnn_gbn_model.fwd[0].weight.copy_(weight)
cudnn_gbn_model.fwd[0].bias.copy_(bias)
ref_input = input.clone().detach().requires_grad_()
input = input[self.rank : self.rank + 1, ...].clone().detach().requires_grad_()
ref_grad_out = grad_out.half().clone().detach()
grad_out = grad_out[self.rank : self.rank + 1, ...].half().clone().detach()
with torch.cuda.amp.autocast():
out = cudnn_gbn_model(input)
ref_out = ref_model(ref_input.half())
out.backward(grad_out)
ref_out.backward(ref_grad_out)
kwargs = {"rtol": 3.5e-3, "atol": 3e-2, "msg": f"shape: {shape}"}
torch.testing.assert_close(ref_out[self.rank : self.rank + 1], out, **kwargs)
torch.testing.assert_close(ref_input.grad[self.rank : self.rank + 1], input.grad, **kwargs)
# compensating the averaging over processes done by DDP
# in order to produce mathematically equivalent result
# https://github.com/NVIDIA/apex/issues/134#issuecomment-458307368
torch.testing.assert_close(
ref_model.fwd[0].weight.grad / self.world_size, cudnn_gbn_model.fwd[0].weight.grad, **kwargs
)
torch.testing.assert_close(
ref_model.fwd[0].bias.grad / self.world_size, cudnn_gbn_model.fwd[0].bias.grad, **kwargs
)
def test_cudnngbn(self):
if self.world_size != 2:
self.skipTest(f"This test is written for world_size of 2 but {self.world_size}")
for shape in input_shapes:
self._prep()
self._test_cudnn_gbn(1, shape)
if __name__ == "__main__":
common_utils.run_tests()
|
apex-master
|
apex/contrib/test/cudnn_gbn/test_cudnn_gbn_with_two_gpus.py
|
apex-master
|
apex/contrib/test/cudnn_gbn/__init__.py
|
|
import unittest
import torch
import torch.nn.functional as F
reference_available = True
try:
from torchvision.ops.focal_loss import sigmoid_focal_loss
except ImportError:
reference_available = False
SKIP_TEST = None
try:
from apex.contrib.focal_loss import focal_loss
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
@unittest.skipIf(not reference_available, "Reference implementation `torchvision.ops.focal_loss.sigmoid_focal_loss` is not available.")
class FocalLossTest(unittest.TestCase):
N_SAMPLES = 12
N_CLASSES = 8
ALPHA = 0.24
GAMMA = 2.0
REDUCTION = "sum"
def test_focal_loss(self) -> None:
if not reference_available:
self.skipTest("This test needs `torchvision` for `torchvision.ops.focal_loss.sigmoid_focal_loss`.")
else:
x = torch.randn(FocalLossTest.N_SAMPLES, FocalLossTest.N_CLASSES).cuda()
with torch.no_grad():
x_expected = x.clone()
x_actual = x.clone()
x_expected.requires_grad_()
x_actual.requires_grad_()
classes = torch.randint(0, FocalLossTest.N_CLASSES, (FocalLossTest.N_SAMPLES,)).cuda()
with torch.no_grad():
y = F.one_hot(classes, FocalLossTest.N_CLASSES).float()
expected = sigmoid_focal_loss(
x_expected,
y,
alpha=FocalLossTest.ALPHA,
gamma=FocalLossTest.GAMMA,
reduction=FocalLossTest.REDUCTION,
)
actual = sum([focal_loss.FocalLoss.apply(
x_actual[i:i+1],
classes[i:i+1].long(),
torch.ones([], device="cuda"),
FocalLossTest.N_CLASSES,
FocalLossTest.ALPHA,
FocalLossTest.GAMMA,
0.0,
) for i in range(FocalLossTest.N_SAMPLES)])
# forward parity
torch.testing.assert_close(expected, actual)
expected.backward()
actual.backward()
# grad parity
torch.testing.assert_close(x_expected.grad, x_actual.grad)
if __name__ == "__main__":
torch.manual_seed(42)
unittest.main()
|
apex-master
|
apex/contrib/test/focal_loss/test_focal_loss.py
|
apex-master
|
apex/contrib/test/focal_loss/__init__.py
|
|
apex-master
|
apex/contrib/test/xentropy/__init__.py
|
|
import unittest
import random
import time
import numpy as np
import torch
SKIP_TEST = None
try:
from apex.contrib import xentropy as label_smoothing
except ImportError as e:
SKIP_TEST = e
def label_smoothing_raw(x, target, padding_idx, smoothing):
logprobs = torch.nn.functional.log_softmax(x, dim=-1, dtype=torch.float32)
non_pad_mask = (target != padding_idx)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)[non_pad_mask]
smooth_loss = -logprobs.mean(dim=-1)[non_pad_mask]
loss = (1.0 - smoothing) * nll_loss + smoothing * smooth_loss
return loss
def label_smoothing_opt_1(x, target, padding_idx, smoothing):
logprobs = torch.nn.functional.log_softmax(x, dim=-1, dtype=torch.float32)
pad_mask = (target == padding_idx)
ll_loss = logprobs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
smooth_loss = logprobs.mean(dim=-1)
loss = (smoothing - 1.0) * ll_loss - smoothing * smooth_loss
loss.masked_fill_(pad_mask, 0)
return loss
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class LabelSmoothingTest(unittest.TestCase):
def setUp(self, seed=1234):
super().setUp()
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# Set pytorch print precision
torch.set_printoptions(precision=10)
def gen_test_inputs(self, N, T, H, smoothing, padding_idx, dtype=torch.half):
logits = torch.randn((N*T, H), dtype=dtype, device='cuda',
requires_grad=True)
labels = torch.randint(0, H, [N*T], device='cuda')
for i in random.sample(range(N*T), N*T//6):
labels[i] = padding_idx
half_to_float = (logits.dtype == torch.half)
return logits, labels, half_to_float
def print_max_diff_elem(self, ref, tst):
ref, tst = ref.flatten(), tst.flatten()
diff = (ref - tst).abs().max()
idx = (ref - tst).abs().argmax()
print("Max atol idx: {}, diff: {:.6f}, ref: {:.6f}, tst: {:.6f}".format(
idx, diff, ref[idx], tst[idx]))
def _test_label_smoothing_function(self, dtype):
# Set label smoothing configuration
smoothing, padding_idx = 0.1, 0
N, T, H = 128, 74, 32320
iters = 10
loss_func = label_smoothing.SoftmaxCrossEntropyLoss.apply
for i in range(iters):
logits, labels, half_to_float = self.gen_test_inputs(
N, T, H, smoothing, padding_idx)
# Run original softmax cross entropy with label smoothing
logits.grad = None
losses = label_smoothing_raw(logits, labels, padding_idx, smoothing)
loss = losses.sum()
loss.backward()
ref_loss = loss.clone().detach()
ref_grad = logits.grad.clone().detach()
# Run optimized softmax cross entropy with label smoothing
logits.grad = None
losses = loss_func(logits, labels, smoothing, padding_idx, half_to_float)
loss = losses.sum()
loss.backward()
val_loss = loss.clone().detach()
val_grad = logits.grad.clone().detach()
# Validate
self.print_max_diff_elem(ref_grad, val_grad)
torch.testing.assert_close(val_loss, ref_loss)
torch.testing.assert_close(val_grad, ref_grad)
def test_label_smoothing_function_fp16(self):
self._test_label_smoothing_function(torch.half)
def test_label_smoothing_function_bf16(self):
self._test_label_smoothing_function(torch.bfloat16)
def test_label_smoothing_perf(self):
# Set label smoothing configuration
smoothing, padding_idx = 0.1, 0
N, T, H = 128, 74, 32320
iters = 1000
loss_func = label_smoothing.SoftmaxCrossEntropyLoss.apply
print()
logits, labels, half_to_float = self.gen_test_inputs(
N, T, H, smoothing, padding_idx)
# Run original softmax cross entropy with label smoothing
torch.cuda.synchronize()
ts = time.time()
for i in range(iters):
logits.grad = None
losses = label_smoothing_raw(logits, labels, padding_idx, smoothing)
loss = losses.sum() / N
loss.backward()
torch.cuda.synchronize()
print("Raw time {:.2f} s elapsed for {} iterations, norm {:.4f}".format(
time.time() - ts, iters, logits.grad.norm()))
# Run optimized softmax cross entropy with label smoothing
torch.cuda.synchronize()
ts = time.time()
for i in range(iters):
logits.grad = None
losses = loss_func(logits, labels, smoothing, padding_idx, half_to_float)
loss = losses.sum() / N
loss.backward()
torch.cuda.synchronize()
print("Opt time {:.2f} s elapsed for {} iterations, norm {:.4f}".format(
time.time() - ts, iters, logits.grad.norm()))
if __name__ == '__main__':
unittest.main()
|
apex-master
|
apex/contrib/test/xentropy/test_label_smoothing.py
|
import unittest
import os
import torch
from torch.testing._internal import common_utils
from torch.testing._internal.common_device_type import instantiate_device_type_tests
SKIP_TEST = None
try:
from apex import fused_dense
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class FusedDenseTest(common_utils.TestCase):
def _test_fused_dense(self, dtype, seed=0):
os.environ["TORCH_ALLOW_TF32_CUBLAS_OVERRIDE"] = "0"
torch.manual_seed(seed)
seq_length = 512
sequences = 3
hidden_dim = 1024
ref_inputs = torch.randn(sequences*seq_length, hidden_dim,
dtype=dtype, device=torch.device("cuda")).requires_grad_(True)
tst_inputs = ref_inputs.clone().detach().requires_grad_(True)
dense = fused_dense.FusedDense(1024, 3072)
dense.to(dtype=dtype)
dense.cuda()
y_tst = dense(tst_inputs)
y_ref = torch.matmul(ref_inputs, dense.weight.t())+dense.bias
dy = torch.randn_like(y_tst).to(dtype=dtype)
y_tst.backward(dy)
dw_ref = torch.matmul(dy.t(), ref_inputs)
dx_ref = torch.matmul(dy, dense.weight.clone())
db_ref = dy.sum(0, False)
torch.testing.assert_close(
ref_inputs, tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(
y_ref, y_tst, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(
dw_ref, dense.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(
dx_ref, tst_inputs.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(
db_ref, dense.bias.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
@common_utils.parametrize("dtype", [torch.half, torch.float, torch.bfloat16])
def test_fused_dense(self, dtype):
self._test_fused_dense(dtype)
instantiate_device_type_tests(FusedDenseTest, globals(), only_for=("cuda",))
if __name__ == "__main__":
common_utils.run_tests()
|
apex-master
|
apex/contrib/test/fused_dense/test_fused_dense.py
|
apex-master
|
apex/contrib/test/layer_norm/__init__.py
|
|
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.layer_norm.layer_norm import FastLayerNorm
import fast_layer_norm as fln
except ImportError as e:
SKIP_TEST = e
class GPUTimer:
def __init__(self, stream):
self.start_ = torch.cuda.Event(enable_timing=True)
self.stop_ = torch.cuda.Event(enable_timing=True)
self.stream_ = stream
def start(self):
self.stream_.record_event(self.start_)
def stop(self):
self.stream_.record_event(self.stop_)
def sync(self):
self.stream_.synchronize()
def millis(self):
return self.start_.elapsed_time(self.stop_)
def size_in_bytes(t):
return torch.numel(t) * t.element_size()
def metrics(y_ref, y, epsilon=1e-6):
y_ref = y_ref.float()
y = y.float()
relerr, mse = (
(y_ref - y).abs().sum() / (y_ref.abs().sum() + epsilon),
(y_ref - y).square().mean(),
)
return relerr.item(), mse.item()
device = torch.device("cuda")
fp32 = torch.float32
fp16 = torch.float16
bf16 = torch.bfloat16
def backward_(dz, x, mu, rs, gamma):
wtype = gamma.dtype
itype = x.dtype
otype = dz.dtype
ctype = mu.dtype
mu = mu.unsqueeze(1)
rs = rs.unsqueeze(1)
hidden_size = gamma.numel()
y = rs * (x.to(ctype) - mu)
dbeta = dz.view(-1, hidden_size).sum(0, dtype=ctype)
dgamma = (dz * y).view(-1, hidden_size).sum(0, dtype=ctype)
dy = dz.view(-1, hidden_size).to(ctype) * gamma.unsqueeze(0).to(ctype)
mdy = dy.mean(1, keepdim=True, dtype=ctype)
mdyy = (dy * y).mean(1, keepdim=True, dtype=ctype)
dx = rs * (dy - mdyy * y - mdy)
return dx.to(itype), dgamma.to(wtype), dbeta.to(wtype)
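# for reference, the code above implements the standard layer-norm backward:
#   y = (x - mu) * rsigma
#   dbeta = sum(dz), dgamma = sum(dz * y)
#   dy = dz * gamma
#   dx = rsigma * (dy - mean(dy) - y * mean(dy * y))
# with sums over the rows and means over the hidden dimension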
def benchmark_(S, B, hidden_size, itype, wtype, runs=100):
epsilon = 1e-5
x = torch.randn((S * B, hidden_size), dtype=itype, device=device)
beta = torch.randn(hidden_size, dtype=wtype, device=device)
gamma = torch.randn(hidden_size, dtype=wtype, device=device)
dz = torch.randn(x.shape, dtype=wtype, device=device)
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
timer = GPUTimer(stream)
# warmup
for r in range(runs):
z, mu, rsigma = fln.ln_fwd(x, gamma, beta, epsilon)
timer.start()
for r in range(runs):
z, mu, rsigma = fln.ln_fwd(x, gamma, beta, epsilon)
timer.stop()
timer.sync()
total_bytes_fwd = sum([size_in_bytes(t) for t in [x, z, gamma, beta, mu, rsigma]])
ms_fwd = timer.millis() / runs
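# total_bytes * 1e-6 is megabytes, and MB per millisecond equals GB per second,
# so the prints report throughput in GB/sec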
print(
"[FWD] Time: {:.4f}ms Throughput: {:.4f} GB/sec".format(
ms_fwd, total_bytes_fwd * 1e-6 / ms_fwd
)
)
timer.start()
for r in range(runs):
dx, dgamma, dbeta, dbp, dgp = fln.ln_bwd(dz, x, mu, rsigma, gamma)
timer.stop()
timer.sync()
total_bytes_bwd = sum(
[
size_in_bytes(t)
for t in [dz, x, mu, rsigma, gamma, dx, dgamma, dbeta, dbp, dbp, dgp, dgp]
]
)
ms_bwd = timer.millis() / runs
print(
"[BWD] Time: {:.4f}ms Throughput: {:.4f} GB/sec".format(
ms_bwd, total_bytes_bwd * 1e-6 / ms_bwd
)
)
def _test_impl(S, B, hidden_size, itype, wtype, ctype=fp32):
seed = 1243
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
otype = wtype
print("========================================================")
print(f"S={S} B={B} Hidden={hidden_size} {itype} {wtype}")
print("--------------------------------------------------------")
x = torch.randn(S * B, hidden_size, dtype=itype, device=device)
gamma = torch.randn(hidden_size, dtype=wtype, device=device) * 0.2
beta = torch.randn(hidden_size, dtype=wtype, device=device) * 0.2
epsilon = 1e-5
x.requires_grad = True
gamma.requires_grad = True
beta.requires_grad = True
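    # Reference LayerNorm forward in the compute dtype: z = gamma * (x - mu) * rsqrt(var + eps) + beta.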
mu_ref = x.mean(1, dtype=ctype, keepdim=True)
v = torch.square(x - mu_ref).mean(1, dtype=ctype, keepdim=True)
rs_ref = torch.rsqrt(v + epsilon)
y_ref = rs_ref * (x.to(ctype) - mu_ref)
z_ref = (gamma.unsqueeze(0) * (y_ref).to(otype) + beta.unsqueeze(0)).to(otype)
mu_ref = mu_ref.flatten()
rs_ref = rs_ref.flatten()
dz = torch.randn_like(z_ref)
# z_ref.backward(dz)
# dx_ref = x.grad
# dgamma_ref = gamma.grad
# dbeta_ref = beta.grad
dx_ref, dg_ref, db_ref = backward_(dz, x, mu_ref, rs_ref, gamma)
z, mu, rs = fln.ln_fwd(x, gamma, beta, epsilon)
dx, dg, db, dg_part, db_part = fln.ln_bwd(dz, x, mu, rs, gamma)
re_z, mse_z = metrics(z_ref, z)
re_mu, mse_mu = metrics(mu_ref, mu)
re_rs, mse_rs = metrics(rs_ref, rs)
re_dx, mse_dx = metrics(dx_ref, dx)
re_dg, mse_dg = metrics(dg_ref, dg)
re_db, mse_db = metrics(db_ref, db)
print(f" z: relerr={re_z :.4e} mse={mse_z :.4e}")
print(f"mu: relerr={re_mu:.4e} mse={mse_mu:.4e}")
print(f"rs: relerr={re_mu:.4e} mse={mse_mu:.4e}")
print(f"dx: relerr={re_dx:.4e} mse={mse_dx:.4e}")
print(f"dg: relerr={re_dg:.4e} mse={mse_dg:.4e}")
print(f"db: relerr={re_db:.4e} mse={mse_db:.4e}")
def check_err(x, relerr):
tol = 1e-3 if x.dtype == torch.float16 else 5e-6
return relerr < tol
return [
check_err(x, re)
for x, re in zip([z, mu, rs, dx, dg, db], [re_z, re_mu, re_rs, re_dx, re_dg, re_db])
]
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TestFastLayerNorm(unittest.TestCase):
# TODO(crcrpar): Try `torch.testing.assert_close` instead and migrate to it if it's working.
def assertAll(self, l):
if not all(l):
print(l)
for x in l:
self.assertTrue(x)
def test_all_configs(self):
hidden_sizes = [
768,
1024,
1536,
2048,
2304,
3072,
3840,
4096,
5120,
6144,
8192,
10240,
12288,
12800,
14336,
15360,
16384,
18432,
20480,
24576,
25600,
30720,
32768,
40960,
49152,
65536,
]
for h in hidden_sizes:
with self.subTest(f"hidden_size={h}"):
self.assertAll(_test_impl(256, 2, h, fp32, fp32))
self.assertAll(_test_impl(256, 2, h, fp16, fp16))
self.assertAll(_test_impl(256, 2, h, fp32, fp16))
self.assertAll(_test_impl(256, 2, h, bf16, bf16))
self.assertAll(_test_impl(256, 2, h, fp32, bf16))
def test_run_benchmark(self):
for (S, B, hidden_size, runs) in (
(512, 32, 768, 1000),
(512, 32, 1024, 1000),
(512, 8, 4096, 1000),
(512, 8, 5120, 1000),
(512, 8, 6144, 1000),
(256, 2, 20480, 500),
(256, 2, 25600, 500),
(256, 2, 40960, 250),
(256, 2, 65536, 250),
):
with self.subTest(f"(S, B, hidden_size)=({S}, {B}, {hidden_size})"):
benchmark_(S, B, hidden_size, fp16, fp16, runs)
def test_compat_with_autocast(self):
autocast_dtypes = (
(torch.half, torch.bfloat16) if torch.cuda.is_bf16_supported() else (torch.half,)
)
input_shape = (512, 32, 768)
layer_norm = FastLayerNorm(input_shape[-1]).cuda()
input = torch.randn(input_shape).cuda()
for dtype in autocast_dtypes:
layer_norm.zero_grad(set_to_none=True)
with self.subTest(f"autocast_dtype={dtype}"):
with torch.cuda.amp.autocast(enabled=True, dtype=dtype):
out = layer_norm(input)
self.assertEqual(dtype, out.dtype)
grad = torch.randn_like(out)
out.backward(grad)
self.assertEqual(torch.float32, layer_norm.weight.grad.dtype)
if __name__ == "__main__":
unittest.main()
|
apex-master
|
apex/contrib/test/layer_norm/test_fast_layer_norm.py
|
import os
import inspect
import torch
from torch.cuda.amp import GradScaler
from torch.testing._internal import common_utils
from apex.parallel.distributed import flat_dist_call
from apex.contrib.optimizers.distributed_fused_lamb import DistributedFusedLAMB
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
def get_init_weights_func():
@torch.no_grad()
def init_weights(m):
if isinstance(m, torch.nn.Linear):
m.weight.fill_(1.0)
return init_weights
class ModelFoo(torch.nn.Module):
def __init__(self):
super(ModelFoo, self).__init__()
        self.linear = torch.nn.Linear(128, 128, bias=False)
self.loss = torch.nn.MSELoss()
def forward(self, input_tensor, gt):
y = self.linear(input_tensor)
loss = self.loss(y, gt)
return loss
# A test for distributed fused Lamb optimizer: run several iterations and see if loss decreases
# There are two instances of the same test because based on `world_size` the optimizer decides what collectives operation to use.
# If torch.distributed.get_world_size() == torch.cuda.device_count() it uses only `all_gather`.
# If torch.distributed.get_world_size() < torch.cuda.device_count() it uses both `all_gather` and `reduce_scatter`.
class NcclDistributedFusedLAMB(NcclDistributedTestBase):
@property
def world_size(self) -> int:
return torch.cuda.device_count()
@common_utils.parametrize("no_copy", [False, True])
@common_utils.parametrize("opt_kwargs", [
dict(overlap_reductions=True, dwu_num_blocks=2, dwu_num_chunks=2,
fused_norm=False, fuse_scale=False, clip_after_ar=True,
full_ar=False),
dict(overlap_reductions=False, dwu_num_blocks=1, dwu_num_chunks=1,
fused_norm=True, fuse_scale=True, clip_after_ar=False),
])
def test_distributed_fused_lamb(self, no_copy, opt_kwargs):
if no_copy and 'no_copy' not in inspect.getfullargspec(torch.distributed.reduce_scatter).args:
self.skipTest("does not support no_copy")
if no_copy and 'no_copy' not in inspect.getfullargspec(torch.distributed.all_gather).args:
self.skipTest("does not support no_copy")
assert torch.distributed.is_initialized()
gpu_count = torch.distributed.get_world_size()
init_scale = 100
lr = torch.tensor(0.1).cuda()
grad_scaler = GradScaler(init_scale=init_scale, growth_interval=1000)
model = ModelFoo()
model = model.cuda().half()
model.apply(get_init_weights_func())
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
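        # Unless overridden via opt_kwargs, enable full all-reduce only when this test spans every visible GPU.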
if 'full_ar' not in opt_kwargs:
opt_kwargs['full_ar'] = gpu_count == torch.cuda.device_count()
# Aidyn-A: not sure what parameters are the best for testing purposes,
# setting up whatever I think appropriate.
optimizer = DistributedFusedLAMB(
optimizer_grouped_parameters,
lr=0.1,
betas=(0.9, 0.9),
eps=1e-6,
max_grad_norm=1.0,
dwu_group_size=gpu_count,
dwu_num_rs_pg=1,
dwu_num_ar_pg=1,
dwu_num_ag_pg=1,
use_nvlamb=False,
set_param_views_to_flat_buffer=False,
e5m2_allgather=False,
**opt_kwargs
)
optimizer.set_global_scale(init_scale)
optimizer._reduce_scatter_no_copy = no_copy
optimizer._all_gather_no_copy = no_copy
flat_dist_call([param.data for param in model.parameters()], torch.distributed.broadcast, (0,) )
x = torch.randn(4096, 128, dtype=torch.float16).cuda()
y = torch.randn(4096, 128, dtype=torch.float16).cuda()
losses = []
for _ in range(10):
loss = model(x, y)
optimizer._lazy_init_stage1()
grad_scaler.scale(loss).backward()
optimizer._lazy_init_stage2()
optimizer._lr = lr
optimizer.complete_reductions()
optimizer.set_global_scale(grad_scaler._get_scale_async())
grad_scaler.step(optimizer)
grad_scaler.update()
optimizer.zero_grad(set_to_none=True)
losses.append(loss.item())
self.assertTrue(losses == sorted(losses, reverse=True))
common_utils.instantiate_parametrized_tests(NcclDistributedFusedLAMB)
class NcclDistributedFusedLAMB_partial_ar(NcclDistributedFusedLAMB):
@property
def world_size(self) -> int:
return max(torch.cuda.device_count()-1, 1)
if __name__ == "__main__":
common_utils.run_tests()
|
apex-master
|
apex/contrib/test/optimizers/test_distributed_fused_lamb.py
|
apex-master
|
apex/contrib/test/optimizers/__init__.py
|
|
from contextlib import contextmanager
import io
from typing import Callable, Optional, Tuple
import unittest
import warnings
import torch
from torch.testing._internal import common_utils
SKIP_TEST = None
try:
from apex.contrib.optimizers.distributed_fused_adam import DistributedFusedAdam
except ImportError as e:
SKIP_TEST = e
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
class SimpleModel(torch.nn.Module):
def __init__(self, num_layers, size):
super().__init__()
self.params = torch.nn.ParameterList([
torch.nn.Parameter(torch.rand(1, size) + 1)
for _ in range(num_layers)
])
def forward(self, x):
y = 0
for i, param in enumerate(self.params):
y += (i+1) * param * x
return y
def make_models(
num_layers: int,
size: int,
adam_w_mode: bool = True,
model_dtype: torch.dtype = torch.float32,
optim_dtype: Optional[torch.dtype] = None,
grad_sync_dtype: Optional[torch.dtype] = None,
param_sync_dtype: Optional[torch.dtype] = None,
device: torch.device = 'cuda',
process_group: Optional[torch.distributed.ProcessGroup] = None,
average_grad_sync: bool =True,
overlap_communication: bool = True,
contiguous_buffers: bool = False,
store_params: bool = False,
store_param_remainders: bool = False,
bucket_cap_mb: float = 71/(4*1024*1024),
):
# Construct models with same parameters
ref_model = SimpleModel(num_layers, size).to(dtype=model_dtype, device=device)
dist_model = SimpleModel(num_layers, size).to(dtype=model_dtype, device=device)
with torch.no_grad():
        for dist_param, ref_param in zip(dist_model.parameters(),
                                         ref_model.parameters()):
            ref_param.copy_(dist_param)
# Initialize reference model with data-parallelism
rank = torch.distributed.get_rank()
ref_model = torch.nn.parallel.DistributedDataParallel(
ref_model,
device_ids=[rank] if device=='cuda' else None,
output_device=rank if device=='cuda' else None,
process_group=process_group,
)
# Construct optimizers with same hyperparameters
if optim_dtype is None:
optim_dtype = model_dtype
optim_args = dict(lr=0.1, betas=(0.1,0.2), eps=0.25, weight_decay=0.1)
ref_optim_class = torch.optim.AdamW if adam_w_mode else torch.optim.Adam
ref_optim = ref_optim_class(
[
{'params': list(ref_model.parameters())[1::2], 'lr': 0.2},
{'params': list(ref_model.parameters())[0::2]},
],
**optim_args,
)
dist_optim = DistributedFusedAdam(
[
{'params': list(dist_model.parameters())[1::2], 'lr': 0.2},
{'params': list(dist_model.parameters())[0::2]},
],
adam_w_mode=adam_w_mode,
overlap_grad_sync=overlap_communication,
overlap_param_sync=overlap_communication,
bucket_cap_mb=bucket_cap_mb,
dtype=optim_dtype,
grad_sync_dtype=grad_sync_dtype,
param_sync_dtype=param_sync_dtype,
process_group=process_group,
average_grad_sync=average_grad_sync,
contiguous_param_buffer=contiguous_buffers,
contiguous_grad_buffer=contiguous_buffers,
store_params=store_params,
store_param_remainders=store_param_remainders,
**optim_args,
)
return ref_model, ref_optim, dist_model, dist_optim
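# No-op context manager; used in place of DistributedFusedAdam.no_sync() when gradients should be synchronized on every micro-batch.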
@contextmanager
def dummy_context():
try:
yield
finally:
pass
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TestDistributedFusedAdam(NcclDistributedTestBase):
seed = 1234
def test_matches_pytorch(
self,
rtol: Optional[float] = None,
atol: Optional[float] = None,
num_layers: int = 11,
layer_size: int = 7,
batch_size: int = 3,
num_steps: int = 3,
micro_batch_steps: int = 3,
adam_w_mode: bool = True,
overlap_communication: bool = True,
use_nosync: bool = True,
model_dtype: torch.dtype = torch.float32,
optim_dtype: Optional[torch.dtype] = None,
grad_sync_dtype: Optional[torch.dtype] = None,
param_sync_dtype: Optional[torch.dtype] = None,
device: torch.device = 'cuda',
contiguous_buffers: bool = False,
store_params: bool = False,
store_param_remainders: bool = False,
bucket_cap_mb: float = 71/(4*1024*1024),
init_optim_func: Optional[Callable[[DistributedFusedAdam], None]] = None,
):
torch.manual_seed(self.seed + self.rank)
# Identical models with data-parallel and ZeRO
ref_model, ref_optim, dist_model, dist_optim = make_models(
num_layers,
layer_size,
adam_w_mode=adam_w_mode,
model_dtype=model_dtype,
optim_dtype=optim_dtype,
grad_sync_dtype=grad_sync_dtype,
param_sync_dtype=param_sync_dtype,
device=device,
overlap_communication=overlap_communication,
contiguous_buffers=contiguous_buffers,
store_params=store_params,
store_param_remainders=store_param_remainders,
bucket_cap_mb=bucket_cap_mb,
)
# Initialize distributed optimizer
if init_optim_func is not None:
init_optim_func(dist_optim)
# Training loop
for step in range(num_steps):
# Reset gradients
ref_optim.zero_grad()
dist_optim.zero_grad()
# Forward and backward passes
for micro_step in range(micro_batch_steps):
# Synthetic data
x = torch.rand(batch_size, layer_size) - 0.5
dy = torch.rand_like(x) - 0.5
x = x.to(dtype=model_dtype, device=device)
dy = dy.to(dtype=model_dtype, device=device)
# Reference implementation
x_ref = x.detach().clone().requires_grad_(True)
y_ref = ref_model(x_ref)
y_ref.backward(dy)
# Distributed implementation
x_dist = x.detach().clone().requires_grad_(True)
y_dist = dist_model(x_dist)
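                # Defer gradient reduction with no_sync() on all but the last micro-batch when use_nosync is set.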
backward_context = dummy_context
if use_nosync and micro_step < micro_batch_steps-1:
backward_context = dist_optim.no_sync
with backward_context():
y_dist.backward(dy)
# Check that data tensors match
torch.testing.assert_close(
y_dist, y_ref, rtol=rtol, atol=atol)
torch.testing.assert_close(
x_dist.grad, x_ref.grad, rtol=rtol, atol=atol)
# Optimization step
ref_optim.step()
dist_optim.step()
# Check that parameters match
for ref_param, dist_param in zip(ref_model.parameters(),
dist_model.parameters()):
torch.testing.assert_close(
dist_param, ref_param, rtol=rtol, atol=atol)
def test_matches_pytorch_l2_reg(self):
self.test_matches_pytorch(adam_w_mode=False)
def test_matches_pytorch_no_overlap(self):
self.test_matches_pytorch(
overlap_communication=False,
use_nosync=False,
)
def test_matches_pytorch_sync_every_step(self):
self.test_matches_pytorch(use_nosync=False)
def test_matches_pytorch_contiguous_buffers(self):
self.test_matches_pytorch(contiguous_buffers=True)
def test_matches_pytorch_fp64(self):
self.test_matches_pytorch(
rtol=1.3e-6,
atol=1e-5,
model_dtype=torch.float64,
optim_dtype=torch.float32,
)
def test_matches_pytorch_fp16(self):
self.test_matches_pytorch(
rtol=5e-3,
atol=1e-5,
micro_batch_steps=1,
model_dtype=torch.float16,
optim_dtype=torch.float16,
)
def test_matches_pytorch_bf16(self):
self.test_matches_pytorch(
rtol=5e-2,
atol=1e-5,
micro_batch_steps=1,
model_dtype=torch.bfloat16,
optim_dtype=torch.bfloat16,
)
def test_matches_pytorch_fp16_params(self):
self.test_matches_pytorch(
rtol=5e-3,
atol=1e-5,
micro_batch_steps=1,
model_dtype=torch.float16,
optim_dtype=torch.float32,
param_sync_dtype=torch.float16,
store_params=True,
)
def test_matches_pytorch_bf16_grads(self):
self.test_matches_pytorch(
rtol=5e-2,
atol=1e-5,
micro_batch_steps=1,
model_dtype=torch.float32,
optim_dtype=torch.float32,
grad_sync_dtype=torch.bfloat16,
)
def test_matches_pytorch_bf16_param_remainders(self):
self.test_matches_pytorch(
rtol=5e-2,
atol=1e-5,
micro_batch_steps=1,
model_dtype=torch.bfloat16,
optim_dtype=torch.float32,
param_sync_dtype=torch.bfloat16,
store_params=False,
store_param_remainders=True,
)
def test_matches_pytorch_multi_dtypes(self):
def init_optim(optim: DistributedFusedAdam):
params = list(optim.parameters())
optim.init_params(params[0::3], grad_sync_dtype=torch.bfloat16)
optim.init_params(params[1::3], param_sync_dtype=torch.bfloat16)
self.test_matches_pytorch(
rtol=5e-2,
atol=1e-5,
init_optim_func=init_optim,
)
def test_raises_on_mismatch(self):
torch.manual_seed(self.seed + self.rank)
# Identical models with data-parallel and ZeRO
num_layers = 11
layer_size = 7
ref_model, ref_optim, dist_model, dist_optim = make_models(
num_layers,
layer_size,
)
# Only perform training step with distributed model
dist_optim.zero_grad()
x = torch.rand(3, layer_size) - 0.5
x = x.to(dtype=torch.float32, device='cuda')
dy = torch.rand_like(x) - 0.5
y = dist_model(x)
y.backward(dy)
dist_optim.step()
# Check that parameters do not match
for ref_param, dist_param in zip(ref_model.parameters(),
dist_model.parameters()):
self.assertRaises(
AssertionError,
torch.testing.assert_close,
dist_param, ref_param,
)
def test_clip_grad_norm(self):
torch.manual_seed(self.seed + self.rank)
# Identical models with data-parallel and ZeRO
ref_model, ref_optim, dist_model, dist_optim = make_models(1, 1)
# Training steps with pre-determined gradients
xs = [3, 1, 4, 1, 5, 9]
dys = [1, -1, 1, -1, 1, -1]
for x, dy in zip(xs, dys):
x = torch.tensor([[x]], dtype=torch.float32, device='cuda')
dy = torch.tensor([[dy]], dtype=torch.float32, device='cuda')
# Reference implementation
ref_optim.zero_grad()
y_ref = ref_model(x.detach())
y_ref.backward(dy.detach())
ref_grad_norm = torch.nn.utils.clip_grad_norm_(ref_model.parameters(), 3.5)
ref_optim.step()
# Distributed implementation
dist_optim.zero_grad()
y_dist = dist_model(x.detach())
y_dist.backward(dy.detach())
dist_grad_norm = dist_optim.clip_grad_norm(3.5)
dist_optim.step()
# Check that parameters match
torch.testing.assert_close(dist_grad_norm, ref_grad_norm)
for ref_param, dist_param in zip(ref_model.parameters(),
dist_model.parameters()):
torch.testing.assert_close(dist_param, ref_param)
def test_grad_scaler(self):
torch.manual_seed(self.seed + self.rank)
# Identical models with data-parallel and ZeRO
ref_model, ref_optim, dist_model, dist_optim = make_models(1, 1)
grad_scaler_args = dict(
init_scale=3.21,
growth_factor=1.23,
backoff_factor=0.876,
growth_interval=1,
)
ref_scaler = torch.cuda.amp.GradScaler(**grad_scaler_args)
dist_scaler = torch.cuda.amp.GradScaler(**grad_scaler_args)
# Training steps with pre-determined gradients
xs = [3, 1, 4, 1, 5, 9]
dys = [1, float('inf'), 1, 1, float('nan'), -1]
for x, dy in zip(xs, dys):
x = torch.tensor([[x]], dtype=torch.float32, device='cuda')
dy = torch.tensor([[dy]], dtype=torch.float32, device='cuda')
# Reference implementation
ref_optim.zero_grad()
y_ref = ref_model(x.detach())
ref_scaler.scale(y_ref).backward(dy.detach())
ref_scaler.step(ref_optim)
ref_scaler.update()
# Distributed implementation
dist_optim.zero_grad()
y_dist = dist_model(x.detach())
dist_scaler.scale(y_dist).backward(dy.detach())
dist_scaler.step(dist_optim)
dist_scaler.update()
# Check that parameters match
for ref_param, dist_param in zip(ref_model.parameters(),
dist_model.parameters()):
torch.testing.assert_close(dist_param, ref_param)
def test_checkpoint(
self,
rtol: Optional[float] = None,
atol: Optional[float] = None,
num_layers: int = 2,
layer_size: int = 2,
num_steps: int = 3,
save_group_size: Optional[int] = None,
load_group_size: Optional[int] = None,
save_model_kwargs: Optional[dict] = None,
load_model_kwargs: Optional[dict] = None,
):
"""Test state_dict and load_state_dict functions
Two models are constructed, possibly on different process
groups. One of the models is trained for a few steps, a
checkpoint is saved, and the checkpoint is loaded on the other
model. Both models are then trained for a few steps and
checked to make sure that they produce identical results.
Arguments:
rtol (float): Relative tolerance for numerical checks (see
torch.allclose).
atol (float): Absolute tolerance for numerical checks (see
torch.allclose).
num_layers (int): Number of layers in test model.
layer_size (int): Number of features in model layers.
num_steps (int): Number of training steps to perform
before and after checkpointing.
save_group_size (int): Process group size for model that
saves the checkpoint. Uses the default process group
by default.
load_group_size (int): Process group size for model that
loads the checkpoint. Uses the default process group
by default.
save_model_kwargs (dict): keyword arguments passed to
make_models when constructing the model that saves the
checkpoint.
load_model_kwargs (dict): keyword arguments passed to
make_models when constructing the model that loads the
checkpoint.
"""
# Initialize process groups
world_size = torch.distributed.get_world_size()
if save_group_size is None:
save_group_size = world_size
save_group = None
else:
if save_group_size > world_size:
self.skipTest(
f"Requires {save_group_size} ranks, found {world_size}"
)
save_ranks = list(range(save_group_size))
save_group = torch.distributed.new_group(ranks=save_ranks)
if load_group_size is None:
load_group_size = world_size
load_group = None
else:
if load_group_size > world_size:
self.skipTest(
f"Requires {load_group_size} ranks, found {world_size}"
)
load_ranks = list(range(load_group_size))
load_group = torch.distributed.new_group(ranks=load_ranks)
# Construct two models with same config and different params
torch.manual_seed(self.seed)
if self.rank < save_group_size:
if not save_model_kwargs:
save_model_kwargs = {}
_, _, model_save, optim_save = make_models(
num_layers,
layer_size,
process_group=save_group,
average_grad_sync=False,
**save_model_kwargs,
)
optim_save.init_params(reversed(list(model_save.parameters())))
torch.manual_seed(self.seed+1)
if self.rank < load_group_size:
if not load_model_kwargs:
load_model_kwargs = {}
_, _, model_load, optim_load = make_models(
num_layers,
layer_size,
process_group=load_group,
average_grad_sync=False,
**load_model_kwargs,
)
optim_load.init_params(list(model_load.parameters()))
batch_size = 2 * save_group_size * load_group_size
def make_global_batch() -> torch.Tensor:
"""Generate random tensor on root rank and broadcast"""
x = torch.empty(batch_size, layer_size, device='cuda')
if self.rank == 0:
torch.rand(x.size(), out=x)
x -= 0.5
torch.distributed.broadcast(x, src=0)
return x
def to_local_batch(
global_batch: torch.Tensor,
group: Optional[torch.distributed.ProcessGroup],
) -> Optional[torch.Tensor]:
"""Get local portion of tensor that is replicated across all ranks"""
group_size = torch.distributed.get_world_size(group)
if group_size < 0:
return None
local_batch_size = batch_size // group_size
batch_start = self.rank * local_batch_size
batch_end = (self.rank + 1) * local_batch_size
return global_batch[batch_start:batch_end, ...]
def to_global_batch(
local_batch: torch.Tensor,
group: Optional[torch.distributed.ProcessGroup],
) -> torch.Tensor:
"""Gather distributed tensor and broadcast to all ranks"""
# Allocate buffer
global_batch = torch.empty(batch_size, layer_size, device='cuda')
# Gather data on root rank
group_size = torch.distributed.get_world_size(group)
if group_size > 0:
local_batches = None
if self.rank == 0:
local_batch_size = batch_size // group_size
local_batches = [
global_batch[rank*local_batch_size:(rank+1)*local_batch_size, ...]
for rank in range(group_size)
]
torch.distributed.gather(
local_batch,
local_batches,
dst=0,
group=group,
)
# Broadcast data to all ranks
torch.distributed.broadcast(global_batch, src=0)
return global_batch
# Train one of the models
torch.manual_seed(self.seed+2)
for step in range(num_steps):
if self.rank < save_group_size:
optim_save.zero_grad()
x = make_global_batch()
dy = make_global_batch()
if self.rank < save_group_size:
x = to_local_batch(x, save_group)
dy = to_local_batch(dy, save_group)
y = model_save(x)
y.backward(dy)
optim_save.step()
# Make sure models are different
if self.rank < min(save_group_size, load_group_size):
for param_save, param_load in zip(model_save.parameters(),
model_load.parameters()):
self.assertRaises(
AssertionError,
torch.testing.assert_close,
param_load,
param_save,
rtol=rtol,
atol=atol,
)
# Save state
state_bytes = None
if self.rank < save_group_size:
state_dict = {
'model': model_save.state_dict(),
'optim': optim_save.state_dict(),
}
byte_stream = io.BytesIO()
torch.save(state_dict, byte_stream)
state_bytes = byte_stream.getvalue()
# Broadcast state from root rank and load
if self.rank < load_group_size:
if load_group_size != save_group_size:
if self.rank != 0:
state_bytes = None
state_bytes = [state_bytes]
torch.distributed.broadcast_object_list(
state_bytes,
src=0,
group=load_group,
)
state_bytes = state_bytes[0]
state_dict = torch.load(io.BytesIO(state_bytes))
model_load.load_state_dict(state_dict['model'])
optim_load.load_state_dict(state_dict['optim'])
# Make sure models are identical
if self.rank < min(save_group_size, load_group_size):
for param_save, param_load in zip(model_save.parameters(),
model_load.parameters()):
torch.testing.assert_close(
param_load,
param_save,
rtol=rtol,
atol=atol
)
# Train both models
torch.manual_seed(self.seed+3)
for step in range(num_steps):
# Reset grads
if self.rank < save_group_size:
optim_save.zero_grad()
if self.rank < load_group_size:
optim_load.zero_grad()
# Synthetic data
x = make_global_batch()
dy = make_global_batch()
# Training step for model that saved checkpoint
y_save = None
dx_save = None
if self.rank < save_group_size:
x_save = to_local_batch(x, save_group)
x_save = x_save.detach().clone().requires_grad_(True)
dy_save = to_local_batch(dy, save_group)
y_save = model_save(x_save)
y_save.backward(dy_save)
dx_save = x_save.grad
y_save = to_global_batch(y_save, save_group)
dx_save = to_global_batch(dx_save, save_group)
# Training step for model that loaded checkpoint
y_load = None
dx_load = None
if self.rank < load_group_size:
x_load = to_local_batch(x, load_group)
x_load = x_load.detach().clone().requires_grad_(True)
dy_load = to_local_batch(dy, load_group)
y_load = model_load(x_load)
y_load.backward(dy_load)
dx_load = x_load.grad
y_load = to_global_batch(y_load, load_group)
dx_load = to_global_batch(dx_load, load_group)
# Check that data tensors match
torch.testing.assert_close(y_load, y_save, rtol=rtol, atol=atol)
torch.testing.assert_close(dx_load, dx_save, rtol=rtol, atol=atol)
# Optimizer step
if self.rank < save_group_size:
optim_save.step()
if self.rank < load_group_size:
optim_load.step()
# Check that parameters match
if self.rank < min(save_group_size, load_group_size):
for param_save, param_load in zip(model_save.parameters(),
model_load.parameters()):
torch.testing.assert_close(
param_load,
param_save,
rtol=rtol,
atol=atol,
)
def test_checkpoint_save_1gpu(self):
"""Test loading checkpoint with one GPU"""
self.test_checkpoint(save_group_size=1)
def test_checkpoint_load_1gpu(self):
"""Test saving checkpoint with one GPU"""
self.test_checkpoint(load_group_size=1)
def test_checkpoint_bf16(self):
"""Test checkpoint with BF16 model"""
self.test_checkpoint(
rtol=5e-2,
atol=1e-5,
save_model_kwargs=dict(
model_dtype=torch.bfloat16,
optim_dtype=torch.float32,
param_sync_dtype=torch.bfloat16,
store_params=False,
store_param_remainders=True,
),
load_model_kwargs=dict(
model_dtype=torch.bfloat16,
optim_dtype=torch.float32,
param_sync_dtype=torch.bfloat16,
store_params=False,
store_param_remainders=True,
),
)
def test_bucket_low_utilization_warning(self):
"""Test warning when bucket utilization is low"""
layer_size = 2*1024*1024
num_layers = 4
fairish_bucket_cap_mb = 4*num_layers*layer_size/(1024*1024)
# Check that warning is raised when bucket utilization is low
with self.assertWarnsRegex(Warning, ".*Consider decreasing the bucket_cap_mb argument."):
self.test_matches_pytorch(
num_layers=num_layers,
layer_size=layer_size,
bucket_cap_mb=fairish_bucket_cap_mb * 2,
contiguous_buffers=True,
)
# Check that warning is not raised when bucket utilization is high
with warnings.catch_warnings(record=True) as warns:
self.test_matches_pytorch(
num_layers=num_layers,
layer_size=layer_size,
bucket_cap_mb=fairish_bucket_cap_mb,
contiguous_buffers=True,
)
for w in warns:
self.assertNotRegex(str(w.message), ".*Consider decreasing the bucket_cap_mb argument.")
if __name__ == "__main__":
# Assume script has been run with torchrun
common_utils.run_tests()
|
apex-master
|
apex/contrib/test/optimizers/test_dist_adam.py
|
import unittest
import torch
from torch.testing._internal import common_utils
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
SKIP_TEST = None
try:
from apex.contrib.bottleneck import Bottleneck, SpatialBottleneck
from apex.contrib.bottleneck import HaloExchangerPeer
from apex.contrib.peer_memory import PeerMemoryPool
except ImportError as e:
SKIP_TEST = e
def ground_truth_bottleneck(C, dtype, explicit_nhwc):
bottleneck = Bottleneck(C, C, C, use_cudnn=True, explicit_nhwc=explicit_nhwc)
bottleneck.to(dtype=dtype, device="cuda")
for p in bottleneck.parameters():
torch.distributed.broadcast(p, 0)
for b in bottleneck.buffers():
torch.distributed.broadcast(b, 0)
return bottleneck
def print_bottleneck_p_and_b(bottleneck):
with torch.no_grad():
for n, p in bottleneck.named_parameters():
print("%s :: %s" % (n, str(p.norm(p=2, dtype=torch.float32))))
for n, p in bottleneck.named_buffers():
print("%s :: %s" % (n, str(p.norm(p=2, dtype=torch.float32))))
def has_nan(x):
if isinstance(x, list) or isinstance(x, tuple):
for xx in x:
if torch.any(torch.isnan(xx)):
return True
return False
    elif isinstance(x, dict):
        for k, v in x.items():
            if torch.any(torch.isnan(v)):
                return True
        return False
    else:
        return torch.any(torch.isnan(x))
def rel_diff_t(xx1, xx2):
return ((xx1 - xx2).norm(p=2, dtype=torch.float32) / (xx1 + xx2).norm(p=2, dtype=torch.float32)).item()
def rel_diff(x1, x2):
if isinstance(x1, list) or isinstance(x1, tuple):
return [rel_diff_t(xx1, xx2) for xx1, xx2 in zip(x1, x2)]
elif isinstance(x1, dict):
return [rel_diff_t(xx1, xx2) for (k1, xx1), (k2, xx2) in zip(x1.items(), x2.items())]
else:
return rel_diff_t(x1, x2)
def graph_it(bottleneck, x):
print("Graphing")
with torch.no_grad():
x = x.clone()
x.grad = None
x.requires_grad = True
return torch.cuda.make_graphed_callables(bottleneck, (x,))
def clone_inputs(bottleneck, x, dy=None):
with torch.no_grad():
x = x.clone()
x.grad = None
x.requires_grad = True
if dy is None:
y = bottleneck(x)
dy = torch.randn_like(y) / 1e2
torch.distributed.broadcast(dy, 0)
return x, dy
def fprop_and_bprop(bottleneck, x, dy):
y = bottleneck(x)
y.backward(dy)
dgrad = x.grad.detach()
wgrad = {}
for n, p in bottleneck.named_parameters():
wgrad[n] = p.grad.detach()
return x, y, dy, dgrad, wgrad
def ground_truth(N, C, H, W, dtype, memory_format, bottleneck):
if memory_format == 1:
# 1 -> explicit nhwc
explicit_nhwc = True
with torch.no_grad():
x = torch.randn([N, H, W, C], dtype=dtype, device="cuda")
torch.distributed.broadcast(x, 0)
x, dy = clone_inputs(bottleneck, x)
return fprop_and_bprop(bottleneck, x, dy)
else:
# 2 -> native nhwc
# 3 -> nchw
explicit_nhwc = False
assert False, "Not implemented yet"
def print_ground_truth(gt):
x, y, dy, dgrad, wgrad = gt
if has_nan(y) or has_nan(dgrad) or has_nan(wgrad):
print("Error! Ground truth has NAN")
else:
print("Ok! No NAN found in ground truth")
def apply_to_different_bottleneck(gt, bottleneck):
with torch.no_grad():
x, _, dy, _, _ = gt
x, dy = clone_inputs(bottleneck, x, dy)
return fprop_and_bprop(bottleneck, x, dy)
def compare_single_field(results, f1, f2, l0, l1, l2):
if has_nan(f1) and has_nan(f2):
results[l0] = "both NAN"
elif has_nan(f1):
results[l0] = "%s.%s NAN" % (l1, l0)
elif has_nan(f2):
results[l0] = "%s.%s NAN" % (l2, l0)
else:
results[l0] = "%s" % (str(rel_diff(f1, f2)))
def compare(gt, bt):
x1, y1, dy1, dgrad1, wgrad1 = gt
x2, y2, dy2, dgrad2, wgrad2 = bt
results = {}
compare_single_field(results, y1, y2, "y", "gt", "bt")
compare_single_field(results, dy1, dy2, "dy", "gt", "bt")
compare_single_field(results, dgrad1, dgrad2, "dgrad", "gt", "bt")
compare_single_field(results, wgrad1, wgrad2, "wgrad", "gt", "bt")
for i in range(torch.distributed.get_world_size()):
if i == torch.distributed.get_rank():
print(i, results)
torch.distributed.barrier()
def spatial_parallel_bottleneck(C, dtype, explicit_nhwc, gt_bottleneck, spatial_parallel_args):
spatial_bottleneck = SpatialBottleneck(
C,
C,
C,
use_cudnn=True,
explicit_nhwc=explicit_nhwc,
spatial_parallel_args=spatial_parallel_args,
)
spatial_bottleneck.to(dtype=dtype, device="cuda")
with torch.no_grad():
sp = {}
for n, p in spatial_bottleneck.named_parameters():
sp[n] = p
for n, p in gt_bottleneck.named_parameters():
sp[n].copy_(p)
sb = {}
for n, b in spatial_bottleneck.named_buffers():
sb[n] = b
for n, b in gt_bottleneck.named_buffers():
sb[n].copy_(b)
return spatial_bottleneck
def n_way_spatial(halex, gt_bottleneck, gt, explicit_nhwc, world_size, rank, fp32_reduce=False):
assert explicit_nhwc, "Only tested for explicit nhwc"
x, _, dy, _, _ = gt
N, H, W, C = list(x.shape) # Tensor is already shaped properly for n-way parallel
dtype = x.dtype
spatial_group_size = world_size
spatial_group_rank = rank
spatial_communicator = None
spatial_halo_exchanger = halex
spatial_method = 1 # 1 -> overlap halo and main conv, 2 -> wait for halo, conv on padded x
use_delay_kernel = False
spatial_parallel_args = (
spatial_group_size,
spatial_group_rank,
spatial_communicator,
spatial_halo_exchanger,
spatial_method,
use_delay_kernel,
)
spatial_bottleneck = spatial_parallel_bottleneck(C, dtype, explicit_nhwc, gt_bottleneck, spatial_parallel_args)
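    # Each rank processes a horizontal slice of height H / spatial_group_size; outputs and dgrads are
    # gathered back along H below, and wgrads are all-reduced across ranks.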
with torch.no_grad():
Hs = H // spatial_group_size
xs = x[:, spatial_group_rank * Hs : (spatial_group_rank + 1) * Hs, :, :].clone()
dys = dy[:, spatial_group_rank * Hs : (spatial_group_rank + 1) * Hs, :, :].clone()
xs.requires_grad = True
spatial_bottleneck = graph_it(spatial_bottleneck, xs)
_, y, _, dgrad, wgrad = fprop_and_bprop(spatial_bottleneck, xs, dys)
    # all-reduce weight gradients, then gather output and dgrad slices from every rank
for n, p in wgrad.items():
if fp32_reduce:
p32 = p.float()
torch.distributed.all_reduce(p32)
p.copy_(p32.half())
else:
torch.distributed.all_reduce(p)
ys = [torch.empty_like(y) for _ in range(spatial_group_size)]
torch.distributed.all_gather(ys, y)
y = torch.cat(ys, dim=1)
dgrads = [torch.empty_like(dgrad) for _ in range(spatial_group_size)]
torch.distributed.all_gather(dgrads, dgrad)
dgrad = torch.cat(dgrads, dim=1)
return x, y, dy, dgrad, wgrad
def main():
torch.use_deterministic_algorithms(True)
torch.distributed.init_process_group("nccl")
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
torch.cuda.set_device(rank)
explicit_nhwc = True
dtype = torch.float16
N, C, H, W = 1, 64, 200, 336
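    # Round H up so that each rank's slice height (H / world_size) is a multiple of 8.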
Hs = ((H + 8 * world_size - 1) // (8 * world_size)) * 8
H = Hs * world_size
gt_bottleneck = ground_truth_bottleneck(C, dtype, explicit_nhwc)
gt = ground_truth(N, C, H, W, dtype, 1, gt_bottleneck)
# verify that spatial bottleneck with group_size 1 produces same results as ground truth bottleneck
spatial_bottleneck = spatial_parallel_bottleneck(C, dtype, explicit_nhwc, gt_bottleneck, None)
bt = apply_to_different_bottleneck(gt, spatial_bottleneck)
compare(gt, bt)
# print_bottleneck_p_and_b(gt_bottleneck)
# print_bottleneck_p_and_b(spatial_bottleneck)
group_size = world_size
group = rank // group_size
ranks = [group * group_size + i for i in range(group_size)]
rank_in_group = rank % group_size
spatial_group_size = world_size
spatial_communicator = None
peer_pool = PeerMemoryPool(0, 64 * 1024 * 1024, ranks)
# class HaloExchangerNoComm(HaloExchanger):
# def __init__(self, ranks, rank_in_group):
# class HaloExchangerAllGather(HaloExchanger):
# def __init__(self, ranks, rank_in_group, comm):
# class HaloExchangerSendRecv(HaloExchanger):
# def __init__(self, ranks, rank_in_group):
# class HaloExchangerPeer(HaloExchanger):
# def __init__(self, ranks, rank_in_group, peer_pool, explicit_nhwc, numSM=1):
# halex = HaloExchangerAllGather(ranks, rank_in_group)
# halex = HaloExchangerSendRecv(ranks, rank_in_group)
halex = HaloExchangerPeer(ranks, rank_in_group, peer_pool, explicit_nhwc, numSM=0)
# print("halex.signals = %s" % (str(halex.signals)))
# Make sure peer memory halo exchanger has finished initializing flags on all ranks before proceeding
# torch.cuda.synchronize()
# torch.distributed.barrier()
bt2 = n_way_spatial(halex, gt_bottleneck, gt, explicit_nhwc, world_size, rank, fp32_reduce=True)
compare(gt, bt2)
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TestBottleneck(NcclDistributedTestBase):
# PyTorch's float16 tolerance values, see https://pytorch.org/docs/stable/testing.html#torch.testing.assert_close
fp16_tolerance = {"atol": 1e-5, "rtol": 1e-3}
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 2)
def test_bottleneck_without_peer_memory(self) -> None:
explicit_nhwc: bool = True
dtype: torch.dtype = torch.float16
N, C, H, W = 1, 64, 200, 336
Hs = ((H + 8 * self.world_size - 1) // (8 * self.world_size)) * 8
H = Hs * self.world_size
gt_bottleneck = ground_truth_bottleneck(C, dtype, explicit_nhwc)
gt = ground_truth(N, C, H, W, dtype, 1, gt_bottleneck)
spatial_bottleneck = spatial_parallel_bottleneck(C, dtype, explicit_nhwc, gt_bottleneck, None)
bt = apply_to_different_bottleneck(gt, spatial_bottleneck)
self.assertEqual(gt, bt, **self.fp16_tolerance)
def test_bottleneck_with_peer_memory(self) -> None:
explicit_nhwc: bool = True
dtype: torch.dtype = torch.float16
N, C, H, W = 1, 64, 200, 336
Hs = ((H + 8 * self.world_size - 1) // (8 * self.world_size)) * 8
H = Hs * self.world_size
gt_bottleneck = ground_truth_bottleneck(C, dtype, explicit_nhwc)
gt = ground_truth(N, C, H, W, dtype, 1, gt_bottleneck)
group = self.rank // self.world_size
ranks = [group * self.world_size + i for i in range(self.world_size)]
rank_in_group = self.rank % self.world_size
spatial_group_size, spatial_communicator = self.world_size, None
peer_pool = PeerMemoryPool(0, 64 * 1024 * 1024, ranks)
halo_exchanger_peer = HaloExchangerPeer(ranks, rank_in_group, peer_pool, explicit_nhwc, numSM=0)
bt2 = n_way_spatial(
halo_exchanger_peer, gt_bottleneck, gt, explicit_nhwc, self.world_size, self.rank, fp32_reduce=True
)
# TODO(crcrpar): Investigate the implementation to mitigate the numerical errors.
# NOTE(crcrpar): This assert often fails due to numerical errors.
# self.assertEqual(gt, bt2, **self.fp16_tolerance)
if __name__ == "__main__":
common_utils.run_tests()
|
apex-master
|
apex/contrib/test/bottleneck/test_bottleneck_module.py
|
apex-master
|
apex/contrib/test/bottleneck/__init__.py
|
|
apex-master
|
apex/contrib/test/conv_bias_relu/__init__.py
|
|
import copy
import math
import random
import unittest
import torch
import torch.nn.functional as F
HAS_CONV_BIAS_RELU = None
try:
from apex.contrib.conv_bias_relu import ConvBiasReLU, ConvBias, ConvBiasMaskReLU, ConvFrozenScaleBiasReLU
except ImportError as e:
HAS_CONV_BIAS_RELU = False
else:
HAS_CONV_BIAS_RELU = True
@unittest.skipIf(not HAS_CONV_BIAS_RELU, "`apex.contrib.conv_bias_relu` is not found.")
class FusedDenseTest(unittest.TestCase):
def setUp(self, seed=0):
super().setUp()
torch.manual_seed(seed)
self.batch_size = random.randint(1, 64)
self.in_channels = random.randint(1, 64) * 8
self.out_channels = random.randint(1, 64) * 8
self.in_height = self.in_width = random.randint(5, 100)
self.conv_kernel_size = random.randint(1, 5)
self.conv_pad = random.randint(0, int(self.conv_kernel_size / 2))
self.conv_stride = random.randint(1, 5)
self.conv_dilation = 1
self.out_height = self.out_width = \
math.floor((self.in_height + 2 * self.conv_pad - \
self.conv_dilation * (self.conv_kernel_size - 1) - 1) / self.conv_stride + 1)
self.x = torch.randint(low=-16, high=16,
size=[self.batch_size, self.in_channels, self.in_height, self.in_width]) \
.cuda().to(memory_format=torch.channels_last).float()
self.x_ = self.x.clone()
self.x.requires_grad_()
self.x_.requires_grad_()
self.mask = torch.randn([self.batch_size, self.out_channels, self.out_height, self.out_width]).cuda().to(memory_format=torch.channels_last)
self.mask = (self.mask > 0).to(torch.int8)
self.mask_ = self.mask.clone()
self.scale = torch.randn([1, self.out_channels, 1, 1]).half().cuda()
self.scale_ = self.scale.clone()
self.bias = torch.randn([1, self.out_channels, 1, 1]).half().cuda()
self.bias_ = self.bias.clone()
self.conv1 = torch.nn.Conv2d(self.in_channels, self.out_channels, self.conv_kernel_size,
stride=self.conv_stride, padding=self.conv_pad).cuda().to(memory_format=torch.channels_last)
self.conv1_ = copy.deepcopy(self.conv1)
self.conv2 = torch.nn.Conv2d(self.in_channels, self.out_channels, self.conv_kernel_size,
stride=self.conv_stride, padding=self.conv_pad, bias=False).cuda().to(memory_format=torch.channels_last)
self.conv2_ = copy.deepcopy(self.conv2)
print()
print('> input=[{}, {}, {}, {}]'.format(self.batch_size, self.in_channels, self.in_height, self.in_width))
print('> kernel=[{}, {}, {}, {}], stride={}, pad={}'.format(self.out_channels, self.in_channels,
self.conv_kernel_size, self.conv_kernel_size,
self.conv_stride, self.conv_pad))
def test_conv_bias_relu(self):
with torch.cuda.amp.autocast(dtype=torch.half):
out = ConvBiasReLU(self.x, self.conv1.weight, self.conv1.bias.reshape(1, -1, 1, 1), self.conv_pad, self.conv_stride)
loss = (out.float()**2).sum() / out.numel()
loss.backward()
with torch.cuda.amp.autocast(dtype=torch.half):
out_ = F.relu(self.conv1_(self.x_))
loss_ = (out_**2).sum() / out_.numel()
loss_.backward()
torch.testing.assert_close(out_, out, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.bias.grad, self.conv1.bias.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.weight.grad, self.conv1.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.x_.grad, self.x.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
def test_conv_bias(self):
with torch.cuda.amp.autocast(dtype=torch.half):
out = ConvBias(self.x, self.conv1.weight, self.conv1.bias.reshape(1, -1, 1, 1), self.conv_pad, self.conv_stride)
loss = (out.float()**2).sum() / out.numel()
loss.backward()
with torch.cuda.amp.autocast(dtype=torch.half):
out_ = self.conv1_(self.x_)
loss_ = (out_**2).sum() / out_.numel()
loss_.backward()
torch.testing.assert_close(out, out_, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.bias.grad, self.conv1.bias.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.weight.grad, self.conv1.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.x_.grad, self.x.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
def test_conv_bias_mask_relu(self):
with torch.cuda.amp.autocast(dtype=torch.half):
out = ConvBiasMaskReLU(self.x, self.conv1.weight, self.conv1.bias.reshape(1, -1, 1, 1), self.mask, self.conv_pad, self.conv_stride)
loss = (out.float()**2).sum() / out.numel()
loss.backward()
with torch.cuda.amp.autocast(dtype=torch.half):
out_ = F.relu(self.conv1_(self.x_) * self.mask_)
loss_ = (out_**2).sum() / out_.numel()
loss_.backward()
torch.testing.assert_close(out, out_, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.bias.grad, self.conv1.bias.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.weight.grad, self.conv1.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.x_.grad, self.x.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
def test_conv_frozen_scale_bias_relu(self):
with torch.cuda.amp.autocast(dtype=torch.half):
out = ConvFrozenScaleBiasReLU(self.x, self.conv2.weight, self.scale, self.bias, self.conv_pad, self.conv_stride)
loss = (out.float()**2).sum() / out.numel()
loss.backward()
with torch.cuda.amp.autocast(dtype=torch.half):
out_ = F.relu(self.conv2_(self.x_) * self.scale_ + self.bias_)
loss_ = (out_**2).sum() / out_.numel()
loss_.backward()
torch.testing.assert_close(out, out_, atol=2.5e-3, rtol=2.5e-3, equal_nan=True)
torch.testing.assert_close(self.conv2_.weight.grad, self.conv2.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.x_.grad, self.x.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
if __name__ == '__main__':
unittest.main()
|
apex-master
|
apex/contrib/test/conv_bias_relu/test_conv_bias_relu.py
|
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import SelfMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class SelfMultiheadAttnNormAddTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = SelfMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=True, impl="default"
)
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = SelfMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=True, impl="fast"
)
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
def test_self_multihead_attn_norm_add(self):
grads = torch.randn_like(self.tst_inputs)
for _ in range(0, 5):
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
self.ref_inputs.backward(grads)
self.tst_inputs.backward(grads)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
|
apex-master
|
apex/contrib/test/multihead_attn/test_self_multihead_attn_norm_add.py
|
apex-master
|
apex/contrib/test/multihead_attn/__init__.py
|
|
import unittest
import torch
import torch.nn.functional as F
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import fast_mask_softmax_dropout_func
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class FusedSoftmaxTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
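        # Additive attention mask: masked positions receive -10000 so they vanish after softmax.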
self.mask = (torch.randn(self.sequences, self.seq_length) > 0).cuda()
self.mask = self.mask.half() * -10000
self.ref_inputs = torch.randn(
self.heads * self.sequences,
self.seq_length,
self.seq_length,
dtype=torch.float16,
device=torch.device("cuda"),
).requires_grad_(True)
self.tst_inputs = self.ref_inputs.clone().detach().requires_grad_(True)
def test_fused_softmax(self):
grads = torch.randn_like(self.tst_inputs)
y_ref = self.ref_inputs.view(self.sequences, self.heads, self.seq_length, self.seq_length)
y_ref = y_ref + self.mask.unsqueeze(1).unsqueeze(2)
y_ref = y_ref.view(self.sequences * self.heads, self.seq_length, self.seq_length)
y_ref = F.softmax(y_ref, dim=-1)
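        # The private torch._fused_dropout takes a keep probability; 1.0 means no dropout.
        # It returns (output, mask), hence the y_ref[0] indexing below.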
y_ref = torch._fused_dropout(y_ref, 1.0)
y_tst = fast_mask_softmax_dropout_func(True, self.heads, self.tst_inputs, self.mask, True, 0.0)
y_ref[0].backward(grads)
y_tst.backward(grads)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(y_ref[0], y_tst, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
|
apex-master
|
apex/contrib/test/multihead_attn/test_mha_fused_softmax.py
|
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import EncdecMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class EncdecMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = EncdecMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=False, impl="default"
)
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs_q = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
self.ref_inputs_k = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = EncdecMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=False, impl="fast"
)
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs_q = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
self.tst_inputs_k = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
def test_encdec_multihead_attn(self):
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs_q,
self.ref_inputs_k,
self.ref_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs_q,
self.tst_inputs_k,
self.tst_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
torch.testing.assert_close(self.ref_inputs_q, self.tst_inputs_q, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(self.ref_inputs_k, self.tst_inputs_k, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
with torch.no_grad():
ref_grads = torch.randn_like(ref_outputs)
tst_grads = ref_grads.clone()
ref_outputs.backward(ref_grads)
tst_outputs.backward(tst_grads)
torch.testing.assert_close(self.ref_inputs_q.grad, self.tst_inputs_q.grad, atol=1e-3, rtol=1e-3)
def test_encdec_multihead_attn_time_mask(self):
grads = torch.randn_like(self.tst_inputs_q)
time_mask_byte = torch.triu(
torch.ones(
self.tst_inputs_q.size(0), self.tst_inputs_k.size(0), device=torch.device("cuda"), dtype=torch.uint8
),
1,
)
time_mask_bool = time_mask_byte.to(torch.bool)
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs_q,
self.ref_inputs_k,
self.ref_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=time_mask_bool,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs_q,
self.tst_inputs_k,
self.tst_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=time_mask_byte,
is_training=True,
)
self.ref_inputs_q.backward(grads)
self.tst_inputs_q.backward(grads)
torch.testing.assert_close(self.ref_inputs_q, self.tst_inputs_q, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(self.ref_inputs_k, self.tst_inputs_k, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs_q.grad, self.tst_inputs_q.grad, atol=1e-3, rtol=1e-3)
def test_encdec_multihead_attn_pad_mask(self):
grads = torch.randn_like(self.tst_inputs_q)
pad_mask_byte = torch.tril(
torch.ones(
self.tst_inputs_k.size(1), self.tst_inputs_k.size(0), device=torch.device("cuda"), dtype=torch.uint8
),
1,
)
pad_mask_bool = pad_mask_byte.to(torch.bool)
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs_q,
self.ref_inputs_k,
self.ref_inputs_k,
key_padding_mask=pad_mask_bool,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs_q,
self.tst_inputs_k,
self.tst_inputs_k,
key_padding_mask=pad_mask_byte,
need_weights=False,
attn_mask=None,
is_training=True,
)
self.ref_inputs_q.backward(grads)
self.tst_inputs_q.backward(grads)
torch.testing.assert_close(self.ref_inputs_q, self.tst_inputs_q, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(self.ref_inputs_k, self.tst_inputs_k, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs_q.grad, self.tst_inputs_q.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
|
apex-master
|
apex/contrib/test/multihead_attn/test_encdec_multihead_attn.py
|
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import SelfMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class SelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = SelfMultiheadAttn(
self.hidden_dim,
self.heads,
dropout=self.dropout_prob,
bias=True,
include_norm_add=False,
separate_qkv_params=True,
mask_additive=True,
impl="default",
)
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = SelfMultiheadAttn(
self.hidden_dim,
self.heads,
dropout=self.dropout_prob,
bias=True,
include_norm_add=False,
separate_qkv_params=True,
mask_additive=True,
impl="fast",
)
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
def test_self_multihead_attn_additive_mask(self):
grads = torch.randn_like(self.tst_inputs)
mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True,
)
self.ref_inputs.backward(grads)
self.tst_inputs.backward(grads)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
|
apex-master
|
apex/contrib/test/multihead_attn/test_fast_self_multihead_attn_bias.py
|
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import EncdecMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class EncdecMultiheadAttnNormAddTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = EncdecMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=True, impl="default"
)
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs_q = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
self.ref_inputs_k = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = EncdecMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=True, impl="fast"
)
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs_q = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
self.tst_inputs_k = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
def test_encdec_multihead_attn_norm_add(self):
grads = torch.randn_like(self.tst_inputs_q)
for _ in range(5):
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs_q,
self.ref_inputs_k,
self.ref_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs_q,
self.tst_inputs_k,
self.tst_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
self.ref_inputs_q.backward(grads)
self.tst_inputs_q.backward(grads)
torch.testing.assert_close(self.ref_inputs_q, self.tst_inputs_q, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(self.ref_inputs_k, self.tst_inputs_k, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs_q.grad, self.tst_inputs_q.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
|
apex-master
|
apex/contrib/test/multihead_attn/test_encdec_multihead_attn_norm_add.py
|
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import SelfMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class SelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = SelfMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=False, impl="default"
)
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = SelfMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=False, impl="fast"
)
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
def test_self_multihead_attn(self):
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
with torch.no_grad():
ref_grads = torch.randn_like(self.tst_inputs)
tst_grads = ref_grads.clone()
ref_outputs.backward(ref_grads)
tst_outputs.backward(tst_grads)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
def test_self_multihead_attn_time_mask(self):
grads = torch.randn_like(self.tst_inputs)
time_mask_byte = torch.triu(
torch.ones(
self.tst_inputs.size(0), self.tst_inputs.size(0), device=torch.device("cuda"), dtype=torch.uint8
),
1,
)
time_mask_bool = time_mask_byte.to(torch.bool)
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=time_mask_bool,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=time_mask_byte,
is_training=True,
)
        ref_outputs.backward(grads)
        tst_outputs.backward(grads)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
def test_self_multihead_attn_pad_mask(self):
grads = torch.randn_like(self.tst_inputs)
pad_mask_byte = torch.tril(
torch.ones(
self.tst_inputs.size(1), self.tst_inputs.size(0), device=torch.device("cuda"), dtype=torch.uint8
),
1,
)
pad_mask_bool = pad_mask_byte.to(torch.bool)
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=pad_mask_bool,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=pad_mask_byte,
need_weights=False,
attn_mask=None,
is_training=True,
)
        ref_outputs.backward(grads)
        tst_outputs.backward(grads)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
|
apex-master
|
apex/contrib/test/multihead_attn/test_self_multihead_attn.py
|
apex-master
|
apex/contrib/test/group_norm/__init__.py
|
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are not permit-
# ted.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import unittest
SKIP_TEST = None
try:
from apex.contrib.group_norm.group_norm import cuda_group_norm_nhwc_one_pass
from apex.contrib.group_norm.group_norm import cuda_group_norm_nhwc_two_pass
from apex.contrib.group_norm.group_norm import torch_group_norm
from apex.contrib.group_norm import GroupNorm
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class GroupNormTest(unittest.TestCase):
def setUp(self, seed=0):
super().setUp()
torch.manual_seed(seed)
def verify_group_norm(self,
tst_func,
N=32,
C=128,
H=256,
W=256,
G=32,
ref_func=torch_group_norm,
xdtype=torch.float16,
wdtype=torch.float32,
eps=1e-5,
memory_format=torch.channels_last,
device='cuda',
act=""):
# create data
x_shape = (N, C, H, W)
w_shape = (C,)
weight = torch.rand(w_shape,
dtype=wdtype,
device='cuda',
requires_grad=True)
bias = torch.rand(w_shape,
dtype=wdtype,
device='cuda',
requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=xdtype, device='cuda')
x = x.to(memory_format=memory_format)
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
# forward pass
y_ref = ref_func(x, G, weight, bias, eps, act)
if tst_func is GroupNorm:
gn = GroupNorm(G, C, eps, device=device, dtype=wdtype, act=act)
with torch.no_grad():
gn.weight = torch.nn.Parameter(weight)
gn.bias = torch.nn.Parameter(bias)
y_tst = gn(x)
else:
y_tst = tst_func(x, G, weight, bias, eps, act)
# backward pass
y_ref.backward(dy, retain_graph=True)
dx_ref, dw_ref, db_ref = [t.grad.clone() for t in [x, weight, bias]]
x.grad.zero_()
weight.grad.zero_()
bias.grad.zero_()
y_tst.backward(dy, retain_graph=True)
if tst_func is GroupNorm:
dx_tst, dw_tst, db_tst = x.grad, gn.weight.grad, gn.bias.grad
else:
dx_tst, dw_tst, db_tst = [t.grad.clone() for t in [x, weight, bias]]
# compare
torch.testing.assert_close(y_tst, y_ref, atol=4e-2, rtol=0)
torch.testing.assert_close(dx_tst, dx_ref, atol=1e-2, rtol=0)
torch.testing.assert_close(dw_tst, dw_ref, atol=1e-2, rtol=0)
torch.testing.assert_close(db_tst, db_ref, atol=1e-2, rtol=0)
def test_fp16_one_pass_algo(self):
self.verify_group_norm(cuda_group_norm_nhwc_one_pass, act="")
def test_fp16_two_pass_algo(self):
self.verify_group_norm(cuda_group_norm_nhwc_two_pass, act="")
def test_fp16_one_pass_algo_with_swish(self):
self.verify_group_norm(cuda_group_norm_nhwc_one_pass, act="swish")
def test_fp16_two_pass_algo_with_swish(self):
self.verify_group_norm(cuda_group_norm_nhwc_two_pass, act="swish")
def test_bf16_one_pass_algo(self):
self.verify_group_norm(cuda_group_norm_nhwc_one_pass,
xdtype=torch.bfloat16,
act="")
def test_bf16_two_pass_algo(self):
self.verify_group_norm(cuda_group_norm_nhwc_two_pass,
xdtype=torch.bfloat16,
act="")
def test_bf16_one_pass_algo_with_swish(self):
self.verify_group_norm(cuda_group_norm_nhwc_one_pass,
xdtype=torch.bfloat16,
act="swish")
def test_bf16_two_pass_algo_with_swish(self):
self.verify_group_norm(cuda_group_norm_nhwc_two_pass,
xdtype=torch.bfloat16,
act="swish")
def test_fp32_one_pass_algo(self):
self.verify_group_norm(cuda_group_norm_nhwc_one_pass,
xdtype=torch.float32,
act="")
def test_fp32_two_pass_algo(self):
self.verify_group_norm(cuda_group_norm_nhwc_two_pass,
xdtype=torch.float32,
act="")
def test_fp32_one_pass_algo_with_swish(self):
self.verify_group_norm(cuda_group_norm_nhwc_one_pass,
xdtype=torch.float32,
act="swish")
def test_fp32_two_pass_algo_with_swish(self):
self.verify_group_norm(cuda_group_norm_nhwc_two_pass,
xdtype=torch.float32,
act="swish")
def test_group_norm_module(self):
self.verify_group_norm(GroupNorm, G=16, act="swish")
def test_16_groups(self):
sizes = [
[8, 2560, 16, 16],
[8, 1920, 32, 32],
[8, 1920, 16, 16],
[8, 2560, 8, 8],
]
for sz in sizes:
n, c, h, w = sz
self.verify_group_norm(GroupNorm,
N=n,
C=c,
H=h,
W=w,
G=16,
act="swish")
if __name__ == '__main__':
unittest.main()
|
apex-master
|
apex/contrib/test/group_norm/test_group_norm.py
|
import random
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.clip_grad import clip_grad_norm_
except ImportError as e:
SKIP_TEST = e
def make_params(
num_params,
sizes=[1,2,3,4,5],
num_dims=[1,2,3],
dtypes=[torch.float32],
devices=['cuda'],
make_copy=False,
):
"""Construct parameters with random configurations"""
# Construct parameters
params = []
for _ in range(num_params):
dims = [random.choice(sizes) for _ in range(random.choice(num_dims))]
dtype = random.choice(dtypes)
device = random.choice(devices)
p = torch.nn.Parameter(torch.randn(dims, dtype=dtype, device=device))
p.grad = torch.randn_like(p)
params.append(p)
# Copy parameters if needed
if make_copy:
params_copy = []
for p in params:
p_copy = p.clone().detach()
p_copy.grad = p.grad.clone().detach()
params_copy.append(p_copy)
return params, params_copy
else:
return params
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class ClipGradNormTest(unittest.TestCase):
def setUp(self, seed=1234):
super().setUp()
random.seed(seed)
torch.manual_seed(seed)
def test_matches_pytorch(
self,
num_params=41,
dtypes=[torch.float32, torch.float16, torch.float64],
devices=['cuda', 'cpu'],
max_norm=0.54321,
norm_type=2.0,
rtol=1e-3,
atol=1e-20,
):
"""Make sure PyTorch and Apex gradient clipping produce same results"""
# Construct identical sets of parameters
torch_params, apex_params = make_params(
num_params,
dtypes=dtypes,
devices=devices,
make_copy=True,
)
# Apply gradient clipping
torch_norm = torch.nn.utils.clip_grad_norm_(
torch_params,
max_norm,
norm_type=norm_type,
)
apex_norm = clip_grad_norm_(
apex_params,
max_norm,
norm_type=norm_type,
)
# Make sure PyTorch and Apex get same results
torch.testing.assert_close(
apex_norm, torch_norm,
rtol=rtol,
atol=atol,
check_dtype=False,
)
for torch_p, apex_p in zip(torch_params, apex_params):
torch.testing.assert_close(
apex_p, torch_p,
rtol=0,
atol=0,
) # Params should be unaffected
torch.testing.assert_close(
apex_p.grad, torch_p.grad,
rtol=rtol,
atol=atol,
)
def test_matches_pytorch_fp16(self):
self.test_matches_pytorch(num_params=11, dtypes=[torch.float16])
def test_matches_pytorch_fp32(self):
self.test_matches_pytorch(dtypes=[torch.float32], rtol=1e-6)
def test_matches_pytorch_fp64(self):
self.test_matches_pytorch(dtypes=[torch.float64], rtol=1e-15)
def test_matches_pytorch_cpu(self):
self.test_matches_pytorch(devices=['cpu'])
def test_matches_pytorch_infnorm(self):
self.test_matches_pytorch(norm_type=float('inf'))
def test_matches_pytorch_1norm(self):
self.test_matches_pytorch(norm_type=1.0)
def test_raises_on_mismatch(self):
# Construct different sets of parameters
torch_params, apex_params = make_params(7, make_copy=True)
with torch.no_grad():
torch_params[0].grad.view(-1)[0] = 1.23
apex_params[0].grad.view(-1)[0] = 3.21
# Apply gradient clipping
torch_norm = torch.nn.utils.clip_grad_norm_(
torch_params,
0.54321,
)
apex_norm = clip_grad_norm_(
apex_params,
0.54321,
)
# Make sure PyTorch and Apex get different results
self.assertRaises(
AssertionError,
torch.testing.assert_close,
apex_norm, torch_norm,
rtol=1e-3,
atol=1e-20,
check_dtype=False,
)
for torch_p, apex_p in zip(torch_params, apex_params):
self.assertRaises(
AssertionError,
torch.testing.assert_close,
apex_p.grad, torch_p.grad,
rtol=1e-3,
atol=1e-20,
)
def test_raises_on_nan(self):
params = make_params(5, num_dims=[1])
params[2].grad[-1] = float('NaN')
self.assertRaises(
RuntimeError, clip_grad_norm_, params, 1.0, error_if_nonfinite=True)
def test_raises_on_inf(self):
params = make_params(5, num_dims=[1])
params[2].grad[-1] = float('inf')
self.assertRaises(
RuntimeError, clip_grad_norm_, params, 1.0, error_if_nonfinite=True)
if __name__ == "__main__":
unittest.main()
|
apex-master
|
apex/contrib/test/clip_grad/test_clip_grad.py
|
apex-master
|
apex/contrib/test/clip_grad/__init__.py
|
|
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.transducer import TransducerJoint
from apex.contrib.transducer import _transducer_ref as transducer_ref
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TransducerJointTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
def gen_input(self, for_vector_kernel):
self.B = 4
T_min = 51
T_max = 101
U_min = 12
U_max = 25
if for_vector_kernel:
H = 512
else:
H = 509
dtype = torch.float16
device = "cuda"
self.f_tst = torch.randn((self.B, T_max, H), dtype=dtype, requires_grad=True, device=device)
self.g_tst = torch.randn((self.B, U_max, H), dtype=dtype, requires_grad=True, device=device)
self.h_grad = torch.randn(self.B, T_max, U_max, H, dtype=dtype, device=device)
self.f_len = torch.randint(T_min, T_max+1, (self.B,), dtype=torch.int, device=device)
self.g_len = torch.randint(U_min, U_max+1, (self.B,), dtype=torch.int, device=device)
self.f_len[torch.randint(0, self.B, (1,)).item()] = T_max
self.g_len[torch.randint(0, self.B, (1,)).item()] = U_max
self.dropout_prob = 0.5
# Make sure gradients from out-of-bound locations are zero. This should be guaranteed by
# the loss function
for b in range(self.B):
self.h_grad[b, self.f_len[b]:, :, :] = 0
self.h_grad[b, :, self.g_len[b]:, :] = 0
self.h_grad_packed = self._pack(self.h_grad, self.f_len, self.g_len)
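    # _pack flattens the valid (f_len[b] x g_len[b]) region of each batch entry into a
    # single 1D buffer, matching the packed layout consumed by TransducerJoint; _unpack
    # restores a packed dropout mask to the dense (B, T, U, H) layout for comparison.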
def _pack(self, x, f_len, g_len):
B = x.size(0)
list_x = []
for b in range(B):
list_x_row = [x[b, t, :g_len[b]] for t in range(f_len[b])]
x_row = torch.cat(list_x_row)
list_x.append(x_row)
x_packed = torch.cat(list_x).data.clone()
x_packed.requires_grad = True
batch_offset = torch.cumsum(f_len * g_len, dim=0)
return x_packed
def _unpack(self, x, f_len, g_len):
batch_offset = torch.cumsum(f_len * g_len, dim=0)
x_unpacked = torch.zeros_like(self.h_grad, dtype=torch.uint8)
B = self.h_grad.size(0)
H = self.h_grad.size(-1)
for b in range(B):
my_batch_offset = 0 if b == 0 else batch_offset[b-1]
my_f_len = f_len[b]
my_g_len = g_len[b]
for t in range(my_f_len):
x_unpacked[b, t, :my_g_len] = x[my_batch_offset + t*my_g_len :
my_batch_offset + t*my_g_len + my_g_len]
return x_unpacked
def run_transducer_joint(self, for_vector_kernel, pack_output, relu, dropout):
self.gen_input(for_vector_kernel=for_vector_kernel)
# Generate reference
f_ref = self.f_tst.data.clone()
g_ref = self.g_tst.data.clone()
f_ref.requires_grad = True
g_ref.requires_grad = True
my_joint = TransducerJoint(pack_output=pack_output, relu=relu, dropout=dropout,
dropout_prob=self.dropout_prob, probe_mask=True)
if not pack_output:
h_tst = my_joint( f=self.f_tst,
g=self.g_tst,
f_len=self.f_len,
g_len=self.g_len)
h_tst.backward(self.h_grad)
if dropout:
mask = my_joint.mask_probe[0]
else:
batch_offset = torch.cumsum(self.f_len * self.g_len, dim=0)
h_tst = my_joint( f=self.f_tst,
g=self.g_tst,
f_len=self.f_len,
g_len=self.g_len,
batch_offset=batch_offset,
packed_batch=batch_offset[-1])
h_tst.backward(self.h_grad_packed)
if dropout:
mask_packed = my_joint.mask_probe[0]
mask = self._unpack(mask_packed, self.f_len, self.g_len)
# reference
h_ref, f_grad_ref, g_grad_ref \
= transducer_ref.transducer_joint_reference(f=f_ref,
g=g_ref,
h_grad=self.h_grad,
f_len=self.f_len,
g_len=self.g_len,
pack_output=pack_output,
relu=relu,
dropout=dropout,
dropout_prob=self.dropout_prob,
mask=mask if dropout else None)
f_grad_tst = self.f_tst.grad
g_grad_tst = self.g_tst.grad
self.assertTrue(torch.allclose(h_ref, h_tst, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(f_grad_ref, f_grad_tst, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(g_grad_ref, g_grad_tst, atol=1e-4, rtol=1e-4))
def test_transducer_joint(self):
        self.run_transducer_joint(for_vector_kernel=False, pack_output=False, relu=False, dropout=False)
def test_transducer_joint_vec(self):
self.run_transducer_joint(for_vector_kernel=True, pack_output=False, relu=False, dropout=False)
def test_transducer_joint_pack(self):
self.run_transducer_joint(for_vector_kernel=False, pack_output=True, relu=False, dropout=False)
def test_transducer_joint_vec_pack(self):
self.run_transducer_joint(for_vector_kernel=True, pack_output=True, relu=False, dropout=False)
def test_transducer_joint_relu(self):
        self.run_transducer_joint(for_vector_kernel=False, pack_output=False, relu=True, dropout=False)
def test_transducer_joint_vec_relu(self):
self.run_transducer_joint(for_vector_kernel=True, pack_output=False, relu=True, dropout=False)
def test_transducer_joint_pack_relu(self):
self.run_transducer_joint(for_vector_kernel=False, pack_output=True, relu=True, dropout=False)
def test_transducer_joint_vec_pack_relu(self):
self.run_transducer_joint(for_vector_kernel=True, pack_output=True, relu=True, dropout=False)
@unittest.expectedFailure
def test_transducer_joint_relu_dropout(self):
        self.run_transducer_joint(for_vector_kernel=False, pack_output=False, relu=True, dropout=True)
@unittest.expectedFailure
def test_transducer_joint_vec_relu_dropout(self):
self.run_transducer_joint(for_vector_kernel=True, pack_output=False, relu=True, dropout=True)
@unittest.expectedFailure
def test_transducer_joint_pack_relu_dropout(self):
self.run_transducer_joint(for_vector_kernel=False, pack_output=True, relu=True, dropout=True)
@unittest.expectedFailure
def test_transducer_joint_vec_pack_relu_dropout(self):
self.run_transducer_joint(for_vector_kernel=True, pack_output=True, relu=True, dropout=True)
if __name__ == '__main__':
unittest.main()
|
apex-master
|
apex/contrib/test/transducer/test_transducer_joint.py
|
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.transducer import TransducerLoss
from apex.contrib.transducer import _transducer_ref as transducer_ref
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TransducerLossTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
def gen_input(self, scalar_t, for_vector_kernel):
self.B = 5
T_min = 23
T_max = 51
U_min = 12
U_max = 25
V = 16 if for_vector_kernel else 14
self.blank_idx = V - 1
device = "cuda"
self.x_tst = torch.randn((self.B, T_max, U_max, V), dtype=scalar_t, requires_grad=True,
device=device)
self.y = torch.randint(0, self.blank_idx, (self.B, U_max-1), dtype=torch.int, device=device)
self.f_len = torch.randint(T_min, T_max+1, (self.B,), dtype=torch.int, device=device)
self.y_len = torch.randint(U_min-1, U_max, (self.B,), dtype=torch.int, device=device)
self.f_len[torch.randint(0, self.B, (1,)).item()] = T_max
self.y_len[torch.randint(0, self.B, (1,)).item()] = U_max-1
self.x_tst_packed, self.batch_offset = self._pack(self.x_tst)
# Generate reference
x_ref = self.x_tst.data.clone()
x_ref.requires_grad = True
loss_grad = torch.ones(x_ref.size(0), dtype=x_ref.dtype, device=x_ref.device)/x_ref.size(0)
_, _, self.grad_ref, self.loss_ref \
= transducer_ref.transducer_loss_reference( x=x_ref,
label=self.y,
f_len=self.f_len,
y_len=self.y_len,
blank_idx=self.blank_idx,
loss_grad=loss_grad)
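    # _pack flattens each batch entry's valid (f_len[b] x (y_len[b]+1)) region into a 1D
    # buffer plus per-batch offsets for the packed-input path; _unpack maps packed
    # gradients back to the dense (B, T, U, V) layout for comparison with the reference.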
def _pack(self, x):
list_x = []
for b in range(self.B):
list_x_row = [x[b, t, : self.y_len[b]+1] for t in range(self.f_len[b])]
x_row = torch.cat(list_x_row)
list_x.append(x_row)
x_packed = torch.cat(list_x).data.clone()
x_packed.requires_grad = True
batch_offset = torch.cumsum(self.f_len * (self.y_len+1), dim=0)
return x_packed, batch_offset
def _unpack(self, x):
x_unpacked = torch.zeros(self.B, self.f_len.max(), self.y_len.max()+1, x.size(-1),
dtype=x.dtype, device=x.device)
for b in range(self.B):
my_batch_offset = 0 if b == 0 else self.batch_offset[b-1]
my_f_len = self.f_len[b]
my_g_len = self.y_len[b] + 1
for t in range(my_f_len):
for u in range(my_g_len):
x_unpacked[b, t, u] = x[my_batch_offset + t*my_g_len + u]
return x_unpacked
def run_transducer_loss(self, scalar_t, fuse_softmax_backward, packed_input, for_vector_kernel):
self.gen_input(scalar_t, for_vector_kernel)
my_loss = TransducerLoss( fuse_softmax_backward=fuse_softmax_backward,
packed_input=packed_input)
if not packed_input:
loss_tst = my_loss( x=self.x_tst,
label=self.y,
f_len=self.f_len,
y_len=self.y_len,
blank_idx=self.blank_idx)
loss_tst.mean().backward()
grad_tst = self.x_tst.grad
else:
loss_tst = my_loss( x=self.x_tst_packed,
label=self.y,
f_len=self.f_len,
y_len=self.y_len,
blank_idx=self.blank_idx,
batch_offset=self.batch_offset,
max_f_len=max(self.f_len))
loss_tst.mean().backward()
grad_tst_packed = self.x_tst_packed.grad
grad_tst = self._unpack(grad_tst_packed)
return loss_tst, grad_tst
def test_transducer_loss_fp32(self):
loss_tst, grad_tst = self.run_transducer_loss( scalar_t=torch.float32,
fuse_softmax_backward=False,
packed_input=False,
for_vector_kernel=False)
self.assertTrue(torch.allclose(self.loss_ref, loss_tst, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.grad_ref, grad_tst, atol=1e-5, rtol=1e-5))
def test_transducer_loss_fp16(self):
loss_tst, grad_tst = self.run_transducer_loss( scalar_t=torch.float16,
fuse_softmax_backward=False,
packed_input=False,
for_vector_kernel=False)
self.assertTrue(torch.allclose(self.loss_ref, loss_tst, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.grad_ref, grad_tst, atol=1e-4, rtol=1e-3))
def test_transducer_loss_fp16_backward_fusion(self):
loss_tst, grad_tst = self.run_transducer_loss( scalar_t=torch.float16,
fuse_softmax_backward=True,
packed_input=False,
for_vector_kernel=False)
self.assertTrue(torch.allclose(self.loss_ref, loss_tst, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.grad_ref, grad_tst, atol=1e-4, rtol=1e-3))
def test_transducer_loss_fp16_backward_fusion_packed(self):
loss_tst, grad_tst = self.run_transducer_loss( scalar_t=torch.float16,
fuse_softmax_backward=True,
packed_input=True,
for_vector_kernel=False)
self.assertTrue(torch.allclose(self.loss_ref, loss_tst, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.grad_ref, grad_tst, atol=1e-4, rtol=1e-3))
def test_transducer_loss_fp16_backward_fusion_packed_vec(self):
loss_tst, grad_tst = self.run_transducer_loss( scalar_t=torch.float16,
fuse_softmax_backward=True,
packed_input=True,
for_vector_kernel=True)
self.assertTrue(torch.allclose(self.loss_ref, loss_tst, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.grad_ref, grad_tst, atol=1e-4, rtol=1e-3))
if __name__ == '__main__':
unittest.main()
|
apex-master
|
apex/contrib/test/transducer/test_transducer_loss.py
|
apex-master
|
apex/contrib/test/transducer/__init__.py
|
|
import unittest
import torch
from torch.testing._internal import common_utils
SKIP_TEST = None
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
try:
from apex.contrib.peer_memory import PeerMemoryPool, PeerHaloExchanger1d
except ImportError as e:
SKIP_TEST = e
# How to run:
# python /path/to/test_peer_halo_exchange_module.py
# Output of this function is used as ground truth in module tests.
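# Reference halo exchange built on torch.distributed.all_gather: every rank shares its
# outgoing halo slices, then copies the matching slices from its top/bottom neighbors
# into its own input-halo regions (edge ranks zero the side that has no neighbor).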
def nccl_halo_ex(peer_rank, peer_group_size, y, half_halo, explicit_nhwc, H_split):
if explicit_nhwc:
if H_split:
_, Hp, _, _ = list(y.shape)
H = Hp - 2 * half_halo
top_out_halo = y[:, half_halo : 2 * half_halo, :, :]
top_inp_halo = y[:, :half_halo, :, :]
btm_out_halo = y[:, H : H + half_halo, :, :]
btm_inp_halo = y[:, H + half_halo : H + 2 * half_halo, :, :]
else:
_, _, Wp, _ = list(y.shape)
W = Wp - 2 * half_halo
top_out_halo = y[:, :, half_halo : 2 * half_halo, :]
top_inp_halo = y[:, :, :half_halo, :]
btm_out_halo = y[:, :, W : W + half_halo, :]
btm_inp_halo = y[:, :, W + half_halo : W + 2 * half_halo, :]
else:
if H_split:
_, _, Hp, _ = list(y.shape)
H = Hp - 2 * half_halo
top_out_halo = y[:, :, half_halo : 2 * half_halo, :]
top_inp_halo = y[:, :, :half_halo, :]
btm_out_halo = y[:, :, H : H + half_halo, :]
btm_inp_halo = y[:, :, H + half_halo : H + 2 * half_halo, :]
else:
_, _, _, Wp = list(y.shape)
W = Wp - 2 * half_halo
top_out_halo = y[:, :, :, half_halo : 2 * half_halo]
top_inp_halo = y[:, :, :, :half_halo]
btm_out_halo = y[:, :, :, W : W + half_halo]
btm_inp_halo = y[:, :, :, W + half_halo : W + 2 * half_halo]
mf = torch.channels_last if y.is_contiguous(memory_format=torch.channels_last) else torch.contiguous_format
top_out_halo = top_out_halo.contiguous()
btm_out_halo = btm_out_halo.contiguous()
top_inp_halos = [torch.empty_like(top_out_halo) for _ in range(peer_group_size)]
torch.distributed.all_gather(top_inp_halos, top_out_halo)
btm_inp_halos = [torch.empty_like(btm_out_halo) for _ in range(peer_group_size)]
torch.distributed.all_gather(btm_inp_halos, btm_out_halo)
top_rank = (peer_rank + peer_group_size - 1) % peer_group_size
btm_rank = (peer_rank + 1) % peer_group_size
if peer_rank == 0:
top_inp_halo.zero_()
else:
top_inp_halo.copy_(btm_inp_halos[top_rank].to(memory_format=mf))
if peer_rank == peer_group_size - 1:
btm_inp_halo.zero_()
else:
btm_inp_halo.copy_(top_inp_halos[btm_rank].to(memory_format=mf))
def single_test(
peer_rank,
peer_group_size,
halo_ex,
C,
H,
W,
half_halo,
dtype,
memory_format,
H_split,
num_steps,
numSM=1,
):
if memory_format == 1:
# 1 -> explicit nhwc
explicit_nhwc = True
if H_split:
y = torch.randn([1, H + 2 * half_halo, W, C], dtype=dtype, device="cuda")
ym = y[:, half_halo : H + half_halo, :, :]
else:
y = torch.randn([1, H, W + 2 * half_halo, C], dtype=dtype, device="cuda")
ym = y[:, :, half_halo : W + half_halo, :]
else:
# 2 -> native nhwc
# 3 -> nchw
explicit_nhwc = False
if H_split:
y = torch.randn([1, C, H + 2 * half_halo, W], dtype=dtype, device="cuda")
if memory_format == 2:
y = y.to(memory_format=torch.channels_last)
ym = y[:, :, half_halo : H + half_halo, :]
else:
y = torch.randn([1, C, H, W + 2 * half_halo], dtype=dtype, device="cuda")
if memory_format == 2:
y = y.to(memory_format=torch.channels_last)
ym = y[:, :, :, half_halo : W + half_halo]
y3 = y.clone()
list_y = []
for step in range(num_steps):
halo_ex(y, H_split, explicit_nhwc, numSM)
list_y.append(y.clone())
y.copy_(y3)
halo_ex.peer_pool.reset()
torch.distributed.barrier()
y2 = y3.clone()
list_y2 = []
for step in range(num_steps):
nccl_halo_ex(peer_rank, peer_group_size, y2, half_halo, explicit_nhwc, H_split)
list_y2.append(y2.clone())
y2.copy_(y3)
if memory_format == 1:
memory_format_str = "explicit_nhwc"
elif memory_format == 2:
memory_format_str = "native nhwc"
elif memory_format == 3:
memory_format_str = "nchw"
else:
memory_format_str = "???"
torch.testing.assert_close(list_y, list_y2, msg=memory_format_str)
# is_equal = [torch.all(torch.eq(yy, yy2)) for yy, yy2 in zip(list_y, list_y2)]
# is_equal = torch.tensor(is_equal, dtype=torch.bool)
# is_equal = torch.all(is_equal)
# if peer_rank == 0:
# if is_equal:
# print(
# "SUCCESS : N,C,H,W = 1,%d,%d,%d, half_halo=%d, %s, %s, %s"
# % (
# C,
# H,
# W,
# half_halo,
# str(dtype),
# memory_format_str,
# "H-split" if H_split else "W-split",
# )
# )
# else:
# print(
# "FAILURE : N,C,H,W = 1,%d,%d,%d, half_halo=%d, %s, %s, %s"
# % (
# C,
# H,
# W,
# half_halo,
# str(dtype),
# memory_format_str,
# "H-split" if H_split else "W-split",
# )
# )
#
# peer memory flag sync relies on there being at least one barrier per step
# torch.distributed.barrier()
def H_split_tests(N, C, H, W, half_halo, rank, world_size, halo_ex, num_steps):
Hr = 8 * world_size
Hp = ((H + Hr - 1) // Hr) * 8
for i in range(4):
div = int(pow(2, i))
single_test(
rank,
world_size,
halo_ex,
C * div,
Hp // div,
W // div,
half_halo,
torch.float16,
1,
True,
num_steps,
)
single_test(
rank,
world_size,
halo_ex,
C * div,
Hp // div,
W // div,
half_halo,
torch.float16,
2,
True,
num_steps,
)
single_test(
rank,
world_size,
halo_ex,
C * div,
Hp // div,
W // div,
half_halo,
torch.float16,
3,
True,
num_steps,
)
def W_split_tests(N, C, H, W, half_halo, rank, world_size, halo_ex, num_steps):
Wr = 8 * world_size
Wp = ((W + Wr - 1) // Wr) * 8
for i in range(4):
div = int(pow(2, i))
single_test(
rank,
world_size,
halo_ex,
C * div,
H // div,
Wp // div,
half_halo,
torch.float16,
1,
False,
num_steps,
)
single_test(
rank,
world_size,
halo_ex,
C * div,
H // div,
Wp // div,
half_halo,
torch.float16,
2,
False,
num_steps,
)
single_test(
rank,
world_size,
halo_ex,
C * div,
H // div,
Wp // div,
half_halo,
torch.float16,
3,
False,
num_steps,
)
def main():
# for this trivial example peer_rank == rank and peer_group_size == world_size
torch.distributed.init_process_group("nccl")
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
torch.cuda.set_device(rank)
peer_ranks = [i for i in range(world_size)]
pool = PeerMemoryPool(0, 2 * 1024 * 1024, peer_ranks)
num_steps = 100
half_halo = 1
halo_ex = PeerHaloExchanger1d(peer_ranks, rank, pool, half_halo)
H_split_tests(1, 64, 336, 200, half_halo, rank, world_size, halo_ex, num_steps)
W_split_tests(1, 64, 200, 336, half_halo, rank, world_size, halo_ex, num_steps)
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TestPeerMemory(NcclDistributedTestBase):
HALF_HALO = 1
NUM_STEPS = 100
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 2)
    # TODO(crcrpar): Check whether `world_size` must be a multiple of 2.
def _check_world_size_and_may_skip(self) -> None:
if not (self.world_size >= 2 and self.world_size % 2 == 0):
self.skipTest(f"world_size is expected to be a multiple of 2 but, {self.world_size}")
    def get_halo_exchanger_1d(self):
peer_ranks = [i for i in range(self.world_size)]
pool = PeerMemoryPool(64 * 1024, 2 * 1024 * 1024, peer_ranks)
halo_exchanger_1d = PeerHaloExchanger1d(peer_ranks, self.rank, pool, TestPeerMemory.HALF_HALO)
return halo_exchanger_1d
def test_height_split(self):
self._check_world_size_and_may_skip()
H_split_tests(
1,
64,
336,
200,
TestPeerMemory.HALF_HALO,
self.rank,
self.world_size,
            self.get_halo_exchanger_1d(),
TestPeerMemory.NUM_STEPS,
)
def test_width_split(self):
self._check_world_size_and_may_skip()
W_split_tests(
1,
64,
200,
336,
TestPeerMemory.HALF_HALO,
self.rank,
self.world_size,
            self.get_halo_exchanger_1d(),
TestPeerMemory.NUM_STEPS,
)
if __name__ == "__main__":
common_utils.run_tests()
|
apex-master
|
apex/contrib/test/peer_memory/test_peer_halo_exchange_module.py
|
apex-master
|
apex/contrib/test/peer_memory/__init__.py
|
|
###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
###############################################################################
import math
import unittest
import torch
import numpy as np
SKIP_TEST = None
try:
import fmhalib as mha
except ImportError as e:
SKIP_TEST = e
def _get_device_properties(device = torch.device("cuda")):
# type: (str or torch.device) -> Tuple[int, int]
properties = torch.cuda.get_device_properties(device)
return properties.major, properties.minor
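# Pure-PyTorch reference attention: unpacks the interleaved QKV tensor, applies a scaled
# dot-product with the additive mask, and returns the context tensor against which the
# fused fmhalib kernels are checked.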
def py_mha(qkv, amask, b, s, h, d):
qkv = qkv.view(b, s, h, 3, d)
q = qkv[:, :, :, 0, :].permute(0,2,1,3)
k = qkv[:, :, :, 1, :].permute(0,2,1,3)
v = qkv[:, :, :, 2, :].permute(0,2,1,3)
p = torch.matmul(q.float(), k.permute(0,1,3,2).float())
p_masked = p / math.sqrt(d) + (1.0 - amask) * -10000.0
s = torch.softmax(p_masked, -1).to(qkv.dtype)
ctx = torch.matmul(s, v)
ctx = ctx.permute(0,2,1,3).contiguous()
ctx.retain_grad()
return ctx
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
@unittest.skipIf(not _get_device_properties() == (8, 0), "FMHA only supports sm80")
class TestFMHA(unittest.TestCase):
def run_test(self, s: int, b: int, zero_tensors: bool):
print(f'Test s={s} b={b}, zero_tensors={zero_tensors}')
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
dtype = torch.float16
device = torch.device('cuda')
h = 16
d = 64
slens = [s] * b
a = torch.tensor(np.array([0] + slens), dtype=torch.int32)
amask = torch.ones(b,h,s,s, dtype=dtype, device=device)
seqlens = torch.tensor(slens, dtype=torch.int32, device=device)
cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=device)
total = cu_seqlens[-1].item()
qkv = torch.randn((b,s,h,3,d), device=device, dtype=dtype)
qkv_vs = qkv.permute(0,1,3,2,4).contiguous().view(b*s, 3, h,d)
qkv.requires_grad = True
if b < 4:
ctx, S_ = mha.fwd(qkv_vs, cu_seqlens, 0.0, s, True, True, zero_tensors, None)
else:
ctx, S_ = mha.fwd(qkv_vs, cu_seqlens, 0.0, s, True, False, zero_tensors, None)
ctx = ctx.view(b,s,h,d)
ctx_ref = py_mha(qkv, amask, b,s,h,d)
self.assertTrue(torch.allclose(ctx_ref.float(), ctx.float(), atol=1e-3))
labels = torch.randn_like(ctx_ref)
diff = ctx_ref - labels
l = (diff * diff).sum() / b
l.backward()
dw = ctx_ref.grad.permute(0,2,1,3)
dw2 = dw.permute(0,2,1,3).clone().detach().contiguous()
if b < 4:
dqkv2, _, _ = mha.bwd_nl(dw2, qkv_vs, S_, cu_seqlens, 0.0, s, zero_tensors)
else:
dqkv2, _ = mha.bwd(dw2, qkv_vs, S_, cu_seqlens, 0.0, s, zero_tensors)
dqkv2 = dqkv2.permute(0,2,1,3).view(b,s, h,3,d)
self.assertTrue(torch.allclose(qkv.grad.float(), dqkv2.float(), atol=1e-3))
def test_128(self):
self.run_test(128, 32, False)
self.run_test(128, 32, True)
self.run_test(128, 56, False)
self.run_test(128, 56, True)
def test_256(self):
self.run_test(256, 32, False)
self.run_test(256, 32, True)
self.run_test(256, 56, False)
self.run_test(256, 56, True)
def test_384(self):
self.run_test(384, 32, False)
self.run_test(384, 32, True)
self.run_test(384, 56, False)
self.run_test(384, 56, True)
def test_512(self):
self.run_test(512, 32, False)
self.run_test(512, 32, True)
self.run_test(512, 56, False)
self.run_test(512, 56, True)
self.run_test(512, 2, False)
self.run_test(512, 2, True)
self.run_test(512, 3, False)
self.run_test(512, 3, True)
if __name__ == '__main__':
unittest.main()
|
apex-master
|
apex/contrib/test/fmha/test_fmha.py
|
apex-master
|
apex/contrib/test/fmha/__init__.py
|
|
try:
import torch
import focal_loss_cuda
from .focal_loss import focal_loss
del torch
del focal_loss_cuda
del focal_loss
except ImportError as err:
print("apex was installed without --focal_loss flag, apex.contrib.focal_loss is not available")
|
apex-master
|
apex/contrib/focal_loss/__init__.py
|
import torch
import focal_loss_cuda
class FocalLoss(torch.autograd.Function):
@staticmethod
def forward(
ctx,
cls_output,
cls_targets_at_level,
num_positives_sum,
num_real_classes,
alpha,
gamma,
label_smoothing=0.0,
):
loss, partial_grad = focal_loss_cuda.forward(
cls_output,
cls_targets_at_level,
num_positives_sum,
num_real_classes,
alpha,
gamma,
label_smoothing,
)
ctx.save_for_backward(partial_grad, num_positives_sum)
return loss
@staticmethod
def backward(ctx, grad_loss):
partial_grad, num_positives_sum = ctx.saved_tensors
        # The backward kernel works in-place to save memory:
        # partial_grad and grad_input are the same tensor.
grad_input = focal_loss_cuda.backward(grad_loss, partial_grad, num_positives_sum)
return grad_input, None, None, None, None, None, None
def focal_loss(
cls_output: torch.Tensor,
cls_targets_at_level: torch.Tensor,
num_positive_sum: torch.Tensor,
num_real_classes: int,
alpha: float,
gamma: float,
label_smoothing: float = 0.0,
) -> torch.Tensor:
"""Fused focal loss function."""
return FocalLoss.apply(
cls_output,
cls_targets_at_level,
num_positive_sum,
num_real_classes,
alpha,
gamma,
label_smoothing,
)
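# Usage sketch (hypothetical inputs; assumes Apex was built with --focal_loss):
#   loss = focal_loss(cls_output, cls_targets_at_level, num_positives_sum,
#                     num_real_classes=80, alpha=0.25, gamma=2.0)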
|
apex-master
|
apex/contrib/focal_loss/focal_loss.py
|
import torch
import xentropy_cuda
class SoftmaxCrossEntropyLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, labels, smoothing=0.0, padding_idx=0, half_to_float=False):
losses, max_log_sum_exp = xentropy_cuda.forward(
logits, labels, smoothing, half_to_float)
losses.masked_fill_(labels==padding_idx, 0)
ctx.save_for_backward(logits, max_log_sum_exp, labels,
torch.FloatTensor([smoothing]),
torch.LongTensor([padding_idx]))
return losses
@staticmethod
def backward(ctx, grad_loss):
logits, max_log_sum_exp, labels, smoothing, padding_idx = ctx.saved_tensors
if not grad_loss.is_contiguous():
grad_loss = grad_loss.contiguous()
grad_loss.masked_fill_(labels==padding_idx.item(), 0)
grad_logits = xentropy_cuda.backward(
grad_loss.contiguous(), logits, max_log_sum_exp,
labels, smoothing.item())
return grad_logits, None, None, None, None
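# Usage sketch (hypothetical shapes; assumes the xentropy_cuda extension is built):
#   logits = torch.randn(8, 32000, device="cuda", dtype=torch.half, requires_grad=True)
#   labels = torch.randint(0, 32000, (8,), device="cuda")
#   losses = SoftmaxCrossEntropyLoss.apply(logits, labels, 0.1, 0, True)  # smoothing, padding_idx, half_to_float
#   losses.sum().backward()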
|
apex-master
|
apex/contrib/xentropy/softmax_xentropy.py
|
from .softmax_xentropy import SoftmaxCrossEntropyLoss
__all__ = [
"SoftmaxCrossEntropyLoss",
]
|
apex-master
|
apex/contrib/xentropy/__init__.py
|
from .layer_norm import FastLayerNorm
|
apex-master
|
apex/contrib/layer_norm/__init__.py
|
import torch
from torch.nn import init
from apex._autocast_utils import _cast_if_autocast_enabled
import fast_layer_norm
class FastLayerNormFN(torch.autograd.Function):
@staticmethod
def forward(ctx, x, gamma, beta, epsilon):
x = x.contiguous()
gamma = gamma.contiguous()
beta = beta.contiguous()
hidden_size = gamma.numel()
xmat = x.view((-1, hidden_size))
ymat, mu, rsigma = fast_layer_norm.ln_fwd(xmat, gamma, beta, epsilon)
ctx.save_for_backward(x, gamma, mu, rsigma)
return ymat.view(x.shape)
@staticmethod
def backward(ctx, dy):
# assert dy.is_contiguous()
dy = dy.contiguous() # this happens!
x, gamma, mu, rsigma = ctx.saved_tensors
hidden_size = gamma.numel()
xmat = x.view((-1, hidden_size))
dymat = dy.view(xmat.shape)
dxmat, dgamma, dbeta, _, _ = fast_layer_norm.ln_bwd(dymat, xmat, mu, rsigma, gamma)
dx = dxmat.view(x.shape)
return dx, dgamma, dbeta, None
def _fast_layer_norm(x, weight, bias, epsilon):
args = _cast_if_autocast_enabled(x, weight, bias, epsilon)
with torch.cuda.amp.autocast(enabled=False):
return FastLayerNormFN.apply(*args)
class FastLayerNorm(torch.nn.Module):
def __init__(self, hidden_size, eps=1e-5):
super().__init__()
self.epsilon = eps
self.weight = torch.nn.Parameter(torch.empty(hidden_size))
self.bias = torch.nn.Parameter(torch.empty(hidden_size))
self.reset_parameters()
def reset_parameters(self):
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, x):
return _fast_layer_norm(x, self.weight, self.bias, self.epsilon)
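# Usage sketch (hypothetical shapes; assumes the fast_layer_norm extension is built):
#   ln = FastLayerNorm(hidden_size=1024).cuda().half()
#   y = ln(torch.randn(8, 512, 1024, device="cuda", dtype=torch.half))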
|
apex-master
|
apex/contrib/layer_norm/layer_norm.py
|
import types
import torch
import importlib
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
use_mt (boolean, optional): use multi tensor apply for lower launch
latency. (default: False)
.. _Adam - A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params,
lr=1e-3, bias_correction = True,
betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt = False,
weight_decay=0., max_grad_norm=0., amsgrad=False, use_mt=False,
amp_scale_adjustment=1.0):
global fused_adam_cuda
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
self._use_multi_tensor = False
if use_mt:
if not multi_tensor_applier.available:
print("Warning: multi_tensor_applier is unavailable")
else:
self._use_multi_tensor = True
self._overflow_buf = torch.cuda.IntTensor([0])
self._amp_scale_adjustment = amp_scale_adjustment
if amsgrad:
raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
defaults = dict(lr=lr, bias_correction=bias_correction,
betas=betas, eps=eps, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(FusedAdam, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
def step(self, closure=None, grads=None, output_params=None, scale=1., grad_norms=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
            output_params (list of tensors, optional): a reduced-precision copy
                of the updated weights, written out in addition to the regular
                updated weights. Must be of the same type as the gradients. (default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
if hasattr(self, "_amp_stash"):
grads = self._amp_stash.grads
output_params = self._amp_stash.output_params
scale = self._amp_stash.scale*self._amp_scale_adjustment
grad_norms = self._amp_stash.grad_norms
if grads is None:
grads_group = [None]*len(self.param_groups)
# backward compatibility
        # assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0])!=list:
grads_group = [grads]
else:
grads_group = grads
if output_params is None:
output_params_group = [None]*len(self.param_groups)
elif isinstance(output_params, types.GeneratorType):
output_params_group = [output_params]
elif type(output_params[0])!=list:
output_params_group = [output_params]
else:
output_params_group = output_params
if grad_norms is None:
grad_norms = [None]*len(self.param_groups)
for group, grads_this_group, output_params_this_group, grad_norm in zip(self.param_groups, grads_group, output_params_group, grad_norms):
if grads_this_group is None:
grads_this_group = [None]*len(group['params'])
if output_params_this_group is None:
output_params_this_group = [None]*len(group['params'])
# compute combined scale factor for this group
combined_scale = scale
if group['max_grad_norm'] > 0:
# norm is in fact norm*scale
clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm']
if clip > 1:
combined_scale = clip * scale
bias_correction = 1 if group['bias_correction'] else 0
if self._use_multi_tensor:
if output_params:
tensorlists = [[],[],[],[],[]]
else:
tensorlists = [[],[],[],[]]
tensordevice = None
for p, grad, output_param in zip(group['params'], grads_this_group, output_params_this_group):
                # Note: p.grad must never be set here; the mixed-precision optimizer
                # sometimes sends None gradients and relies on this.
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('FusedAdam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
out_p = torch.tensor([], dtype = torch.float) if output_param is None else output_param
if self._use_multi_tensor:
pl = [p.data, exp_avg, exp_avg_sq, grad]
if output_param is not None:
pl.append(out_p)
for tl, t in zip(tensorlists, pl):
tl.append(t)
if tensordevice is None:
tensordevice = p.device
elif tensordevice != p.device:
                        raise RuntimeError('FusedAdam does not support use_mt with tensors on multiple devices')
else:
with torch.cuda.device(p.device):
fused_adam_cuda.adam(p.data,
out_p,
exp_avg,
exp_avg_sq,
grad,
group['lr'],
beta1,
beta2,
group['eps'],
combined_scale,
state['step'],
self.eps_mode,
bias_correction,
group['weight_decay'])
if self._use_multi_tensor:
with torch.cuda.device(tensordevice):
multi_tensor_applier(
fused_adam_cuda.adam_mt,
self._overflow_buf,
tensorlists,
group['lr'],
beta1,
beta2,
group['eps'],
combined_scale,
state['step'],
self.eps_mode,
bias_correction,
group['weight_decay'])
return loss
|
apex-master
|
apex/contrib/optimizers/fused_adam.py
|
from .fp16_optimizer import FP16_Optimizer
from .fused_adam import FusedAdam
from .fused_lamb import FusedLAMB
|
apex-master
|
apex/contrib/optimizers/__init__.py
|
import collections
import contextlib
from dataclasses import dataclass
import enum
import inspect
import io
import itertools
import threading
from typing import Any, Callable, Iterable, List, Optional, Set, Tuple, Union
import warnings
import torch
from torch.distributed.distributed_c10d import _get_default_group
from apex.multi_tensor_apply import multi_tensor_applier
import amp_C
import distributed_adam_cuda
# Fallback to private functions if using PyTorch <1.13.0
try:
from torch.distributed.distributed_c10d import get_global_rank
except ImportError:
from torch.distributed.distributed_c10d import _get_global_rank
get_global_rank = _get_global_rank
try:
from torch.distributed.distributed_c10d import reduce_scatter_tensor
except ImportError:
from torch.distributed.distributed_c10d import _reduce_scatter_base
reduce_scatter_tensor = _reduce_scatter_base
try:
from torch.distributed.distributed_c10d import all_gather_into_tensor
except ImportError:
from torch.distributed.distributed_c10d import _all_gather_base
all_gather_into_tensor = _all_gather_base
# Import context manager to coalesce NCCL calls
# Note: Replace these backward compatibility shims once PyTorch
# exposes a stable public API for coalescing communication.
from torch.distributed.distributed_c10d import _coalescing_manager
if "device" not in inspect.signature(_coalescing_manager).parameters:
# PyTorch <=1.13.1 does not have device arg
_coalescing_manager_no_device_arg = _coalescing_manager
@contextlib.contextmanager
def _coalescing_manager(group, device, reqs):
with _coalescing_manager_no_device_arg(group, reqs):
yield
if "reqs" in inspect.signature(_coalescing_manager).parameters:
# PyTorch <=2.0.1 handles synchronization externally to coalescing
# manager
_coalescing_manager_with_reqs_arg = _coalescing_manager
class _CoalescingManager:
def __init__(self):
self.works: List[torch.distributed.Work] = []
def append(self, work: torch.distributed.Work) -> None:
if work:
self.works.append(work)
def wait(self) -> None:
for work in self.works:
work.wait()
@contextlib.contextmanager
def _coalescing_manager(
group: Optional[torch.distributed.ProcessGroup] = None,
device: Optional[torch.device] = None,
async_ops: bool = False,
) -> contextlib.AbstractContextManager:
assert device is not None
cm = _CoalescingManager()
with _coalescing_manager_with_reqs_arg(
group,
device,
cm.works,
):
yield cm
if not async_ops:
cm.wait()
def _coalescing_manager_append_work(
cm: _CoalescingManager,
work: torch.distributed.Work,
) -> None:
"""Add asynchronous request to coalescing manager"""
cm.append(work)
else:
# PyTorch >2.0.1 handles synchronization within coalescing
# manager
def _coalescing_manager_append_work(
cm: torch.distributed._CoalescingManager,
work: torch.distributed.Work,
) -> None:
"""Dummy function for backward compatibility
Coalescing manager already keeps track of asynchronous
communication.
"""
pass
# Import optional CUDA kernels
_FOUND_DEPRECATED_FUSED_ADAM: bool = False
try:
import fused_adam_cuda
_FOUND_DEPRECATED_FUSED_ADAM = True
except ImportError:
warnings.warn(
"Could not find recommended CUDA kernels when importing "
"`DistributedFusedAdam`. "
"For best performance, Apex should be installed with "
"`--deprecated_fused_adam`."
)
def _round_to_multiple(
number: int,
multiple: int,
round_up: bool = True,
) -> int:
"""Assumes arguments are positive integers"""
return (number + multiple - 1 if round_up else number) // multiple * multiple
def _devices_match(device1: torch.device, device2: torch.device) -> bool:
"""Whether two PyTorch devices are equivalent"""
device1 = torch.device(device1)
device2 = torch.device(device2)
if device1.type != device2.type:
return False
if device1.type == "cuda":
index1 = device1.index
index2 = device2.index
if index1 is None:
index1 = torch.cuda.current_device()
if index2 is None:
index2 = torch.cuda.current_device()
if index1 != index2:
return False
return True
def _multi_tensor_copy(
buffers_in: List[torch.Tensor],
buffers_out: List[torch.Tensor],
dummy_overflow_buf: Optional[torch.Tensor] = None,
) -> None:
"""Copy between corresponding buffers
Uses fused copy kernel if possible.
"""
# Group buffers by device and dtype
buffer_groups = collections.defaultdict(list)
for buf_in, buf_out in zip(buffers_in, buffers_out):
if buf_in.data_ptr() == buf_out.data_ptr() or buf_in.numel() == 0:
# Nothing to be done if input and output buffers are same
# or have no entries
continue
if buf_in.dtype == buf_out.dtype:
# Just copy bytes if dtypes are same
buf_in = buf_in.view(torch.uint8)
buf_out = buf_out.view(torch.uint8)
key = (buf_in.is_cuda, buf_in.dtype, buf_out.is_cuda, buf_out.dtype)
buffer_groups[key].append((buf_in, buf_out))
# Copy each group of buffers
for key, buffers in buffer_groups.items():
# Check if buffers support fused kernel
is_cuda_in, dtype_in, is_cuda_out, dtype_out = key
supported_dtypes = (torch.float32, torch.float16)
use_fused_kernel = (
dtype_in in supported_dtypes and dtype_out in supported_dtypes
) or (dtype_in == torch.uint8 and dtype_out == torch.uint8)
use_fused_kernel = use_fused_kernel and is_cuda_in and is_cuda_out
# Copy buffers
if use_fused_kernel and _FOUND_DEPRECATED_FUSED_ADAM:
if dummy_overflow_buf is None:
dummy_overflow_buf = torch.zeros([1], dtype=torch.int32, device="cuda")
multi_tensor_applier(
fused_adam_cuda.maybe_cast_mt,
dummy_overflow_buf,
list(zip(*buffers)),
)
else:
for buf_in, buf_out in buffers:
buf_out.copy_(buf_in)
@contextlib.contextmanager
def _disable_pre_forward_hook(
param: torch.nn.Parameter,
) -> contextlib.AbstractContextManager:
"""Prevent parameter from calling pre-forward hook"""
hook_is_enabled = getattr(
param,
"_pre_forward_hook_is_enabled",
False,
)
if hook_is_enabled:
param._pre_forward_hook_is_enabled = False
try:
yield
finally:
if hook_is_enabled:
param._pre_forward_hook_is_enabled = True
@torch.no_grad()
def _bf16_rem_to_fp32(
bf16: torch.Tensor,
rem: torch.Tensor,
fp32: torch.Tensor,
) -> None:
"""Pack BF16 tensor and 16-bit remainders into FP32 tensor"""
# Check inputs
assert bf16.size() == rem.size() == fp32.size(), (
"Tensor dimensions do not match: "
f"bf16={list(bf16.size())}, "
f"rem={list(rem.size())}, "
f"fp32={list(fp32.size())}, "
)
assert bf16.dtype is torch.bfloat16, f"bf16 buffer has invalid dtype ({bf16.dtype})"
assert rem.dtype is torch.int16, f"rem buffer has invalid dtype ({rem.dtype})"
assert fp32.dtype is torch.float32, f"fp32 buffer has invalid dtype ({fp32.dtype})"
# Undo bf16 rounding
bf16 = bf16.view(torch.int16) - torch.where(rem < 0, 1, 0)
# Pack bf16 and remainder into little-endian fp32
fp32 = fp32.unsqueeze(-1).view(torch.int16)
fp32 = torch.stack((rem, bf16), dim=-1, out=fp32)
class DistributedFusedAdam(torch.optim.Optimizer):
"""Adam optimizer with ZeRO algorithm.
Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext --distributed_adam --deprecated_fused_adam``.
This implements the ZeRO-2 algorithm, which distributes the
optimizer state and gradients between parallel processes. In
particular, the parameters are flattened, grouped into fixed-size
buckets, and the optimizer state for each bucket is sharded over
the parallel processes. Options are provided to overlap the
gradient synchronization with the backward pass compute.
Adam was proposed in `Adam: A Method for Stochastic
Optimization`_, AdamW in `Decoupled Weight Decay Regularization`_,
and ZeRO in `ZeRO: Memory Optimizations Toward Training Trillion
Parameter Models`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts
defining parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
bias_correction (bool, optional): apply correction factor to
moment estimates. (default: True)
betas (Tuple[float, float], optional): coefficients used for
computing running averages of gradient and its square.
(default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
adam_w_mode (boolean, optional): Decouple weight decay
regularization (also known as AdamW algorithm) (default:
True)
weight_decay (float, optional): weight decay (L2 penalty)
(default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad
variant of this algorithm from the paper
`On the Convergence of Adam and Beyond`_ (default: False).
This is not yet supported.
dtype (torch.dtype, optional): datatype for optimizer state
(default: torch.float32)
grad_sync_dtype (torch.dtype, optional): datatype for gradient
synchronization (default: same as dtype)
param_sync_dtype (torch.dtype, optional): datatype for
parameter synchronization (default: same as dtype)
device (torch.device, optional): device for optimizer state
(default: cuda). Currently only supports GPU with one GPU
per process.
process_group (torch.distributed.ProcessGroup, optional):
parallel processes participating in optimizer (default:
default group in torch.distributed). This group is
interpreted as a 2D grid with dimensions
distributed_size x redundant_size.
distributed_process_group (torch.distributed.ProcessGroup,
optional): parallel processes to distribute optimizer
state over (default: same as process_group)
redundant_process_group (torch.distributed.ProcessGroup,
optional): parallel processes to replicate optimizer state
over (default: group only containing calling process)
average_grad_sync (bool, optional): whether to use average
reduction for gradient synchronization rather than sum
(default: True)
overlap_grad_sync (boolean, optional): whether to overlap
gradient synchronization with backward pass compute
(default: True)
overlap_param_sync (boolean, optional): whether to overlap
parameter synchronization with forward pass compute
(default: False). This is an experimental feature.
bucket_cap_mb (float, optional): bucket size in megabytes
(default: 100)
pipeline_size (int, optional): number of buckets to process
simultaneously in optimizer step (default: 2)
contiguous_param_buffer (bool, optional): convert parameters
into views into large persistent buffers (default: False).
This enables some performance optimizations (e.g. avoiding
some memory copies), but may add memory overhead (e.g. if
the memory allocator can't reuse the original parameter
buffers).
contiguous_grad_buffer (bool, optional): allocate gradient
            buckets out of a large persistent buffer (default:
False). This allows individual parameter gradients to be
accessed externally (see grad_buffer_view function). It
enables some performance optimizations (e.g. avoiding some
memory copies), but prevents some memory optimizations
(e.g. the memory allocator can't reuse buffers for
gradient buckets).
store_params (bool, optional): store a distributed copy of the
parameters as optimizer state (default: True). This may be
desirable if the optimizer dtype has higher precision than
the parameter dtype.
store_param_remainders (bool, optional): if model is BF16 and
optimizer is FP32, store bits required to reconstruct FP32
params (default: False). This is an experimental feature.
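    Example (an illustrative sketch rather than a complete training
    script; assumes ``torch.distributed`` has been initialized with a
    CUDA-capable backend and that ``model`` and ``batch`` are an
    existing ``torch.nn.Module`` and input tensor)::

        opt = DistributedFusedAdam(
            model.parameters(),
            lr=1e-4,
            overlap_grad_sync=True,
            bucket_cap_mb=100,
        )
        loss = model(batch).sum()
        loss.backward()
        opt.step()
        opt.zero_grad()
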
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
.. _Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101
.. _ZeRO\: Memory Optimizations Toward Training Trillion Parameter Models:
https://arxiv.org/abs/1910.02054
"""
@dataclass
class ParameterFragment:
"""Buffer ranges for a parameter fragment
Describes corresponding regions in parameter buffer and
parameter bucket.
"""
# Parameter group index
param_group_id: int
# Parameter index within parameter group
param_id: int
# Bucket index
bucket_id: int
# Range within flattened parameter buffer
param_range: Tuple[int, int]
# Range within bucket
bucket_range: Tuple[int, int]
# Whether fragment is in local shard of bucket
in_local_shard: bool
# Range within local shard
shard_range: Optional[Tuple[int, int]]
# Range of local fragment shard within bucket
shard_bucket_range: Optional[Tuple[int, int]]
# Range of local fragment shard within parameter
shard_param_range: Optional[Tuple[int, int]]
class StateBucket:
"""Optimizer state for a bucket"""
def __init__(
self,
bucket_size: int,
shard_size: int,
dtype: torch.dtype,
device: torch.device,
grad_sync_dtype: torch.dtype,
param_sync_dtype: torch.dtype,
contiguous_buffer_offset: int = 0,
store_params: bool = False,
store_param_remainders: bool = False,
):
# Size of parameter bucket
self.bucket_size: int = bucket_size
# Size of local shard of parameter bucket
self.shard_size: int = shard_size
# Data type for state
self.dtype = dtype
# Data type for gradient synchronization
self.grad_sync_dtype = grad_sync_dtype
# Data type for parameter synchronization
self.param_sync_dtype = param_sync_dtype
# Size of the filled region in the bucket
self.filled_size: int = 0
            # Whether the bucket can still accept new fragments
self.able_to_fill: bool = True
# Offset to bucket in contiguous buffers
self.contiguous_buffer_offset: int = contiguous_buffer_offset
# Buffer ranges corresponding to parameter fragments
self.fragments: List[ParameterFragment] = []
# Local shard of parameters
self.params_shard: Optional[torch.Tensor] = None
if store_params:
self.params_shard = torch.zeros(
[shard_size],
dtype=self.dtype,
device=device,
)
# Local shard of parameter remainders
self.param_remainders_shard: Optional[torch.Tensor] = None
if store_param_remainders:
self.param_remainders_shard = torch.zeros(
[shard_size],
dtype=torch.int16,
device=device,
)
# Local shard of first moment estimate
self.exp_avg_shard: torch.Tensor = torch.zeros(
[shard_size],
dtype=self.dtype,
device=device,
)
# Local shard of second moment estimate
self.exp_avg_sq_shard: torch.Tensor = torch.zeros(
[shard_size],
dtype=self.dtype,
device=device,
)
def dtypes(self) -> Tuple[torch.dtype, torch.dtype, torch.dtype]:
return (
self.dtype,
self.grad_sync_dtype,
self.param_sync_dtype,
)
class GradientStatus(enum.Enum):
"""Status of gradients within a bucket"""
# Gradients are ready to use
READY = enum.auto()
# Bucket is partially filled with unreduced gradients
PARTIALLY_FILLED = enum.auto()
# Bucket is fully filled with unreduced gradients
FULLY_FILLED = enum.auto()
# Asynchronous reduction is in progress
SYNCING = enum.auto()
class GradientBucket:
"""Gradient buffers and state for a bucket"""
def __init__(self):
# Local shard of gradients
self.grads_shard: Optional[torch.Tensor] = None
# Local contribution to gradients
self.grads_bucket: Optional[torch.Tensor] = None
# Buffer for gradient reduce-scatter
self.sync_grads_shard: Optional[torch.Tensor] = None
# Status of gradients
self.status: GradientStatus = DistributedFusedAdam.GradientStatus.READY
# Params that have generated grads
self.grads_generated: Set[torch.nn.Parameter] = set()
class ParameterStatus(enum.Enum):
"""Status of parameters within a bucket"""
# Parameters are sharded between processes
SHARDED = enum.auto()
# Asynchronous communication is in progress
SYNCING = enum.auto()
# Parameters are ready to use
READY = enum.auto()
class ParameterBucket:
"""Parameter buffers and state for a bucket"""
def __init__(self):
# Local shard of parameters
self.params_shard: Optional[torch.Tensor] = None
# Gathered parameter values
self.params_bucket: Optional[torch.Tensor] = None
# Status of parameters
self.status: ParameterStatus = DistributedFusedAdam.ParameterStatus.SHARDED
# Params that have been updated
self.params_updated: Set[torch.nn.Parameter] = set()
# Enable custom logic for AMP grad scaling
_step_supports_amp_scaling: bool = True
_custom_amp_unscale_grads: bool = True
def __init__(
self,
params: Union[Iterable[torch.nn.Parameter], Iterable[dict]],
lr: float = 1e-3,
bias_correction: bool = True,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
adam_w_mode: bool = True,
weight_decay: float = 0.0,
amsgrad: bool = False,
dtype: torch.dtype = torch.float32,
grad_sync_dtype: Optional[torch.dtype] = None,
param_sync_dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = "cuda",
process_group: Optional[torch.distributed.ProcessGroup] = None,
distributed_process_group: Optional[torch.distributed.ProcessGroup] = None,
redundant_process_group: Optional[torch.distributed.ProcessGroup] = None,
average_grad_sync: bool = True,
overlap_grad_sync: bool = True,
overlap_param_sync: bool = False,
bucket_cap_mb: float = 100.0,
pipeline_size: int = 2,
contiguous_param_buffer: bool = False,
contiguous_grad_buffer: bool = False,
store_params: bool = True,
store_param_remainders: bool = False,
):
defaults = dict(
lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
)
super().__init__(params, defaults)
# Adam options
self.adam_w_mode: bool = adam_w_mode
if amsgrad:
raise RuntimeError(
"DistributedFusedAdam does not support the AMSGrad variant."
)
# Datatype options
if grad_sync_dtype is None:
grad_sync_dtype = dtype
if param_sync_dtype is None:
param_sync_dtype = dtype
supported_dtypes = (torch.float32, torch.float16, torch.bfloat16)
if (
dtype not in supported_dtypes
or grad_sync_dtype not in supported_dtypes
or param_sync_dtype not in supported_dtypes
):
raise RuntimeError(
"Unsupported dtypes for DistributedFusedAdam "
f"(dtype={dtype}, "
f"grad_sync_dtype={grad_sync_dtype}, "
f"param_sync_dtype={param_sync_dtype}))"
)
self.dtype: torch.dtype = dtype
self.grad_sync_dtype: torch.dtype = grad_sync_dtype
self.param_sync_dtype: torch.dtype = param_sync_dtype
# Device options
if not _devices_match(device, "cuda"):
raise RuntimeError(
"Invalid device for DistributedFusedAdam " f"(device={device})"
)
self.device: torch.device = torch.device("cuda", torch.cuda.current_device())
# Process groups
self.process_group: torch.distributed.ProcessGroup = (
_get_default_group() if process_group is None else process_group
)
self.distributed_process_group: torch.distributed.ProcessGroup = (
self.process_group
if distributed_process_group is None
else distributed_process_group
)
self.redundant_process_group: Optional[
torch.distributed.ProcessGroup
] = redundant_process_group
self.process_group_size: int = torch.distributed.get_world_size(
self.process_group
)
self.distributed_rank: int = torch.distributed.get_rank(
self.distributed_process_group
)
self.distributed_size: int = torch.distributed.get_world_size(
self.distributed_process_group
)
self.redundant_size: int = (
1
if self.redundant_process_group is None
else torch.distributed.get_world_size(self.redundant_process_group)
)
if self.process_group_size != self.distributed_size * self.redundant_size:
raise RuntimeError(
"Invalid process group configuration "
f"(process group size = {self.process_group_size}, "
f"distributed process group size = {self.distributed_size}, "
f"redundant process group size = {self.redundant_size})"
)
self.process_group_root: int = get_global_rank(self.process_group, 0)
# Use average reduction for grad sync
self.average_grad_sync: bool = average_grad_sync
# Copy param grads to bucket as soon as available
self.greedy_grad_copy: bool = True
# Synchronize grad buckets as soon as their grads are available
self.overlap_grad_sync: bool = overlap_grad_sync
# Try synchronizing param buckets just before param is needed
self.overlap_param_sync: bool = overlap_param_sync
# Number of buckets to synchronize at a time
self.pipeline_size: int = pipeline_size
# Store params or param remainders
if store_param_remainders:
if store_params:
raise RuntimeError(
"Attempted to construct DistributedFusedAdam "
"with store_params=True and store_param_remainders=True"
)
if self.dtype != torch.float32 or self.param_sync_dtype != torch.bfloat16:
raise RuntimeError(
"DistributedFusedAdam requires "
"BF16 params and FP32 optimizer state "
"when storing parameter remainders "
f"(dtype={self.dtype}, "
f"param_sync_dtype={self.param_sync_dtype}))"
)
self.store_params: bool = store_params
self.store_param_remainders: bool = store_param_remainders
# Determine bucket sizes
dtype_size = torch.finfo(self.grad_sync_dtype).bits // 8
self.alignment: int = 128 // dtype_size
bucket_size = 1024 * 1024 * bucket_cap_mb / dtype_size
shard_size = int(bucket_size / self.distributed_size)
shard_size = _round_to_multiple(shard_size, self.alignment, round_up=False)
shard_size = max(shard_size, self.alignment)
self.default_shard_size: int = shard_size
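        # Example: bucket_cap_mb=100 with FP16 gradient sync (2 bytes per
        # element) gives a ~52.4M-element bucket; each rank's shard is
        # bucket_size / distributed_size, rounded down to the 64-element
        # (128-byte) alignment.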
# Optimizer state
self.state["buckets"]: List[StateBucket] = []
self.state["step"]: int = 0
# Gradient state
self._grads_buckets: Dict[int, GradientBucket] = collections.defaultdict(
self.GradientBucket
)
# Param state
self._params_buckets: Dict[int, ParameterBucket] = collections.OrderedDict()
# Whether to allocate contiguous buffers for parameters
self.contiguous_param_buffer: bool = contiguous_param_buffer
# Whether to allocate contiguous buffers for gradients
self.contiguous_grad_buffer: bool = contiguous_grad_buffer
# Contiguous buffers for parameters
self._param_buffers: Dict[
Tuple[torch.dtype, torch.dtype, torch.dtype], torch.Tensor
] = {}
# Contiguous buffers for gradients
self._grad_buffers: Dict[
Tuple[torch.dtype, torch.dtype, torch.dtype], torch.Tensor
] = {}
# Side streams for optimizer step and communication
self._pipeline_streams: List[torch.cuda.Stream] = [
torch.cuda.Stream() for _ in range(self.pipeline_size + 1)
]
# Scale by factor before optimizer step. Used for grad
# clipping and gradient scaler.
self._grad_scale: torch.Tensor = torch.full(
[], 1.0, dtype=torch.float32, device=self.device
)
# Norm of parameter gradients. Used for gradient clipping and
# gradient scaler.
self._grad_norm: Optional[torch.Tensor] = None
# Dummy flag for multi-tensor kernels
# Note: Apex multi-tensor kernels have a noop_flag argument
# that is intended to detect non-finite values. It shouldn't
# have any effect with the kernels used in the optimizer, but
# we still set it to zero out of an abundance of caution.
self._dummy_overflow_buf: torch.Tensor = torch.zeros(
[1], dtype=torch.int32, device=self.device
)
# Check if collectives have no_copy option
self._gather_no_copy: bool = (
"no_copy" in inspect.getfullargspec(torch.distributed.gather).args
)
# Make sure parameter values are same across processes
self._broadcast_params()
# Lock for callbacks
self._lock: threading.Lock = threading.Lock()
# Attach hooks for gradient synchronization
self._register_post_backward_hooks()
# Attach hooks for param synchronization
if self.overlap_param_sync:
self._register_pre_forward_hooks()
def _broadcast_params(self) -> None:
"""Broadcast parameter values from root rank"""
process_group = self.process_group
with _coalescing_manager(process_group, self.device, async_ops=True) as cm:
for param_group in self.param_groups:
for param in param_group["params"]:
_coalescing_manager_append_work(
cm,
torch.distributed.broadcast(
param,
src=self.process_group_root,
group=process_group,
async_op=True,
),
)
cm.wait()
def _make_post_backward_hook(
self,
param: torch.nn.Parameter,
param_group_id: int,
param_id: int,
) -> Callable:
"""Create callback function to call after param generates grad
Lazily initialize parameter and try launching grad sync.
"""
def post_backward_hook(*unused) -> None:
if getattr(param, "_pre_forward_hook_is_enabled", False):
raise RuntimeError(
"A parameter called its post-backward hook "
"before its pre-forward hook. "
"Please manually interact with the parameter "
"before the forward pass (e.g. by calling data_ptr) "
"or run DistributedFusedAdam with overlap_param_sync=False."
)
with self._lock:
need_to_initialize = "fragments" not in self.state[param]
if need_to_initialize:
self._init_param_state(param, param_group_id, param_id)
if self.greedy_grad_copy:
self._grad_copy(param)
if self.overlap_grad_sync:
self._try_start_bucket_grad_sync(
params=[param],
ignore_last_bucket=need_to_initialize,
)
return post_backward_hook
def _register_post_backward_hooks(self) -> None:
"""Attach hooks for gradient synchronization"""
self._grad_accs = []
for param_group_id, group in enumerate(self.param_groups):
for param_id, param in enumerate(group["params"]):
if param.requires_grad:
param_tmp = param.expand_as(param)
grad_acc = param_tmp.grad_fn.next_functions[0][0]
hook = self._make_post_backward_hook(
param,
param_group_id,
param_id,
)
grad_acc.register_hook(hook)
self._grad_accs.append(grad_acc)
def _make_pre_forward_hook(
self,
param: torch.nn.Parameter,
param_group_id: int,
param_id: int,
) -> Callable:
"""Create callback function to call before param forward pass
Make sure param has been synchronized and try launching next
param sync.
"""
def pre_forward_hook(*unused) -> None:
with self._lock:
if "fragments" not in self.state[param]:
return
self._param_copy(param)
if self.overlap_param_sync:
self._try_start_bucket_param_sync()
return pre_forward_hook
def _register_pre_forward_hooks(self) -> None:
"""Attach hooks for parameter synchronization
If _pre_forward_hook_is_enabled is set in a parameter, then
the callback will be called the first time any of its
attributes are accessed. This is hackily done by
monkey-patching the parameter class, so proceed with caution.
"""
for param_group_id, group in enumerate(self.param_groups):
for param_id, param in enumerate(group["params"]):
# Monkey-patch parameter class
cls = param.__class__
if not getattr(cls, "_has_pre_forward_hook", False):
# Monkey-patch magic methods to call __getattribute__
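                    # Implicit special-method calls (e.g. ``param + x``)
                    # bypass ``__getattribute__``, so each magic method is
                    # rerouted through a normal attribute lookup on ``self``;
                    # that lookup goes through the patched
                    # ``__getattribute__`` below and can trigger the
                    # pre-forward hook.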
special_funcs = [
"__abs__",
"__add__",
"__and__",
"__bool__",
"__complex__",
"__contains__",
"__deepcopy__",
"__delitem__",
"__div__",
"__eq__",
"__float__",
"__floordiv__",
"__ge__",
"__getitem__",
"__gt__",
"__iadd__",
"__iand__",
"__idiv__",
"__ifloordiv__",
"__ilshift__",
"__imod__",
"__imul__",
"__index__",
"__int__",
"__invert__",
"__ior__",
"__ipow__",
"__irshift__",
"__isub__",
"__iter__",
"__itruediv__",
"__ixor__",
"__le__",
"__len__",
"__long__",
"__lshift__",
"__lt__",
"__matmul__",
"__mod__",
"__mul__",
"__neg__",
"__nonzero__",
"__or__",
"__pos__",
"__pow__",
"__radd__",
"__rand__",
"__rdiv__",
"__reduce__",
"__reduce_ex__",
"__reversed__",
"__rfloordiv__",
"__rlshift__",
"__rmatmul__",
"__rmod__",
"__rmul__",
"__ror__",
"__rpow__",
"__rrshift__",
"__rshift__",
"__rsub__",
"__rtruediv__",
"__rxor__",
"__setitem__",
"__sizeof__",
"__sub__",
"__torch_function__",
"__truediv__",
"__xor__",
]
for func_name in special_funcs:
def make_augmented_func() -> Callable:
base_func_name = f"_base_{func_name}"
def augmented_func(self, *args, **kwargs):
return getattr(self, base_func_name)(*args, **kwargs)
return augmented_func
setattr(cls, f"_base_{func_name}", getattr(cls, func_name))
setattr(cls, func_name, make_augmented_func())
# Monkey-patch __getattribute__ to call pre-forward hook
def make_getattribute() -> Callable[[str], Any]:
special_attrs = {
"_pre_forward_hook_is_enabled",
"_pre_forward_hook",
"__del__",
"__delattr__",
"__dir__",
"__getattr__",
"__getattribute__",
"__hash__",
"__init__",
"__new__",
"__setattr__",
}
def getattribute_with_pre_forward_hook(self, name: str):
"""Variant of __getattribute__ that can call pre-forward hook"""
if name not in special_attrs:
if getattr(self, "_pre_forward_hook_is_enabled", False):
self._pre_forward_hook_is_enabled = False
self._pre_forward_hook()
return object.__getattribute__(self, name)
return getattribute_with_pre_forward_hook
cls.__getattribute__ = make_getattribute()
cls._has_pre_forward_hook = True
# Register pre-forward callback
param._pre_forward_hook_is_enabled = False
param._pre_forward_hook = self._make_pre_forward_hook(
param,
param_group_id,
param_id,
)
@torch.no_grad()
def init_param_buffer(self) -> None:
"""Allocate contiguous buffers for param buckets
This converts the parameters into views into contiguous
buffers. This enables some performance optimizations (e.g.
avoiding some memory copies), but may add memory overhead
(e.g. if the memory allocator can't reuse the original
parameter buffers). To minimize memory overhead, this buffer
should be initialized before the first training step.
"""
# Make sure all params are initialized
self.contiguous_param_buffer = True
self.init_params()
# Construct param buffers
buffer_sizes = collections.defaultdict(lambda: 0)
for bucket in self.state["buckets"]:
dtypes = bucket.dtypes()
buffer_sizes[dtypes] = max(
bucket.contiguous_buffer_offset + bucket.bucket_size,
buffer_sizes[dtypes],
)
for dtypes, buffer_size in buffer_sizes.items():
_, _, param_sync_dtype = dtypes
self._param_buffers[dtypes] = torch.zeros(
[buffer_size],
dtype=param_sync_dtype,
device=self.device,
)
# Figure out corresponding positions in params and param buffer
params = list(self.parameters())
param_flat_views = []
param_buffer_views = []
for i, param in enumerate(params):
fragment = self.state[param]["fragments"][0]
bucket_id = fragment.bucket_id
bucket = self.state["buckets"][bucket_id]
param_size = param.numel()
bucket_start, _ = fragment.bucket_range
buffer_offset = bucket.contiguous_buffer_offset
buffer_start = buffer_offset + bucket_start
buffer_end = buffer_start + param_size
param_buffer = self._param_buffers[bucket.dtypes()]
param_buffer_view = param_buffer[buffer_start:buffer_end].detach()
if not _devices_match(param_buffer_view.device, param.device):
raise RuntimeError(
"Attempted to change a parameter with device={param.device} "
f"into a buffer view with device={param_buffer_view.device}"
)
if param_buffer_view.dtype != param.dtype:
raise RuntimeError(
f"Attempted to change a parameter with dtype={param.dtype} "
f"into a buffer view with dtype={param_view_buffer.dtype}"
)
param_flat_views.append(param.detach().view(-1))
param_buffer_views.append(param_buffer_view)
# Copy values into param buffer
_multi_tensor_copy(
param_flat_views,
param_buffer_views,
dummy_overflow_buf=self._dummy_overflow_buf,
)
# Make all params a view into the param buffer
for param, buffer_view in zip(params, param_buffer_views):
param.data = buffer_view.view(param.size())
def _init_grad_buffer(self) -> None:
"""Allocate contiguous buffer for grad buckets"""
# Make sure all params are initialized
self.contiguous_grad_buffer = True
self.init_params()
# Construct grad buffers
buffer_sizes = collections.defaultdict(lambda: 0)
for bucket in self.state["buckets"]:
dtypes = bucket.dtypes()
buffer_sizes[dtypes] = max(
bucket.contiguous_buffer_offset + bucket.bucket_size,
buffer_sizes[dtypes],
)
for dtypes, buffer_size in buffer_sizes.items():
_, grad_sync_dtype, _ = dtypes
self._grad_buffers[dtypes] = torch.zeros(
[buffer_size],
dtype=grad_sync_dtype,
device=self.device,
)
def parameters(self) -> Iterable[torch.nn.Parameter]:
"""Returns an iterator over optimizer parameters"""
return itertools.chain.from_iterable(
group["params"] for group in self.param_groups
)
def init_params(
self,
params: Optional[Iterable[torch.nn.Parameter]] = None,
dtype: Optional[torch.dtype] = None,
grad_sync_dtype: Optional[torch.dtype] = None,
param_sync_dtype: Optional[torch.dtype] = None,
) -> None:
"""Initialize optimizer state for parameters
Ignores parameters that have already been initialized.
Arguments:
params (iterable, optional): parameters to initialize
(default: all parameters)
"""
# Default cases
if params is None:
params = self.parameters()
elif isinstance(params, torch.Tensor):
params = [params]
# Ignore parameters that have already been initialized
params = [param for param in params if "fragments" not in self.state[param]]
if not params:
return
# Get indices corresponding to parameters
id_map = dict()
for param_group_id, group in enumerate(self.param_groups):
for param_id, param in enumerate(group["params"]):
id_map[param] = (param_group_id, param_id)
# Initialize parameters
for param in params:
if param in id_map:
param_group_id, param_id = id_map[param]
self._init_param_state(
param,
param_group_id,
param_id,
dtype=dtype,
grad_sync_dtype=grad_sync_dtype,
param_sync_dtype=param_sync_dtype,
)
# Check if buckets are underutilized
num_params = sum(1 for param in self.parameters())
num_initialized_params = sum(
1 for param in self.parameters() if "fragments" in self.state[param]
)
if num_initialized_params == num_params:
bucket_size = sum(bucket.bucket_size for bucket in self.state["buckets"])
filled_size = sum(bucket.filled_size for bucket in self.state["buckets"])
buckets_utilization = filled_size / bucket_size
if buckets_utilization < 0.7:
warnings.warn(
f"Only {buckets_utilization:.1%} of buckets are used. "
"Consider decreasing the bucket_cap_mb argument."
)
def init_params_bucket(
self,
params: Iterable[torch.nn.Parameter],
dtype: Optional[torch.dtype] = None,
grad_sync_dtype: Optional[torch.dtype] = None,
param_sync_dtype: Optional[torch.dtype] = None,
) -> None:
"""Initialize optimizer state for parameters in one effective bucket
The buckets corresponding to the provided parameters are
configured so they all perform communication together. Ignores
parameters that have already been initialized.
Arguments:
params (iterable): parameters to initialize
"""
# Ignore parameters that have already been initialized
if isinstance(params, torch.Tensor):
params = [params]
params = [param for param in params if "fragments" not in self.state[param]]
if not params:
return
# Get indices corresponding to parameters
id_map = dict()
for param_group_id, group in enumerate(self.param_groups):
for param_id, param in enumerate(group["params"]):
id_map[param] = [param_group_id, param_id]
param_ids = [tuple([param] + id_map[param]) for param in params]
        # Mark existing buckets as unable to accept more fragments
for bucket in self.state["buckets"]:
bucket.able_to_fill = False
# Initialize optimizer state for parameters
start_bucket_id = len(self.state["buckets"])
self.init_params(
params,
dtype=dtype,
grad_sync_dtype=grad_sync_dtype,
param_sync_dtype=param_sync_dtype,
)
end_bucket_id = len(self.state["buckets"])
# Make sure all added buckets depend on provided params
for bucket_id in range(start_bucket_id, end_bucket_id):
bucket = self.state["buckets"][bucket_id]
bucket_size = bucket.bucket_size
bucket.able_to_fill = False
ids_in_bucket = set(
(fragment.param_group_id, fragment.param_id)
for fragment in bucket.fragments
)
for param, param_group_id, param_id in param_ids:
if (param_group_id, param_id) not in ids_in_bucket:
param_size = param.numel()
fragment = self.ParameterFragment(
param_group_id=param_group_id,
param_id=param_id,
bucket_id=bucket_id,
param_range=(param_size, param_size),
bucket_range=(bucket_size, bucket_size),
in_local_shard=False,
shard_range=None,
shard_bucket_range=None,
shard_param_range=None,
)
self.state[param]["fragments"].append(fragment)
bucket.fragments.append(fragment)
def _init_param_state(
self,
param: torch.nn.Parameter,
param_group_id: int,
param_id: int,
dtype: Optional[torch.dtype] = None,
grad_sync_dtype: Optional[torch.dtype] = None,
param_sync_dtype: Optional[torch.dtype] = None,
) -> None:
"""Initialize optimizer state for a parameter"""
# Return immediately if already initialized
if "fragments" in self.state[param]:
return
self.state[param]["fragments"] = []
# Data type configuration
if dtype is None:
dtype = self.dtype
if grad_sync_dtype is None:
grad_sync_dtype = self.grad_sync_dtype
if param_sync_dtype is None:
param_sync_dtype = self.param_sync_dtype
assert (
dtype == self.dtype
), "Optimizer states with non-default dtypes are not supported"
store_params = (
self.store_params
or dtype != self.dtype
or param_sync_dtype != self.param_sync_dtype
)
store_param_remainders = (
self.store_param_remainders
and dtype == self.dtype
and param_sync_dtype == self.param_sync_dtype
)
def last_bucket_id() -> int:
"""Index of last optimizer state bucket with desired dtypes
-1 if there are no such buckets.
"""
dtypes = (dtype, grad_sync_dtype, param_sync_dtype)
bucket_id = len(self.state["buckets"]) - 1
            while bucket_id >= 0:
bucket = self.state["buckets"][bucket_id]
if bucket.dtypes() == dtypes:
break
bucket_id -= 1
return bucket_id
def make_bucket(
bucket_size: int,
shard_size: int,
buffer_offset: int,
) -> None:
"""Construct new optimizer state bucket"""
self.state["buckets"].append(
self.StateBucket(
bucket_size,
shard_size,
dtype,
self.device,
grad_sync_dtype,
param_sync_dtype,
contiguous_buffer_offset=buffer_offset,
store_params=store_params,
store_param_remainders=store_param_remainders,
)
)
# Make sure there is at least one bucket with expected dtypes
if last_bucket_id() < 0:
shard_size = self.default_shard_size
bucket_size = shard_size * self.distributed_size
buffer_offset = 0
make_bucket(bucket_size, shard_size, buffer_offset)
# Split parameter values into fragments
# Note: Each fragment resides within a bucket
param_start = 0
param_size = param.numel()
while param_start < param_size:
# Get current bucket
bucket_id = last_bucket_id()
bucket = self.state["buckets"][bucket_id]
fragment_id = len(bucket.fragments)
bucket_size = bucket.bucket_size
shard_size = bucket.shard_size
# Determine fragment position within bucket
bucket_start = _round_to_multiple(
bucket.filled_size,
self.alignment,
round_up=True,
)
fragment_size = min(param_size - param_start, bucket_size - bucket_start)
param_end = param_start + fragment_size
bucket_end = bucket_start + fragment_size
# Create new bucket if current one is full
if fragment_size <= 0 or not bucket.able_to_fill:
shard_size = self.default_shard_size
bucket_size = shard_size * self.distributed_size
buffer_offset = bucket.contiguous_buffer_offset + bucket.bucket_size
make_bucket(bucket_size, shard_size, buffer_offset)
continue
# Fragment position within local shard
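            # Example: with shard_size=1000 and distributed_rank=2, bucket
            # positions [2000, 3000) map to local shard positions [0, 1000);
            # ranges falling outside the local shard clamp to empty.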
shard_id = self.distributed_rank
shard_start = bucket_start - shard_size * shard_id
shard_end = bucket_end - shard_size * shard_id
shard_start = min(max(shard_start, 0), shard_size)
shard_end = min(max(shard_end, 0), shard_size)
in_local_shard = shard_start < shard_end
shard_range = None
shard_bucket_range = None
shard_param_range = None
if in_local_shard:
shard_range = (shard_start, shard_end)
shard_bucket_start = shard_start + shard_size * shard_id
shard_bucket_end = shard_bucket_start + shard_end - shard_start
shard_bucket_range = (shard_bucket_start, shard_bucket_end)
shard_param_start = shard_bucket_start - bucket_start + param_start
shard_param_end = shard_param_start + shard_end - shard_start
shard_param_range = (shard_param_start, shard_param_end)
# Record fragment info
fragment = self.ParameterFragment(
param_group_id=param_group_id,
param_id=param_id,
bucket_id=bucket_id,
param_range=(param_start, param_end),
bucket_range=(bucket_start, bucket_end),
in_local_shard=in_local_shard,
shard_range=shard_range,
shard_bucket_range=shard_bucket_range,
shard_param_range=shard_param_range,
)
self.state[param]["fragments"].append(fragment)
bucket.fragments.append(fragment)
bucket.filled_size = bucket_end
param_start = param_end
# Initialize main param buffer
if store_params:
for fragment in self.state[param]["fragments"]:
if fragment.in_local_shard:
bucket = self.state["buckets"][fragment.bucket_id]
param_start, param_end = fragment.shard_param_range
shard_start, shard_end = fragment.shard_range
model_param_fragment = param.detach().view(-1)[
param_start:param_end
]
main_param_fragment = bucket.params_shard[shard_start:shard_end]
main_param_fragment.copy_(model_param_fragment)
def zero_grad(self, set_to_none: bool = False) -> None:
"""Clear parameter gradients"""
# Reset bucket buffers
self._grads_buckets.clear()
# Construct views into contiguous grad buffer, if needed
if self.contiguous_grad_buffer:
if not self._grad_buffers:
self._init_grad_buffer()
for grad_buffer in self._grad_buffers.values():
grad_buffer.zero_()
for bucket_id, bucket in enumerate(self.state["buckets"]):
bucket_size = bucket.bucket_size
buffer_start = bucket.contiguous_buffer_offset
buffer_end = buffer_start + bucket_size
grad_buffer = self._grad_buffers[bucket.dtypes()]
self._grads_buckets[bucket_id].grads_bucket = grad_buffer[
buffer_start:buffer_end
]
# Reset param grads
for param in self.parameters():
with _disable_pre_forward_hook(param):
need_to_zero = True
if set_to_none:
param.grad = None
elif self.contiguous_grad_buffer:
bucket_id = self.state[param]["fragments"][0].bucket_id
bucket = self.state["buckets"][bucket_id]
if param.dtype == bucket.grad_sync_dtype and _devices_match(
param.device, self.device
):
param.grad = self.grad_buffer_view(param)
need_to_zero = False
if need_to_zero and param.grad is not None:
param.grad.zero_()
# Reset other state
self._grad_scale = torch.full([], 1.0, dtype=torch.float32, device=self.device)
self._grad_norm = None
self._dummy_overflow_buf = torch.zeros(
[1], dtype=torch.int32, device=self.device
)
def _grad_copy(self, param: torch.nn.Parameter) -> None:
"""Copy parameter gradients to buckets"""
# Initialize parameter if needed
if "fragments" not in self.state[param]:
for param_group_id, group in enumerate(self.param_groups):
for param_id, param_ in enumerate(group["params"]):
if param is param_:
self._init_param_state(param, param_group_id, param_id)
if "fragments" not in self.state[param]:
raise RuntimeError(
"Could not initialize DistributedFusedAdam with parameter"
)
# Copy param grad to buckets
for fragment in self.state[param]["fragments"]:
# Get fragment position
bucket_id = fragment.bucket_id
bucket = self._grads_buckets[bucket_id]
bucket_size = self.state["buckets"][bucket_id].bucket_size
grad_sync_dtype = self.state["buckets"][bucket_id].grad_sync_dtype
grad_start, grad_end = fragment.param_range
bucket_start, bucket_end = fragment.bucket_range
# Set reduction status
if bucket.status == self.GradientStatus.SYNCING:
self._finish_bucket_grad_sync()
bucket.status = self.GradientStatus.PARTIALLY_FILLED
# Allocate gradient buffer if needed
if bucket.grads_bucket is None and self.contiguous_grad_buffer:
if not self._grad_buffers:
self._init_grad_buffer()
state_bucket = self.state["buckets"][bucket_id]
buffer_start = state_bucket.contiguous_buffer_offset
buffer_end = buffer_start + bucket_size
grad_buffer = self._grad_buffers[state_bucket.dtypes()]
grad_buffer = grad_buffer[buffer_start:buffer_end]
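                # Only adopt (and zero) the persistent grad buffer if the
                # accumulated grad shard does not already alias its storage;
                # zeroing an aliased buffer would wipe accumulated gradients.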
if (
bucket.grads_shard is None
or bucket.grads_shard.storage().data_ptr()
!= grad_buffer.storage().data_ptr()
):
bucket.grads_bucket = grad_buffer
bucket.grads_bucket.zero_()
if bucket.grads_bucket is None:
bucket.grads_bucket = torch.zeros(
[bucket_size],
dtype=grad_sync_dtype,
device=self.device,
)
# Copy param grad to bucket
if param.grad is not None:
grad_in = param.grad.detach().view(-1)[grad_start:grad_end]
grad_out = bucket.grads_bucket[bucket_start:bucket_end]
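                # Skip the copy if the param grad is already a view into the
                # bucket (possible when grads come from the contiguous grad
                # buffer), since adding it to itself would double-count.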
if grad_in.data_ptr() != grad_out.data_ptr():
grad_out.add_(grad_in)
# Free param grad buffer
param.grad = None
    def _param_copy(
        self,
        params: Union[torch.nn.Parameter, Iterable[torch.nn.Parameter]],
    ) -> None:
"""Update parameters with values from parameter buckets"""
# Get parameter fragments to be synchronized
if isinstance(params, torch.Tensor):
params = [params]
fragments = []
for param in params:
if "fragments" in self.state[param]:
fragments.extend(
fragment
for fragment in self.state[param]["fragments"]
if fragment.bucket_id in self._params_buckets
)
# Make sure all needed buckets have been synchronized
buckets = collections.OrderedDict()
for fragment in fragments:
bucket_id = fragment.bucket_id
bucket = self._params_buckets[bucket_id]
buckets[bucket] = bucket.status
if any(
status != self.ParameterStatus.READY for bucket, status in buckets.items()
):
self._start_bucket_param_sync(buckets.keys())
self._finish_bucket_param_sync()
# Copy values from bucket buffers to params
params_in = []
params_out = []
for fragment in fragments:
bucket_id = fragment.bucket_id
param_group_id = fragment.param_group_id
param_id = fragment.param_id
bucket_start, bucket_end = fragment.bucket_range
param_start, param_end = fragment.param_range
if param_end > param_start:
bucket = self._params_buckets[bucket_id]
param = self.param_groups[param_group_id]["params"][param_id]
params_in.append(bucket.params_bucket[bucket_start:bucket_end])
params_out.append(param.detach().view(-1)[param_start:param_end])
_multi_tensor_copy(
params_in,
params_out,
dummy_overflow_buf=self._dummy_overflow_buf,
)
# Delete buckets if possible
for fragment in fragments:
bucket_id = fragment.bucket_id
bucket = self._params_buckets[bucket_id]
bucket_fragments = self.state["buckets"][bucket_id].fragments
param_group_id = fragment.param_group_id
param_id = fragment.param_id
param = self.param_groups[param_group_id]["params"][param_id]
bucket.params_updated.add(param)
if len(bucket.params_updated) == len(bucket_fragments):
del self._params_buckets[bucket_id]
def grad_buffer_view(self, param: torch.nn.Parameter) -> torch.Tensor:
"""Construct view into grad buffer corresponding to param
Assumes optimizer is using a contiguous grad buffer.
"""
# Initialize contiguous grad buffers if needed
assert self.contiguous_grad_buffer
if not self._grad_buffers:
self._init_grad_buffer()
# Figure out corresponding position in grad buffer
fragment = self.state[param]["fragments"][0]
bucket_id = fragment.bucket_id
bucket = self.state["buckets"][bucket_id]
bucket_start, _ = fragment.bucket_range
buffer_offset = bucket.contiguous_buffer_offset
buffer_start = buffer_offset + bucket_start
buffer_end = buffer_start + param.numel()
# Construct view into grad buffer
flat_buffer = self._grad_buffers[bucket.dtypes()]
flat_buffer = flat_buffer[buffer_start:buffer_end]
return flat_buffer.detach().view(param.size())
def _force_bucket_grad_sync(self) -> None:
"""Ensure that all gradient buckets are synchronized"""
# Synchronize all unsynchronized buckets
Status = self.GradientStatus
buckets = []
for bucket_id, grads_bucket in sorted(self._grads_buckets.items()):
if grads_bucket.status not in (Status.READY, Status.SYNCING):
buckets.append(grads_bucket)
if grads_bucket.grads_bucket is None:
state_bucket = self.state["buckets"][bucket_id]
grads_bucket.grads_bucket = torch.zeros(
[state_bucket.bucket_size],
dtype=state_bucket.grad_sync_dtype,
device=self.device,
)
if buckets:
self._start_bucket_grad_sync(buckets)
self._finish_bucket_grad_sync()
# Fill any unsynchronized gradients with zeros
for bucket_id in range(len(self.state["buckets"])):
grads_bucket = self._grads_buckets[bucket_id]
if grads_bucket.grads_shard is None:
state_bucket = self.state["buckets"][bucket_id]
grads_bucket.grads_shard = torch.zeros(
[state_bucket.shard_size],
dtype=state_bucket.grad_sync_dtype,
device=self.device,
)
def _try_start_bucket_grad_sync(
self,
params: Optional[Iterable[torch.nn.Parameter]] = None,
ignore_last_bucket: bool = False,
) -> None:
"""Attempt to launch gradient synchronization
        Launches gradient synchronization if any bucket has received
all its expected gradients. Gradient synchronization is
asynchronous.
Arguments:
params (iterable): parameters that have had their
gradients copied to buckets
ignore_last_bucket (bool): avoid synchronizing last bucket
until all gradients have been generated. This avoids
excessive synchronization when initializing buckets in
the first backward pass.
"""
# Register params that have generated grads
if params is None:
params = []
for param in params:
for fragment in self.state[param]["fragments"]:
bucket_id = fragment.bucket_id
grads_bucket = self._grads_buckets[bucket_id]
state_bucket = self.state["buckets"][bucket_id]
bucket_fragments = state_bucket.fragments
grads_bucket.grads_generated.add(param)
if len(grads_bucket.grads_generated) == len(bucket_fragments):
grads_bucket.status = self.GradientStatus.FULLY_FILLED
if grads_bucket.grads_bucket is None:
grads_bucket.grads_bucket = torch.zeros(
[state_bucket.bucket_size],
dtype=state_bucket.grad_sync_dtype,
device=self.device,
)
# Launch reductions if enough buckets are ready
filled_buckets = []
for bucket_id, bucket in sorted(self._grads_buckets.items()):
if ignore_last_bucket and bucket_id == len(self.state["buckets"]) - 1:
continue
if bucket.status == self.GradientStatus.FULLY_FILLED:
filled_buckets.append(bucket)
if filled_buckets:
self._start_bucket_grad_sync(filled_buckets)
def _start_bucket_grad_sync(self, buckets: List[GradientBucket]) -> None:
"""Synchronize gradient buckets
Gradient synchronization is asynchronous. Involves
reduce-scatter over distributed process group and allreduce
over redundant process group. Assumes grad bucket buffers are
already initialized.
"""
# Complete any outstanding grad syncs
# Note: Not needed with contiguous grad buffer since there is
# no memory benefit from eagerly freeing grad buffers.
if not self.contiguous_grad_buffer:
self._finish_bucket_grad_sync()
# Reduction operation
if self.average_grad_sync:
reduce_op = torch.distributed.ReduceOp.AVG
else:
reduce_op = torch.distributed.ReduceOp.SUM
# Initialize grad state and buffers
for bucket in buckets:
if bucket.status == self.GradientStatus.SYNCING:
self._finish_bucket_grad_sync()
bucket.status = self.GradientStatus.SYNCING
bucket.grads_generated.clear()
if self.distributed_size == 1:
bucket.sync_grads_shard = bucket.grads_bucket
else:
bucket_size = bucket.grads_bucket.numel()
shard_size = bucket_size // self.distributed_size
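                # Bucket sizes are constructed as shard_size *
                # distributed_size, so this division is exact.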
bucket.sync_grads_shard = torch.empty(
[shard_size],
dtype=bucket.grads_bucket.dtype,
device=bucket.grads_bucket.device,
)
# Side stream for communication
main_stream = torch.cuda.current_stream()
comm_stream = self._pipeline_streams[-1]
comm_stream.wait_stream(main_stream)
# Reduce-scatter over distributed process group
if self.distributed_size > 1:
with torch.cuda.stream(comm_stream):
group = self.distributed_process_group
with _coalescing_manager(group, self.device, async_ops=True) as cm:
for bucket in buckets:
_coalescing_manager_append_work(
cm,
reduce_scatter_tensor(
bucket.sync_grads_shard,
bucket.grads_bucket,
op=reduce_op,
group=group,
async_op=True,
),
)
cm.wait()
# All-reduce over redundant process group
if self.redundant_size > 1:
with torch.cuda.stream(comm_stream):
group = self.redundant_process_group
with _coalescing_manager(group, self.device, async_ops=True) as cm:
for bucket in buckets:
_coalescing_manager_append_work(
cm,
torch.distributed.all_reduce(
bucket.sync_grads_shard,
op=reduce_op,
group=group,
async_op=True,
),
)
cm.wait()
def _finish_bucket_grad_sync(self) -> None:
"""Wait for any gradient synchronizations that are in progress"""
main_stream = torch.cuda.current_stream()
comm_stream = self._pipeline_streams[-1]
main_stream.wait_stream(comm_stream)
for bucket_id, bucket in sorted(self._grads_buckets.items()):
if bucket.status == self.GradientStatus.SYNCING:
# Accumulate gradient in local shard
if bucket.grads_shard is None:
bucket.grads_shard = bucket.sync_grads_shard
else:
bucket.grads_shard.add_(bucket.sync_grads_shard)
bucket.grads_bucket = None
bucket.sync_grads_shard = None
# Reset status
bucket.status = self.GradientStatus.READY
# Cached gradient norm has been invalidated
self._grad_norm = None
def _try_start_bucket_param_sync(
self,
        params: Optional[Iterable[torch.nn.Parameter]] = None,
) -> None:
"""Attempt to launch parameter synchronization
Launches parameter synchronization for buckets corresponding
to provided parameters, if needed. If parameters are not
provided and no other synchronizations are in progress,
attempts to find a parameter that still requires
synchronization. Parameter synchronization is asynchronous.
Arguments:
params (iterable, optional): parameters to synchronize
"""
# Default behavior: only launch param sync if no other syncs
# are in progress
if params is None:
params = []
if any(
bucket.status == self.ParameterStatus.SYNCING
for bucket in self._params_buckets.values()
):
return
for bucket_id, bucket in self._params_buckets.items():
if bucket.status == self.ParameterStatus.SHARDED:
fragment = self.state["buckets"][bucket_id].fragments[-1]
param_group_id = fragment.param_group_id
param_id = fragment.param_id
param = self.param_groups[param_group_id]["params"][param_id]
params.append(param)
break
# Find buckets corresponding to params
bucket_ids = set()
for param in params:
bucket_ids.update(
fragment.bucket_id for fragment in self.state[param]["fragments"]
)
buckets = [
self._params_buckets[bucket_id]
for bucket_id in sorted(bucket_ids)
if bucket_id in self._params_buckets
]
buckets = [
bucket
for bucket in buckets
if bucket.status == self.ParameterStatus.SHARDED
]
# Launch param sync if needed
if buckets:
self._start_bucket_param_sync(buckets)
def _start_bucket_param_sync(self, buckets: List[ParameterBucket]) -> None:
"""Synchronize parameter buckets
Parameter synchronization is asynchronous. Involves all-gather
over distributed process group. Assumes param shard buffers
are already initialized.
"""
# Complete any outstanding param syncs
self._finish_bucket_param_sync()
# Initialize param state and buffers
buckets = [
bucket
for bucket in buckets
if bucket.status == self.ParameterStatus.SHARDED
]
for bucket in buckets:
bucket.status = self.ParameterStatus.SYNCING
if self.distributed_size == 1:
bucket.params_bucket = bucket.params_shard
elif bucket.params_bucket is None:
shard_size = bucket.params_shard.numel()
bucket_size = shard_size * self.distributed_size
bucket.params_bucket = torch.empty(
[bucket_size],
dtype=bucket.params_shard.dtype,
device=bucket.params_shard.device,
)
# Side stream for communication
main_stream = torch.cuda.current_stream()
comm_stream = self._pipeline_streams[-1]
comm_stream.wait_stream(main_stream)
# All-gather over distributed process group
if self.distributed_size > 1:
with torch.cuda.stream(comm_stream):
group = self.distributed_process_group
with _coalescing_manager(group, self.device, async_ops=True) as cm:
for bucket in buckets:
_coalescing_manager_append_work(
cm,
all_gather_into_tensor(
bucket.params_bucket,
bucket.params_shard,
group=group,
async_op=True,
),
)
cm.wait()
def _finish_bucket_param_sync(self) -> None:
"""Wait for any param synchronizations that are in progress"""
main_stream = torch.cuda.current_stream()
comm_stream = self._pipeline_streams[-1]
main_stream.wait_stream(comm_stream)
for bucket_id, bucket in self._params_buckets.items():
if bucket.status == self.ParameterStatus.SYNCING:
bucket.params_shard = None
bucket.status = self.ParameterStatus.READY
@contextlib.contextmanager
def no_sync(
self,
        greedy_grad_copy: bool = False,
) -> contextlib.AbstractContextManager:
"""Disable overlapped gradient synchronization
Context manager that is similar to
torch.nn.parallel.DistributedDataParallel.no_sync. The
gradients can be synchronized by calling grad_sync or step. If
overlapped gradient synchronization is enabled, gradients can
also be synchronized by leaving the context and performing a
backward pass.
Arguments:
greedy_grad_copy (bool, optional): copy parameter
gradients to buckets as soon as they are generated
(default: False)
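
        Example (an illustrative sketch of gradient accumulation over
        microbatches; ``opt``, ``model``, and ``microbatches`` are
        placeholders)::

            for i, batch in enumerate(microbatches):
                if i < len(microbatches) - 1:
                    with opt.no_sync():
                        model(batch).sum().backward()
                else:
                    model(batch).sum().backward()
            opt.step()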
"""
old_greedy_grad_copy = self.greedy_grad_copy
old_overlap_grad_sync = self.overlap_grad_sync
self.greedy_grad_copy = greedy_grad_copy
self.overlap_grad_sync = False
try:
yield
finally:
self.greedy_grad_copy = old_greedy_grad_copy
self.overlap_grad_sync = old_overlap_grad_sync
def grad_sync(self) -> None:
"""Ensure that all gradients are synchronized"""
for bucket in self.state["buckets"]:
for fragment in bucket.fragments:
param_group_id = fragment.param_group_id
param_id = fragment.param_id
param = self.param_groups[param_group_id]["params"][param_id]
if param.grad is not None:
self._grad_copy(param)
if not self.contiguous_grad_buffer:
self._try_start_bucket_grad_sync(
params=[param],
ignore_last_bucket=False,
)
self._force_bucket_grad_sync()
def param_sync(self) -> None:
"""Ensure that all parameters are synchronized"""
if self.contiguous_param_buffer:
self._param_copy(self.parameters())
else:
while self._params_buckets:
                bucket_id, bucket = next(iter(self._params_buckets.items()))
for fragment in reversed(self.state["buckets"][bucket_id].fragments):
param_id = fragment.param_id
param_group_id = fragment.param_group_id
param = self.param_groups[param_group_id]["params"][param_id]
self._param_copy(param)
self._params_buckets.clear()
@torch.no_grad()
def _local_grad_norm(
self,
parameters: Optional[Iterable[torch.nn.Parameter]] = None,
norm_type: float = 2.0,
) -> torch.Tensor:
"""Local contribution to parameter gradient norm
Returns square of 2-norm. Other norms are not yet supported.
If no parameters are provided, the norm is computed for all
parameters in optimizer. Provided parameters are assumed to be
in optimizer and to require gradients.
"""
norm_type = float(norm_type)
assert norm_type == 2.0
# Make sure that gradients have been reduced
self.grad_sync()
# Check if provided parameters are subset of all parameters
if parameters is not None:
parameters = list(parameters)
params_set = set(parameters)
all_params_set = set()
for bucket in self.state["buckets"]:
for fragment in bucket.fragments:
param_group_id = fragment.param_group_id
param_id = fragment.param_id
all_params_set.add(
self.param_groups[param_group_id]["params"][param_id]
)
if not params_set.issubset(all_params_set):
raise RuntimeError(
"Attempted to compute gradient norm for a parameter "
"that is not managed by DistributedFusedAdam"
)
if params_set == all_params_set:
parameters = None
# Group grads by dtype
grad_groups = collections.defaultdict(list)
if parameters is None:
# Compute norm of all local gradients
for bucket_id, grads_bucket in self._grads_buckets.items():
state_bucket = self.state["buckets"][bucket_id]
dtype = state_bucket.grad_sync_dtype
grad_groups[dtype].append(grads_bucket.grads_shard)
else:
# Compute norm of selected local gradients
for param in parameters:
if "fragments" not in self.state[param]:
continue
for fragment in self.state[param]["fragments"]:
if not fragment.in_local_shard:
continue
shard_start, shard_end = fragment.shard_range
if shard_end <= shard_start:
continue
bucket_id = fragment.bucket_id
grads_bucket = self._grads_buckets[bucket_id]
state_bucket = self.state["buckets"][bucket_id]
grad_groups[state_bucket.grad_sync_dtype].append(
grads_bucket.grads_shard[shard_start:shard_end]
)
# Compute norm of each group of grads
grad_norm_sq = None
for grad_group in grad_groups.values():
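            # The first output of amp_C.multi_tensor_l2norm is the L2 norm
            # over all listed tensors; square it so contributions from
            # different dtype groups can be summed.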
grad_group_norm_sq = (
multi_tensor_applier(
amp_C.multi_tensor_l2norm,
self._dummy_overflow_buf,
[grad_group],
False,
)[0]
** 2
)
if grad_norm_sq is None:
grad_norm_sq = grad_group_norm_sq
else:
grad_norm_sq += grad_group_norm_sq
if grad_norm_sq is None:
grad_norm_sq = torch.zeros([], dtype=self.dtype, device=self.device)
# Interpret norm as scalar
grad_norm_sq = grad_norm_sq.to(dtype=self.dtype, device=self.device)
grad_norm_sq = grad_norm_sq.view([])
return grad_norm_sq
def grad_norm(
self,
parameters: Optional[Iterable[torch.nn.Parameter]] = None,
norm_type: float = 2.0,
force: bool = False,
) -> torch.Tensor:
"""Gradient norm of parameters in optimizer
The norm is computed over all gradients together, as if they
were concatenated into a single vector. All provided
parameters must be managed by optimizer.
The computed value is cached to avoid redundant communication.
Arguments:
parameters (iterable, optional): an iterable of parameters
in optimizer (default: all parameters in optimizer).
norm_type (float, optional): type of the used p-norm
(default: 2). Only 2-norm is currently supported.
force (bool, optional): ignore cached value and force norm
computation (default: False).
"""
if force or self._grad_norm is None:
norm_type = float(norm_type)
assert norm_type == 2.0
grad_norm_sq = self._local_grad_norm(
parameters=parameters,
norm_type=norm_type,
)
torch.distributed.all_reduce(
grad_norm_sq,
op=torch.distributed.ReduceOp.SUM,
group=self.distributed_process_group,
)
self._grad_norm = grad_norm_sq.sqrt()
grad_norm = self._grad_norm * self._grad_scale
return grad_norm.detach()
def clip_grad_norm(
self,
max_norm: float,
parameters: Optional[Iterable[torch.nn.Parameter]] = None,
norm_type: float = 2.0,
) -> torch.Tensor:
"""Clips gradient norm of parameters in optimizer
The norm is computed over all gradients together, as if they
were concatenated into a single vector. The scaling is
deferred until the optimizer step, which should be called
immediately after this function.
The computed grad norm is cached to avoid redundant
communication.
Arguments:
max_norm (float): max norm of the gradients
parameters (iterable, optional): an iterable of parameters
in optimizer (default: all parameters in optimizer).
norm_type (float, optional): type of the used
p-norm (default: 2)
"""
assert max_norm > 0
total_norm = self.grad_norm(parameters=parameters, norm_type=norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
self._grad_scale *= clip_coef_clamped
return total_norm
def unscale_grads(self, inv_scale: torch.Tensor, *args):
"""Custom unscale function for use by AMP gradient scaler
Overflow checking is deferred to optimization step.
Arguments:
inv_scale (torch.Tensor): factor to multiply gradients
"""
self._grad_scale *= inv_scale.view([])
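        # Return a dummy per-device found_inf tensor; non-finite gradients
        # are detected later in step() via the gradient norm.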
return {self.device: torch.zeros(1, dtype=torch.float32, device=self.device)}
def step(
self,
closure: Optional[Callable] = None,
*,
grad_scaler: Optional[torch.cuda.amp.GradScaler] = None,
):
"""Apply Adam optimizer step
Arguments:
closure (callable, optional): closure to recompute loss
(default: None)
grad_scaler (torch.cuda.amp.GradScaler, optional):
gradient scaler (default: None)
"""
# Apply closure
loss = None
if closure is not None:
loss = closure()
# Make sure that parameters and gradients are synchronized
self.param_sync()
self.grad_sync()
# Apply gradient scaler if provided
# Note: We compute gradient norm to check for non-finite
# values. This is more conservative and compute intensive than
# directly checking, but it avoids extra communication if we
# have already computed gradient norm e.g. for gradient
# clipping.
if grad_scaler is not None:
grad_scaler_state = grad_scaler._per_optimizer_states[id(self)]
GradScalerOptState = torch.cuda.amp.grad_scaler.OptState
if grad_scaler_state["stage"] is GradScalerOptState.READY:
assert grad_scaler._scale is not None
self._grad_scale /= grad_scaler._scale.view([])
grad_norm = self.grad_norm()
found_inf = torch.logical_not(torch.isfinite(grad_norm))
scaler_state = grad_scaler._per_optimizer_states[id(self)]
scaler_state["found_inf_per_device"] = {found_inf.device: found_inf.float()}
if found_inf.item():
return
self._grad_scale = self._grad_scale.to(dtype=torch.float32, device=self.device)
# Initialize param shard buffers
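        # Note: buckets are registered in reverse index order so that the
        # buckets holding the parameters needed earliest in the forward
        # pass come first when param sync walks them in insertion order.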
for bucket_id in reversed(range(len(self.state["buckets"]))):
params_bucket = self.ParameterBucket()
state_bucket = self.state["buckets"][bucket_id]
shard_size = state_bucket.shard_size
if self.contiguous_param_buffer:
if not self._param_buffers:
self.init_param_buffer()
bucket_size = state_bucket.bucket_size
buffer_start = state_bucket.contiguous_buffer_offset
buffer_end = buffer_start + bucket_size
param_buffer = self._param_buffers[state_bucket.dtypes()]
params_bucket.params_bucket = param_buffer[buffer_start:buffer_end]
bucket_start = self.distributed_rank * shard_size
bucket_end = bucket_start + shard_size
params_bucket.params_shard = params_bucket.params_bucket[
bucket_start:bucket_end
]
else:
params_bucket.params_shard = torch.empty(
[shard_size],
dtype=state_bucket.param_sync_dtype,
device=self.device,
)
self._params_buckets[bucket_id] = params_bucket
# Apply optimizer step and synchronize params
self.state["step"] += 1
if (
self.distributed_size > 1
and self.overlap_param_sync
and self.state["buckets"]
):
# Local step and non-blocking param sync
# Note: Overlap param sync of first buckets with optimizer
# step of remaining buckets.
# Get buckets containing "first" parameter
fragment = self.state["buckets"][-1].fragments[-1]
param_group_id = fragment.param_group_id
param_id = fragment.param_id
param = self.param_groups[param_group_id]["params"][param_id]
first_bucket_ids = sorted(
fragment.bucket_id for fragment in self.state[param]["fragments"]
)
# Local step and launch param sync for first buckets
self._local_step(first_bucket_ids)
self._start_bucket_param_sync(
self._params_buckets[bucket_id] for bucket_id in first_bucket_ids
)
# Local step for remaining buckets
first_bucket_ids = set(first_bucket_ids)
self._local_step(
bucket_id
for bucket_id in range(len(self.state["buckets"]))
if bucket_id not in first_bucket_ids
)
# Enable pre-forward hook
for param in self.parameters():
param._pre_forward_hook_is_enabled = True
else:
# Local step and blocking param sync
self._local_step(list(range(len(self.state["buckets"]))))
self.param_sync()
return loss
def _local_step(self, bucket_ids: Iterable[int]) -> None:
"""Apply optimizer step to local shard of parameter buckets
Arguments:
bucket_ids (iterable): bucket indices
"""
        # Materialize bucket_ids so they can be iterated more than once
        bucket_ids = list(bucket_ids)
        # Optimized implementation with BF16 params and 16-bit param
# remainders
if self.store_param_remainders:
bf16_rem_buckets = set()
for bucket_id in bucket_ids:
state_bucket = self.state["buckets"][bucket_id]
if state_bucket.param_remainders_shard is not None:
bf16_rem_buckets.add(bucket_id)
if bf16_rem_buckets:
self._local_step_with_param_remainders(sorted(bf16_rem_buckets))
bucket_ids = [
bucket_id
for bucket_id in bucket_ids
if bucket_id not in bf16_rem_buckets
]
if not bucket_ids:
return
# Find param fragments for each bucket
buffers = collections.defaultdict(list) # p_in, m, v, g, p_out
for bucket_id in bucket_ids:
state_bucket = self.state["buckets"][bucket_id]
grads_bucket = self._grads_buckets[bucket_id]
params_bucket = self._params_buckets[bucket_id]
# Optimizer state buffers for local shard
fragments = state_bucket.fragments
exp_avg = state_bucket.exp_avg_shard
exp_avg_sq = state_bucket.exp_avg_sq_shard
grads = grads_bucket.grads_shard
params_out = params_bucket.params_shard
# Find param fragments in local shard
for fragment in fragments:
if not fragment.in_local_shard:
continue
shard_start, shard_end = fragment.shard_range
if shard_end <= shard_start:
continue
shard_range = slice(shard_start, shard_end)
param_group_id = fragment.param_group_id
param_id = fragment.param_id
if state_bucket.params_shard is None:
param = self.param_groups[param_group_id]["params"][param_id]
param_range = slice(*fragment.shard_param_range)
param_fragment = param.detach().view(-1)[param_range]
param_fragment = param_fragment.to(
dtype=state_bucket.dtype, device=self.device
)
else:
params_shard = state_bucket.params_shard
param_fragment = params_shard[shard_range]
buffers_key = (
param_group_id,
state_bucket.dtype,
state_bucket.grad_sync_dtype,
state_bucket.param_sync_dtype,
)
buffers[buffers_key].append(
[
param_fragment,
exp_avg[shard_range],
exp_avg_sq[shard_range],
grads[shard_range],
params_out[shard_range],
]
)
# Apply optimizer step to each param group
for (group_id, _, _, _), group_buffers in buffers.items():
group = self.param_groups[group_id]
beta1, beta2 = group["betas"]
multi_tensor_applier(
distributed_adam_cuda.multi_tensor_fused_adam,
self._dummy_overflow_buf,
list(zip(*group_buffers)),
self._grad_scale,
group["lr"],
beta1,
beta2,
group["eps"],
self.state["step"],
1 if self.adam_w_mode else 0,
1 if group["bias_correction"] else 0,
group["weight_decay"],
)
def _local_step_with_param_remainders(
self,
bucket_ids: Iterable[int],
) -> None:
"""Apply optimizer step to local shard of parameter bucket
This is an experimental implementation that expects
store_params=False and store_param_remainders=True. The
optimizer dtype must be FP32 and the params must all be BF16
and GPU.
Arguments:
bucket_ids (iterable): bucket indices
"""
# Find param fragments for each bucket
buffers = collections.defaultdict(list) # p_in, p_rem, m, v, g, p_out
for bucket_id in bucket_ids:
state_bucket = self.state["buckets"][bucket_id]
grads_bucket = self._grads_buckets[bucket_id]
params_bucket = self._params_buckets[bucket_id]
# State buffers for local shard
fragments = state_bucket.fragments
param_remainders_shard = state_bucket.param_remainders_shard
exp_avg = state_bucket.exp_avg_shard
exp_avg_sq = state_bucket.exp_avg_sq_shard
grads = grads_bucket.grads_shard
params_out = params_bucket.params_shard
# Find param fragments in local shard
for fragment in fragments:
if not fragment.in_local_shard:
continue
shard_start, shard_end = fragment.shard_range
if shard_end <= shard_start:
continue
shard_range = slice(shard_start, shard_end)
param_group_id = fragment.param_group_id
param_id = fragment.param_id
buffers_key = (param_group_id, state_bucket.grad_sync_dtype)
param = self.param_groups[param_group_id]["params"][param_id]
param_range = slice(*fragment.shard_param_range)
param_fragment = param.detach().view(-1)[param_range]
param_fragment = param_fragment.to(
dtype=torch.bfloat16, device=self.device
)
buffers[buffers_key].append(
[
param_fragment,
param_remainders_shard[shard_range],
exp_avg[shard_range],
exp_avg_sq[shard_range],
grads[shard_range],
params_out[shard_range],
]
)
# Apply optimizer step to each param group
for (group_id, _), group_buffers in buffers.items():
group = self.param_groups[group_id]
beta1, beta2 = group["betas"]
multi_tensor_applier(
distributed_adam_cuda.multi_tensor_fused_adam_with_param_remainders,
self._dummy_overflow_buf,
list(zip(*group_buffers)),
self._grad_scale,
group["lr"],
beta1,
beta2,
group["eps"],
self.state["step"],
1 if self.adam_w_mode else 0,
1 if group["bias_correction"] else 0,
group["weight_decay"],
)
def state_dict(
self,
*,
state_dict_format: Optional[int] = None,
gather_on_root: Optional[bool] = None,
) -> Optional[dict]:
"""Get dictionary containing optimizer state
All ranks in the process group must call this function since
it performs communication. The same optimizer state is
returned on all ranks.
Arguments:
state_dict_format (int, optional): Tag for custom or
deprecated state dict format.
gather_on_root (bool, optional): Option for deprecated v1
format.
"""
# Default state dict format
if state_dict_format is None:
state_dict_format = 2
# Construct state dict
state_dict = None
if state_dict_format == 1:
# Deprecated v1 format
kwargs = {}
if gather_on_root is not None:
kwargs["gather_on_root"] = gather_on_root
state_dict = self._state_dict_v1(**kwargs)
elif state_dict_format == 2:
# Default v2 format
state_dict = self._state_dict_v2()
else:
# Unrecognized format
raise ValueError(f"Unrecognized state dict format ({state_dict_format})")
# Add format tag to state dict
if state_dict is not None:
state_dict["format"] = state_dict_format
return state_dict
def _state_dict_v1(self, gather_on_root: bool = True) -> Optional[dict]:
"""Get dictionary containing optimizer state (deprecated v1 format)
Default behavior is to perform communication so that the
entire optimizer state is returned on the root rank in the
process group. In this case, all ranks in the process group
must enter this function and no value is returned on non-root
ranks.
Arguments:
gather_on_root (bool, optional): Gather state from all
ranks on the root rank (default: True)
"""
warnings.warn(
"Making optimizer state dictionary in deprecated v1 format. "
"Future support is not guaranteed."
)
state_dict = super().state_dict()
if not gather_on_root:
return state_dict
# Finish any asynchronous communication
self.grad_sync()
self.param_sync()
# Export local state to byte string
state_bytes = io.BytesIO()
torch.save(state_dict, state_bytes)
state_bytes.seek(0)
state_bytes_view = state_bytes.getbuffer()
# Get data sizes on all ranks
local_state_size = len(state_bytes_view)
state_sizes = [None] * self.distributed_size
torch.distributed.all_gather_object(
state_sizes,
local_state_size,
group=self.process_group,
)
max_state_size = max(state_sizes)
# Construct workspace buffers
chunk_size = (
self.default_shard_size * torch.finfo(self.grad_sync_dtype).bits // 8
)
if self.distributed_rank == 0:
gathered_state_bytes = [
torch.empty([size], dtype=torch.uint8, device="cpu")
for size in state_sizes
]
gathered_state_bytes[0].copy_(
torch.frombuffer(state_bytes_view, dtype=torch.uint8)
)
gathered_chunks_buffers = [
torch.empty(
[chunk_size * self.distributed_size],
dtype=torch.uint8,
device=self.device,
)
for _ in range(self.pipeline_size)
]
else:
chunk_buffers = [
torch.empty(
[chunk_size],
dtype=torch.uint8,
device=self.device,
)
for _ in range(self.pipeline_size)
]
# Split data into chunks and gather on root rank
# Note: Assuming we are using the NCCL backend, communication
# must happen on the GPU. We split the data into fixed-size
# chunks to limit GPU memory usage.
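        # For illustration (hypothetical numbers, not from this file): with
        # fp16 gradients and a shard size of 2**20 elements, chunk_size is
        # 2 MiB, so the root rank stages at most
        # pipeline_size * distributed_size * 2 MiB of workspace at a time.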
main_stream = torch.cuda.current_stream()
for stream in self._pipeline_streams:
stream.wait_stream(main_stream)
for stream_id, offset in enumerate(range(0, max_state_size, chunk_size)):
stream_id %= self.pipeline_size
stream = self._pipeline_streams[stream_id]
with torch.cuda.stream(stream):
# Buffers for chunk
if self.distributed_rank == 0:
gathered_chunks = [
gathered_chunks_buffers[stream_id][
i * chunk_size : (i + 1) * chunk_size
]
for i in range(self.distributed_size)
]
else:
chunk = chunk_buffers[stream_id]
# Copy to GPU
if self.distributed_rank != 0 and offset < local_state_size:
local_chunk_size = min(chunk_size, local_state_size - offset)
chunk[:local_chunk_size].copy_(
torch.frombuffer(
state_bytes_view,
dtype=torch.uint8,
count=local_chunk_size,
offset=offset,
),
non_blocking=True,
)
# Gather on root
# Note: Call in main stream to avoid memory pool
# overheads from internal memory allocations in
# gather.
main_stream.wait_stream(stream)
with torch.cuda.stream(main_stream):
if self.distributed_rank == 0:
if self._gather_no_copy:
no_copy_kwarg = {"no_copy": True}
else:
no_copy_kwarg = {}
torch.distributed.gather(
gathered_chunks[0],
gathered_chunks,
dst=self.process_group_root,
group=self.process_group,
**no_copy_kwarg,
)
else:
torch.distributed.gather(
chunk,
dst=self.process_group_root,
group=self.process_group,
)
stream.wait_stream(main_stream)
# Copy back to CPU
if self.distributed_rank == 0:
for rank in range(1, self.distributed_size):
rank_chunk_start = offset
rank_chunk_end = min(offset + chunk_size, state_sizes[rank])
rank_chunk_size = rank_chunk_end - rank_chunk_start
if rank_chunk_size > 0:
src = gathered_chunks[rank][:rank_chunk_size]
dst = gathered_state_bytes[rank][
rank_chunk_start:rank_chunk_end
]
dst.copy_(src, non_blocking=True)
# Synchronize GPU
for stream in self._pipeline_streams:
main_stream.wait_stream(stream)
main_stream.synchronize()
# Return gathered state data on root rank
if self.distributed_rank == 0:
return {"gathered_states": gathered_state_bytes}
else:
return None
@torch.no_grad()
def _state_dict_v2(self) -> Optional[dict]:
"""Get dictionary containing optimizer state (default v2 format)
All ranks in the process group must call this function since
it performs communication. The same optimizer state is
returned on all ranks.
"""
# Make sure params are initialized
self.init_params()
# Finish any asynchronous communication
self.grad_sync()
self.param_sync()
# Get state dict from base class
state_dict = super().state_dict()
state_dict["state"] = {"step": state_dict["state"]["step"]}
# Initialize state dict with CPU buffers
for param in self.parameters():
# Get param index in state dict
fragment = self.state[param]["fragments"][0]
param_group_id = fragment.param_group_id
param_id = fragment.param_id
index = state_dict["param_groups"][param_group_id]["params"][param_id]
# Construct CPU buffers with optimizer state
state_dict["state"][index] = dict(
param=torch.zeros_like(param, dtype=self.dtype, device="cpu"),
exp_avg=torch.zeros_like(param, dtype=self.dtype, device="cpu"),
exp_avg_sq=torch.zeros_like(param, dtype=self.dtype, device="cpu"),
)
# Workspace buffers for gathering shards on root rank
num_buckets = len(self.state["buckets"])
max_bucket_size = max(bucket.bucket_size for bucket in self.state["buckets"])
bucket_buffers = [
torch.empty(
[max_bucket_size],
dtype=self.dtype,
device=self.device,
)
for _ in range(self.pipeline_size)
]
if self.store_param_remainders:
max_shard_size = max(bucket.shard_size for bucket in self.state["buckets"])
shard_bf16_buffers = [
torch.empty([max_shard_size], dtype=torch.bfloat16, device=self.device)
for _ in range(self.pipeline_size)
]
# Synchronize streams
main_stream = torch.cuda.current_stream()
for stream in self._pipeline_streams:
stream.wait_stream(main_stream)
def pack_param_shard(bucket_id: int) -> torch.Tensor:
"""Pack local shard of param values into contiguous buffer"""
# Stream objects
stream_id = bucket_id % self.pipeline_size
stream = self._pipeline_streams[stream_id]
# Bucket objects
bucket = self.state["buckets"][bucket_id]
shard_size = bucket.shard_size
# Case 1: Param state is already packed
if bucket.params_shard is not None:
return bucket.params_shard
# Case 2: Pack BF16 model params with 16-bit remainders
if bucket.param_remainders_shard is not None:
with torch.cuda.stream(stream):
# Pack bf16 param values
shard_bf16 = shard_bf16_buffers[stream_id][:shard_size]
buffers_in = []
buffers_out = []
for fragment in bucket.fragments:
if not fragment.in_local_shard:
continue
param_id = fragment.param_id
param_group_id = fragment.param_group_id
param_range = slice(*fragment.shard_param_range)
shard_range = slice(*fragment.shard_range)
param = self.param_groups[param_group_id]["params"][param_id]
buffers_in.append(param.view(-1)[param_range])
buffers_out.append(shard_bf16[shard_range])
_multi_tensor_copy(
buffers_in,
buffers_out,
dummy_overflow_buf=self._dummy_overflow_buf,
)
# Reconstruct fp32 from bf16 and remainders
shard_range = slice(
shard_size * self.distributed_rank,
shard_size * (self.distributed_rank + 1),
)
shard_fp32 = bucket_buffers[stream_id][shard_range]
_bf16_rem_to_fp32(
shard_bf16,
bucket.param_remainders_shard,
shard_fp32,
)
return shard_fp32
# Case 3: Pack model params
with torch.cuda.stream(stream):
shard_range = slice(
shard_size * self.distributed_rank,
shard_size * (self.distributed_rank + 1),
)
shard = bucket_buffers[stream_id][shard_range]
buffers_in = []
buffers_out = []
for fragment in bucket.fragments:
if not fragment.in_local_shard:
continue
param_id = fragment.param_id
param_group_id = fragment.param_group_id
param_range = slice(*fragment.shard_param_range)
shard_range = slice(*fragment.shard_range)
param = self.param_groups[param_group_id]["params"][param_id]
buffers_in.append(param.view(-1)[param_range])
buffers_out.append(shard[shard_range])
_multi_tensor_copy(
buffers_in,
buffers_out,
dummy_overflow_buf=self._dummy_overflow_buf,
)
return shard
def start_all_gather(bucket_id: int, shard: torch.Tensor) -> None:
"""Launch all-gather on bucket shards
Communication is done on main stream to ensure consistent
ordering.
"""
# Stream objects
stream_id = bucket_id % self.pipeline_size
stream = self._pipeline_streams[stream_id]
# Workspace buffer
bucket = self.state["buckets"][bucket_id]
bucket_size = bucket.bucket_size
bucket_buffer = bucket_buffers[stream_id][:bucket_size]
# All-gather shards
main_stream.wait_stream(stream)
all_gather_into_tensor(
bucket_buffer,
shard,
group=self.distributed_process_group,
)
stream.wait_stream(main_stream)
def finish_all_gather(bucket_id: int, state_dict_key: str) -> None:
"""Finish all-gather on bucket shards
Data is copied into state dict CPU buffers.
Splitting the NCCL all-gather and the CPU memcpys into
separate stages helps achieve good overlap when kernel
launches are serialized with
CUDA_DEVICE_MAX_CONNECTIONS=1. In particular, the pipeline
calls start_all_gather(bucket_id+1) before
finish_all_gather(bucket_id).
"""
# Stream objects
stream_id = bucket_id % self.pipeline_size
stream = self._pipeline_streams[stream_id]
# Bucket objects
bucket = self.state["buckets"][bucket_id]
bucket_size = bucket.bucket_size
bucket_buffer = bucket_buffers[stream_id][:bucket_size]
# Update state dict
with torch.cuda.stream(stream):
for fragment in bucket.fragments:
param_range = slice(*fragment.param_range)
bucket_range = slice(*fragment.bucket_range)
param_group_id = fragment.param_group_id
param_id = fragment.param_id
index = state_dict["param_groups"][param_group_id]["params"][
param_id
]
state_buffer = state_dict["state"][index][state_dict_key]
state_fragment = state_buffer.view(-1)[param_range]
bucket_fragment = bucket_buffer[bucket_range]
state_fragment.copy_(bucket_fragment, non_blocking=True)
# All-gather param state
for bucket_id in range(num_buckets):
shard = pack_param_shard(bucket_id)
start_all_gather(bucket_id, shard)
if bucket_id > 0:
finish_all_gather(bucket_id - 1, "param")
if bucket_id == num_buckets - 1:
finish_all_gather(bucket_id, "param")
# All-gather exp_avg state
for bucket_id in range(num_buckets):
shard = self.state["buckets"][bucket_id].exp_avg_shard
start_all_gather(bucket_id, shard)
if bucket_id > 0:
finish_all_gather(bucket_id - 1, "exp_avg")
if bucket_id == num_buckets - 1:
finish_all_gather(bucket_id, "exp_avg")
# All-gather exp_avg_sq state
for bucket_id in range(num_buckets):
shard = self.state["buckets"][bucket_id].exp_avg_sq_shard
start_all_gather(bucket_id, shard)
if bucket_id > 0:
finish_all_gather(bucket_id - 1, "exp_avg_sq")
if bucket_id == num_buckets - 1:
finish_all_gather(bucket_id, "exp_avg_sq")
# Synchronize GPU and return
for stream in self._pipeline_streams:
main_stream.wait_stream(stream)
main_stream.synchronize()
return state_dict
def load_state_dict(self, state_dict: dict) -> None:
"""Load optimizer state"""
# Figure out state dict format
state_dict_format = state_dict.pop("format", None)
if state_dict_format is None:
if "buckets" in state_dict or "gathered_states" in state_dict:
state_dict_format = 1
else:
state_dict_format = 2
# Load state dict
if state_dict_format == 1:
# Deprecated v1 format
self._load_state_dict_v1(state_dict)
elif state_dict_format == 2:
# Default v2 format
self._load_state_dict_v2(state_dict)
else:
# Unrecognized format
raise ValueError(f"Unrecognized state dict format ({state_dict_format})")
def _load_state_dict_v1(self, state_dict: dict) -> None:
"""Load optimizer state (deprecated v1 format)
Parallel configuration (e.g. process group sizes) and
optimizer options must match between saving and loading the
optimizer state.
"""
warnings.warn(
"Loading checkpoint in deprecated v1 format. "
"Future support is not guaranteed."
)
# Get state dict for current rank
if "gathered_states" in state_dict:
# Deallocate distributed optimizer state to reduce GPU
# memory usage
if "buckets" in self.state:
del self.state["buckets"]
# Get state for current rank and parse byte string
state_bytes = state_dict["gathered_states"][self.distributed_rank]
state_bytes = io.BytesIO(state_bytes.numpy())
state_dict = torch.load(state_bytes)
# Load state dict
super().load_state_dict(state_dict)
# Handle old state dicts without per-bucket dtypes
for bucket in self.state["buckets"]:
if getattr(bucket, "dtype", None) is None:
bucket.dtype = self.dtype
if getattr(bucket, "grad_sync_dtype", None) is None:
bucket.grad_sync_dtype = self.grad_sync_dtype
if getattr(bucket, "param_sync_dtype", None) is None:
bucket.param_sync_dtype = self.param_sync_dtype
@torch.no_grad()
def _load_state_dict_v2(self, state_dict: dict) -> None:
"""Load optimizer state (default v2 format)
The parallel configuration and optimizer options are allowed
to differ between saving and loading the model.
"""
# Make sure params are initialized
self.init_params()
# Finish any asynchronous communication
self.grad_sync()
self.param_sync()
# Load step count
self.state["step"] = state_dict["state"]["step"]
# Load state for each param
for param in self.parameters():
# Get param index in state dict
fragment = self.state[param]["fragments"][0]
param_id = fragment.param_id
param_group_id = fragment.param_group_id
index = state_dict["param_groups"][param_group_id]["params"][param_id]
# Buffers in state dict
param_state = state_dict["state"][index]["param"].view(-1)
exp_avg = state_dict["state"][index]["exp_avg"].view(-1)
exp_avg_sq = state_dict["state"][index]["exp_avg_sq"].view(-1)
# Copy to local shard of state buckets
for fragment in self.state[param]["fragments"]:
if not fragment.in_local_shard:
continue
bucket = self.state["buckets"][fragment.bucket_id]
param_start, param_end = fragment.shard_param_range
shard_start, shard_end = fragment.shard_range
if bucket.params_shard is not None:
bucket.params_shard[shard_start:shard_end].copy_(
param_state[param_start:param_end],
non_blocking=True,
)
if bucket.param_remainders_shard is not None:
param_state_int16 = param_state.unsqueeze(-1).view(torch.int16)
bucket.param_remainders_shard[shard_start:shard_end].copy_(
param_state_int16[param_start:param_end, 0],
non_blocking=True,
)
bucket.exp_avg_shard[shard_start:shard_end].copy_(
exp_avg[param_start:param_end],
non_blocking=True,
)
bucket.exp_avg_sq_shard[shard_start:shard_end].copy_(
exp_avg_sq[param_start:param_end],
non_blocking=True,
)
# Synchronize GPU
torch.cuda.current_stream().synchronize()
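# --- Illustrative sketch (not part of the original file) ---------------------
# A hedged example of checkpointing with the default v2 state dict format
# described above. All names (`model`, `optimizer`, `path`) are hypothetical;
# `optimizer` is assumed to be an instance of the distributed Adam optimizer
# defined in this module, and torch.distributed is assumed to be initialized,
# since state_dict()/load_state_dict() perform collective communication and
# must be called by every rank in the process group.
def _example_v2_checkpoint_roundtrip(model, optimizer, path="checkpoint.pt"):
    # Save: the same state dict is returned on all ranks, so any one rank may write it.
    checkpoint = {"model": model.state_dict(), "optimizer": optimizer.state_dict()}
    if torch.distributed.get_rank() == 0:
        torch.save(checkpoint, path)
    torch.distributed.barrier()
    # Load: the v2 format tolerates a different parallel configuration than the
    # one used when saving.
    checkpoint = torch.load(path, map_location="cpu")
    model.load_state_dict(checkpoint["model"])
    optimizer.load_state_dict(checkpoint["optimizer"])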
|
apex-master
|
apex/contrib/optimizers/distributed_fused_adam.py
|
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FP16_Optimizer(object):
"""
    :class:`FP16_Optimizer` is a cut-down version of apex.fp16_utils.FP16_Optimizer.
Designed only to wrap apex.contrib.optimizers.FusedAdam, FusedSGD.
Refer to apex.fp16_utils documents for more information.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = apex.contrib.optimizers.FusedSGD(model.parameters())
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
# loss.backward() becomes:
optimizer.backward(loss)
...
Example with dynamic loss scaling::
...
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
# optional arg to control dynamic loss scaling behavior
# dynamic_loss_args={'scale_window' : 500})
# Usually, dynamic_loss_args is not necessary.
"""
def __init__(self,
init_optimizer,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True):
print("\nThis fp16_optimizer is designed to only work with apex.contrib.optimizers.*")
print("To update, use updated optimizers with AMP.")
        # The fused optimizer does all the work. We need this layer for two reasons:
# 1. maintain same user API from apex.fp16_utils
# 2. keep common stuff here in case we need to add new fused optimizer later
        if not torch.cuda.is_available():
raise SystemError("Cannot use fp16 without CUDA.")
self.optimizer = init_optimizer
self.fp16_groups = [] # model params
self.fp32_groups = [] # master weights
# iterate over param_groups
for param_group in self.optimizer.param_groups:
fp16_group = []
fp32_group = []
for p in param_group['params']:
fp16_group.append(p)
fp32_group.append(p.clone().float().detach())
self.fp16_groups.append(fp16_group)
self.fp32_groups.append(fp32_group)
param_group['params'] = fp32_group
if multi_tensor_applier.available:
import amp_C
self.overflow_buf = torch.cuda.IntTensor([0])
self.multi_tensor_l2norm=amp_C.multi_tensor_l2norm
else:
raise RuntimeError('FP16_Optimizer requires cuda extensions')
        # We may have a way of fusing dynamic loss scaling later. It is not supported for now.
if dynamic_loss_scale:
if dynamic_loss_args is not None:
raise SystemError("Do not support dynamic loss scale args for now.")
self.dynamic_loss_scale = True
self.cur_scale = 2**16
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = 2
self.scale_window = 1000
else:
self.dynamic_loss_scale = False
self.cur_iter = 0
self.cur_scale = static_loss_scale
self.verbose = verbose
def zero_grad(self, set_grads_to_None=True):
"""
Zero FP16 parameter grads.
"""
# FP32 grad should never exist.
# For speed, set model fp16 grad to None by default
for group in self.fp16_groups:
for p in group:
if set_grads_to_None:
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def step(self, closure=None):
"""
Not supporting closure.
"""
fp16_grads = []
norm_groups = []
skip = False
for group in self.fp16_groups:
fp16_grad = []
for i, p in enumerate(group):
fp16_grad.append(p.grad)
fp16_grads.append(fp16_grad)
# nan check
self.overflow_buf.zero_()
for fp16_grad in fp16_grads:
if len(fp16_grad) > 0:
norm, norm_per_tensor = multi_tensor_applier(self.multi_tensor_l2norm,
self.overflow_buf,
[fp16_grad], True)
norm_groups.append(norm)
if self.overflow_buf.item() != 0:
skip = True
if skip:
self._update_scale(skip)
return
# norm is in fact norm*cur_scale
self.optimizer.step(grads=fp16_grads,
output_params=self.fp16_groups,
scale=self.cur_scale,
grad_norms=norm_groups)
self._update_scale(False)
return
def backward(self, loss):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
scaled_loss = (loss.float()) * self.cur_scale
scaled_loss.backward()
def _update_scale(self, skip):
if self.dynamic_loss_scale:
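            # Example schedule (for illustration): with scale_factor=2 and
            # scale_window=1000, the scale is halved (floored at 1) on every
            # overflow and doubled after 1000 consecutive overflow-free
            # iterations.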
if skip:
if self.verbose:
print("\nGrad overflow on iteration", self.cur_iter)
print("Using dynamic loss scale of", self.cur_scale)
self.cur_scale = max(self.cur_scale/self.scale_factor, 1)
self.last_overflow_iter = self.cur_iter
else:
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
self.cur_scale *= self.scale_factor
else:
if skip:
print("\nGrad overflow on iteration", self.cur_iter)
print("Using static loss scale of", self.cur_scale)
        self.cur_iter += 1
return
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['cur_scale'] = self.cur_scale
state_dict['cur_iter'] = self.cur_iter
if state_dict['dynamic_loss_scale']:
state_dict['last_overflow_iter'] = self.last_overflow_iter
state_dict['scale_factor'] = self.scale_factor
state_dict['scale_window'] = self.scale_window
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
state_dict['fp32_groups'] = self.fp32_groups
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.cur_scale = state_dict['cur_scale']
self.cur_iter = state_dict['cur_iter']
if state_dict['dynamic_loss_scale']:
self.last_overflow_iter = state_dict['last_overflow_iter']
self.scale_factor = state_dict['scale_factor']
self.scale_window = state_dict['scale_window']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current, saved in zip(self.fp32_groups, state_dict['fp32_groups']):
for _current, _saved in zip(current, saved):
_current.data.copy_(_saved.data)
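# --- Illustrative sketch (not part of the original file) ---------------------
# A hedged usage example following the class docstring: one fp16 training step
# with FP16_Optimizer wrapping apex.contrib.optimizers.FusedSGD. `model`,
# `inputs`, and `targets` are hypothetical placeholders; the model is assumed
# to already be on the GPU in half precision (model.cuda().half()).
def _example_fp16_training_step(model, inputs, targets):
    from apex.contrib.optimizers.fused_sgd import FusedSGD
    optimizer = FusedSGD(model.parameters(), lr=1e-3, momentum=0.9)
    optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs).float(), targets.float())
    # loss.backward() becomes optimizer.backward(loss): the loss is scaled by
    # cur_scale before backprop so fp16 gradients do not underflow.
    optimizer.backward(loss)
    # step() checks for inf/nan gradients, skips the update and shrinks the
    # scale on overflow, and otherwise applies the fused SGD update.
    optimizer.step()
    return loss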
|
apex-master
|
apex/contrib/optimizers/fp16_optimizer.py
|
import torch
import importlib
import math
from apex.multi_tensor_apply import multi_tensor_applier
class FusedLAMB(torch.optim.Optimizer):
"""Implements LAMB algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" --global-option="--deprecated_fused_lamb" ./``.
This version of fused LAMB implements 2 fusions.
* Fusion of the LAMB update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.contrib.optimizers.FusedLAMB`'s usage is identical to any ordinary Pytorch optimizer::
opt = apex.contrib.optimizers.FusedLAMB(model.parameters(), lr = ....)
...
opt.step()
:class:`apex.optimizers.FusedLAMB` may be used with or without Amp. If you wish to use :class:`FusedLAMB` with Amp,
you may choose any ``opt_level``::
opt = apex.optimizers.FusedLAMB(model.parameters(), lr = ....)
model, opt = amp.initialize(model, opt, opt_level="O0" or "O1 or "O2")
...
opt.step()
In general, ``opt_level="O1"`` is recommended.
LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its norm. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
NOT SUPPORTED now! (default: False)
adam_w_mode (boolean, optional): Apply L2 regularization or weight decay
            True for decoupled weight decay (also known as AdamW) (default: True)
        grad_averaging (bool, optional): whether to apply (1-beta2) to grad when
            calculating running averages of gradient. (default: True)
        set_grad_none (bool, optional): whether to set grad to None when zero_grad()
method is called. (default: True)
max_grad_norm (float, optional): value used to clip global grad norm
(default: 1.0)
.. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, bias_correction=True,
betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01,
amsgrad=False, adam_w_mode=True,
grad_averaging=True, set_grad_none=True,
max_grad_norm=1.0):
if amsgrad:
raise RuntimeError('FusedLAMB does not support the AMSGrad variant.')
defaults = dict(lr=lr, bias_correction=bias_correction,
betas=betas, eps=eps, weight_decay=weight_decay,
grad_averaging=grad_averaging,
max_grad_norm=max_grad_norm)
super(FusedLAMB, self).__init__(params, defaults)
if multi_tensor_applier.available:
import amp_C
self.multi_tensor_l2norm=amp_C.multi_tensor_l2norm
self._dummy_overflow_buf = torch.cuda.IntTensor([0])
fused_lamb_cuda = importlib.import_module("fused_lamb_cuda")
self.multi_tensor_lamb = fused_lamb_cuda.lamb
else:
raise RuntimeError('apex.contrib.optimizers.FusedLAMB requires cuda extensions')
self.adam_w_mode = 1 if adam_w_mode else 0
self.set_grad_none = set_grad_none
def zero_grad(self):
if self.set_grad_none:
for group in self.param_groups:
for p in group['params']:
p.grad = None
else:
super(FusedLAMB, self).zero_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
# create separate grad lists for fp32 and fp16 params
g_all_32, g_all_16 = [], []
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
if p.dtype == torch.float32:
g_all_32.append(p.grad.data)
elif p.dtype == torch.float16:
g_all_16.append(p.grad.data)
else:
                    raise RuntimeError('FusedLAMB only supports fp16 and fp32.')
g_norm_32, g_norm_16 = 0.0, 0.0
# compute grad norm for two lists
if len(g_all_32) > 0:
g_norm_32 = multi_tensor_applier(self.multi_tensor_l2norm,
self._dummy_overflow_buf,
[g_all_32], False)[0].item()
if len(g_all_16) > 0:
g_norm_16 = multi_tensor_applier(self.multi_tensor_l2norm,
self._dummy_overflow_buf,
[g_all_16], False)[0].item()
# blend two grad norms to get global grad norm
global_grad_norm = math.sqrt(g_norm_32 * g_norm_32 + g_norm_16 * g_norm_16)
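        # For illustration: g_norm_32 = 3.0 and g_norm_16 = 4.0 blend into
        # global_grad_norm = sqrt(3.0**2 + 4.0**2) = 5.0.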
max_grad_norm = self.defaults['max_grad_norm']
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
grad_averaging = 1 if group['grad_averaging'] else 0
# assume same step across group now to simplify things
            # per-parameter step can easily be supported by making it a tensor, or by passing a list into the kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
# create lists for multi-tensor apply
g_16, p_16, m_16, v_16 = [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError('FusedLAMB does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if p.dtype == torch.float16:
g_16.append(p.grad.data)
p_16.append(p.data)
m_16.append(state['exp_avg'])
v_16.append(state['exp_avg_sq'])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state['exp_avg'])
v_32.append(state['exp_avg_sq'])
else:
                    raise RuntimeError('FusedLAMB only supports fp16 and fp32.')
if(len(g_16) > 0):
multi_tensor_applier(self.multi_tensor_lamb,
self._dummy_overflow_buf,
[g_16, p_16, m_16, v_16],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
bias_correction,
group['weight_decay'],
grad_averaging,
self.adam_w_mode,
global_grad_norm,
max_grad_norm)
if(len(g_32) > 0):
multi_tensor_applier(self.multi_tensor_lamb,
self._dummy_overflow_buf,
[g_32, p_32, m_32, v_32],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
bias_correction,
group['weight_decay'],
grad_averaging,
self.adam_w_mode,
global_grad_norm,
max_grad_norm)
return loss
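# --- Illustrative sketch (not part of the original file) ---------------------
# A hedged usage example mirroring the class docstring: FusedLAMB is used like
# any ordinary PyTorch optimizer. `model`, `data`, and `target` are
# hypothetical placeholders; a CUDA build of apex with the deprecated fused
# LAMB extension is required.
def _example_fused_lamb_step(model, data, target):
    optimizer = FusedLAMB(model.parameters(), lr=1e-3, weight_decay=0.01)
    optimizer.zero_grad()
    loss = torch.nn.functional.cross_entropy(model(data), target)
    loss.backward()
    # step() computes per-dtype L2 grad norms with multi_tensor_l2norm, blends
    # them into a global norm, and applies the fused LAMB update.
    optimizer.step()
    return loss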
|
apex-master
|
apex/contrib/optimizers/fused_lamb.py
|
import types
import torch
from torch.optim.optimizer import Optimizer, required
from apex.multi_tensor_apply import multi_tensor_applier
class FusedSGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
This version of fused SGD implements 2 fusions.
* Fusion of the SGD update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.contrib.optimizers.FusedSGD` should be used without AMP.
:class:`apex.contrib.optimizers.FusedSGD` only works in the case where all parameters require grad.
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
Example:
model = ...
model.half()
optimizer = apex.contrib.optimizers.FusedSGD(model.parameters())
# wrap with FP16_Optimizer
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
optimizer.zero_grad()
...
optimizer.backward(loss)
        optimizer.step()
__ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
.. note::
The implementation of SGD with Momentum/Nesterov subtly differs from
Sutskever et. al. and implementations in some other frameworks.
Considering the specific case of Momentum, the update can be written as
.. math::
v = \rho * v + g \\
p = p - lr * v
where p, g, v and :math:`\rho` denote the parameters, gradient,
velocity, and momentum respectively.
This is in contrast to Sutskever et. al. and
other frameworks which employ an update of the form
.. math::
v = \rho * v + lr * g \\
p = p - v
The Nesterov version is analogously modified.
"""
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False,
wd_after_momentum=False,
materialize_master_grads=True):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(FusedSGD, self).__init__(params, defaults)
self.wd_after_momentum = wd_after_momentum
if multi_tensor_applier.available:
import amp_C
# Skip buffer
self._dummy_overflow_buf = torch.cuda.IntTensor([0])
self.multi_tensor_sgd = amp_C.multi_tensor_sgd
else:
raise RuntimeError('apex.contrib.optimizers.FusedSGD requires cuda extensions')
def __setstate__(self, state):
super(FusedSGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def get_momentums(self, params):
momentums = []
first_run = True
for p in params:
param_state = self.state[p]
            # torch.optim.SGD initializes momentum in the main loop; we have
            # to do it here and track whether we have done so, so that
            # momentum application can be skipped in the main kernel.
if 'momentum_buffer' not in param_state:
first_run = True
buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
momentums.append(buf)
else:
first_run = False
momentums.append(param_state['momentum_buffer'])
return momentums, first_run
def step(self, closure=None, grads=None, output_params=None, scale=1., grad_norms=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
output_params (list of tensors, optional): A reduced precision copy
of the updated weights written out in addition to the regular
updated weights. Have to be of same type as gradients. (default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
if hasattr(self, "_amp_stash"):
raise RuntimeError('apex.contrib.optimizers.FusedSGD should not be used with AMP.')
loss = None
if closure is not None:
loss = closure()
if grads is None:
raise RuntimeError('apex.contrib.optimizers.FusedSGD must be wrapped \
with apex.contrib.optimizers.FP16_Optimizer \
which provides grads.')
# backward compatibility
# assuming a list/generator of parameter means single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0])!=list:
grads_group = [grads]
else:
grads_group = grads
if output_params is None:
raise RuntimeError('apex.contrib.optimizers.FusedSGD must be wrapped \
with apex.contrib.optimizers.FP16_Optimizer \
which provides output_params.')
elif isinstance(output_params, types.GeneratorType):
output_params_group = [output_params]
elif type(output_params[0])!=list:
output_params_group = [output_params]
else:
output_params_group = output_params
for group, grads_this_group, output_params_this_group in zip(self.param_groups,
grads_group,
output_params_group):
if grads_this_group is None or output_params_this_group is None:
raise RuntimeError('apex.contrib.optimizers.FusedSGD only works \
when all parameters require grad.')
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
lr = group['lr']
first_runs = [True, True]
# output_params_this_group: original weights (either fp16 or fp32)
# group['params']: master weights (fp32)
# grad_type, param_to_update_type, momentum_type, requires_fp16_model_copy
# fp32, fp32, fp32, No
fp32_grads = [g for (p, g) in zip(output_params_this_group, grads_this_group) if p.dtype == torch.float32]
fp32_params = [p2 for (p1, p2) in zip(output_params_this_group, group['params']) if p1.dtype == torch.float32]
fp32_momentums, first_runs[1] = self.get_momentums(fp32_params)
fp32_set = [fp32_grads, fp32_params, fp32_momentums]
# fp16, fp32, fp32, Yes
fp16_grads = [g for (p, g) in zip(output_params_this_group, grads_this_group) if p.dtype == torch.float16]
fp32_from_fp16_params = [p2 for (p1, p2) in zip(output_params_this_group, group['params']) if p1.dtype == torch.float16]
fp32_from_fp16_momentums, first_runs[0] = self.get_momentums(fp32_from_fp16_params)
fp16_params = [p1 for (p1, p2) in zip(output_params_this_group, group['params']) if p1.dtype == torch.float16]
fp16_set = [fp16_grads, fp32_from_fp16_params, fp32_from_fp16_momentums, fp16_params]
launch_sets = [fp16_set, fp32_set]
for launch_set, first_run in zip(launch_sets, first_runs):
assert len(launch_set[0]) == len(launch_set[1])
assert len(launch_set[0]) == len(launch_set[2])
if len(launch_set[0]) > 0:
multi_tensor_applier(
self.multi_tensor_sgd,
self._dummy_overflow_buf,
launch_set,
weight_decay,
momentum,
dampening,
lr,
nesterov,
first_run,
self.wd_after_momentum,
1.0/scale)
return loss
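# --- Illustrative sketch (not part of the original file) ---------------------
# A hedged usage example following the class docstring: FusedSGD is intended
# to be driven through apex.contrib.optimizers.FP16_Optimizer, which supplies
# the grads/output_params/scale arguments to step(). `model`, `inputs`, and
# `targets` are hypothetical placeholders; the model is assumed to be on the
# GPU in half precision.
def _example_fused_sgd_with_fp16_optimizer(model, inputs, targets):
    from apex.contrib.optimizers.fp16_optimizer import FP16_Optimizer
    optimizer = FusedSGD(model.parameters(), lr=0.1, momentum=0.9)
    optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs).float(), targets.float())
    optimizer.backward(loss)
    # FP16_Optimizer.step() gathers the fp16 grads and calls
    # FusedSGD.step(grads=..., output_params=..., scale=...).
    optimizer.step()
    return loss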
|
apex-master
|
apex/contrib/optimizers/fused_sgd.py
|
import os
import math
import inspect
import torch
import importlib
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
import torch.distributed.distributed_c10d as c10d
# Fallback to private fields if using older PyTorch version
try:
    from torch.distributed.distributed_c10d import get_process_group_ranks
except ImportError:
def get_process_group_ranks(group):
return list(c10d._pg_group_ranks[group].keys())
_make_nccl_premul_sum = getattr(torch.distributed, "_make_nccl_premul_sum", None)
# Ref: https://github.com/pytorch/pytorch/pull/81272
if _make_nccl_premul_sum is None:
if hasattr(torch.distributed, "make_nccl_premul_sum"):
_make_nccl_premul_sum = torch.distributed.make_nccl_premul_sum
class DistributedFusedLAMB(torch.optim.Optimizer):
"""Implements LAMB algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This version of fused LAMB implements 2 fusions.
* Fusion of the LAMB update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.optimizers.FusedLAMB`'s usage is identical to any ordinary Pytorch optimizer::
opt = apex.optimizers.FusedLAMB(model.parameters(), lr = ....)
...
opt.step()
:class:`apex.optimizers.FusedLAMB` may be used with or without Amp. If you wish to use :class:`FusedLAMB` with Amp,
you may choose any ``opt_level``::
opt = apex.optimizers.FusedLAMB(model.parameters(), lr = ....)
model, opt = amp.initialize(model, opt, opt_level="O0" or "O1 or "O2")
...
opt.step()
In general, ``opt_level="O1"`` is recommended.
LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its norm. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
NOT SUPPORTED now! (default: False)
adam_w_mode (boolean, optional): Apply L2 regularization or weight decay
            True for decoupled weight decay (also known as AdamW) (default: True)
        grad_averaging (bool, optional): whether to apply (1-beta2) to grad when
            calculating running averages of gradient. (default: True)
        set_grad_none (bool, optional): whether to set grad to None when zero_grad()
method is called. (default: True)
max_grad_norm (float, optional): value used to clip global grad norm
(default: 1.0)
        use_nvlamb (boolean, optional): Apply the adaptive learning rate to
            parameters with 0.0 weight decay (default: False)
        step_supports_amp_scaling (boolean, optional): whether to use customized
gradient unscaling logic (default: True)
.. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
class AtomicCounter(object):
def __init__(self):
self.value = 0
self.order = []
import threading
self._lock = threading.Lock()
def add(self, idx):
with self._lock:
self.value += 1
self.order.append(idx)
def __init__(self, params,
lr=1e-3, bias_correction = True, grad_averaging=True,
betas=(0.9, 0.999), eps=1e-8,
weight_decay=0., max_grad_norm=0.,
adam_w_mode=True, use_nvlamb=False,
step_supports_amp_scaling=True, overlap_reductions=True,
dwu_group_size=0, dwu_num_blocks=4, dwu_num_chunks=4,
dwu_num_rs_pg=1, dwu_num_ar_pg=4, dwu_num_ag_pg=0, fused_norm=False,
e5m2_allgather=False, verbose=False, clip_after_ar=True,
full_ar=False, set_param_views_to_flat_buffer=False, skip_allgather=False,
fuse_scale=False, param_order=None, nccl_allgather_channels=0):
defaults = dict(lr=lr, bias_correction=bias_correction,
betas=betas, eps=eps, weight_decay=weight_decay,
grad_averaging=grad_averaging,
max_grad_norm=max_grad_norm)
super(DistributedFusedLAMB, self).__init__(params, defaults)
global fused_adam_cuda, distributed_lamb_cuda
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
distributed_lamb_cuda = importlib.import_module("distributed_lamb_cuda")
self._overflow_buf = torch.cuda.IntTensor([0])
self._has_overflow = False
self.multi_tensor_lamb_compute_update_term = distributed_lamb_cuda.multi_tensor_lamb_compute_update_term
self.multi_tensor_lamb_update_weights = distributed_lamb_cuda.multi_tensor_lamb_update_weights
import amp_C
self.multi_tensor_l2norm = amp_C.multi_tensor_l2norm
self._grad_averaging = grad_averaging
self._adam_w_mode = 1 if adam_w_mode else 0
self._use_nvlamb = use_nvlamb
self._step_supports_amp_scaling = step_supports_amp_scaling
self._is_accumulation_step = False
self._last_step = False
self._overlap_reductions = overlap_reductions
self._global_scale = None
self._num_blocks = dwu_num_blocks
self._num_chunks = dwu_num_chunks
self._e5m2_allgather = e5m2_allgather
self._verbose = verbose
self._clip_after_ar = clip_after_ar
self._full_ar = full_ar
self._fuse_scale = fuse_scale
self._L2_grad_norm = None
self._set_flat_param_view = set_param_views_to_flat_buffer
self._skip_ag = skip_allgather
self._fused_norm = fused_norm if not clip_after_ar else False
self._current_process_group = c10d._get_default_group()
self._available_ranks = get_process_group_ranks(self._current_process_group)
self._group_size = torch.cuda.device_count() if dwu_group_size <= 0 else dwu_group_size
self._world_size = torch.distributed.get_world_size()
self._num_groups = self._world_size // self._group_size
self._rank_in_group = torch.distributed.get_rank() % self._group_size
self._lr = torch.tensor(0.0, dtype=torch.float32, device='cuda')
self._resume_from_checkpoint = False
self._step = torch.cuda.IntTensor([0])
# Master weight, moment, gradient buffers
self._fp32_p, self._fp32_m, self._fp32_v, self._fp16_p, self._fp16_g = None, None, None, None, None
# Check if collectives have no_copy option
self._reduce_scatter_no_copy = (
'no_copy' in inspect.getfullargspec(torch.distributed.reduce_scatter).args
)
self._all_gather_no_copy = (
'no_copy' in inspect.getfullargspec(torch.distributed.all_gather).args
)
if "reduce_scatter_tensor" not in dir(torch.distributed):
torch.distributed.reduce_scatter_tensor = torch.distributed._reduce_scatter_base
if "all_gather_into_tensor" not in dir(torch.distributed):
torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
self._num_rs_pg = dwu_num_rs_pg
self._num_ar_pg = dwu_num_ar_pg
self._num_ag_pg = dwu_num_ag_pg
if self._full_ar: # full all reduce, only need AR and AG groups
            # l2_grad_norm may be reduced within a node to limit memory reads
for group_i in range(self._num_groups):
ranks = [group_i*self._group_size+j for j in range(self._group_size)]
l2_grad_norm_pg = torch.distributed.new_group(ranks=ranks)
if torch.distributed.get_rank() in ranks:
self._l2_grad_norm_pg = l2_grad_norm_pg
self._ar_pg = []
# consider all the ranks
ranks = list(range(0, self._world_size))
for i in range(self._num_ar_pg):
if self._verbose:
print(f"creating new AR group {i}: {ranks}")
grp = torch.distributed.new_group(ranks=ranks)
if grp != torch.distributed.GroupMember.NON_GROUP_MEMBER:
if self._verbose:
print(f"group {i}: init barrier (device: {torch.cuda.current_device()})")
torch.distributed.barrier(group=grp, device_ids=[torch.cuda.current_device()])
if self._verbose:
print(f"created new AR group {i}: {ranks}")
if torch.distributed.get_rank() in ranks:
self._ar_pg.append(grp)
self._ar_st = [torch.cuda.Stream() for _ in range(self._num_ar_pg)]
if nccl_allgather_channels > 0:
os.putenv('NCCL_MAX_NCHANNELS', str(nccl_allgather_channels))
if self._num_ag_pg == 0:
self._ag_pg = self._ar_pg
self._ag_st = self._ar_st
self._num_ag_pg = self._num_ar_pg
else:
self._ag_pg = []
ranks = []
stride = torch.cuda.device_count()
for i in range(self._num_groups):
rs = list(range(i*stride, (i+1)*stride))
ranks.append(rs)
for rs in ranks:
for i in range(self._num_ag_pg):
grp = torch.distributed.new_group(ranks=rs)
if torch.distributed.get_rank() in rs:
if self._verbose:
print(f"creating AG group {i}: {rs}")
self._ag_pg.append(grp)
self._ag_st = [torch.cuda.Stream() for _ in range(self._num_ag_pg)]
else: # reduce-scatter + all-reduce, need RS, AR, AG groups
if self._num_groups > 1:
self._ar_pg = []
for dev_i in range(self._group_size):
ranks = [dev_i+j*self._group_size for j in range(self._num_groups)]
for i in range(self._num_ar_pg):
if self._verbose:
print(f"creating new AR group {i}: {ranks}")
grp = torch.distributed.new_group(ranks=ranks)
if grp != torch.distributed.GroupMember.NON_GROUP_MEMBER:
if self._verbose:
print(f"group {i}: init barrier (device: {torch.cuda.current_device()})")
torch.distributed.barrier(group=grp, device_ids=[torch.cuda.current_device()])
if self._verbose:
print(f"created new AR group {i}: {ranks}")
if torch.distributed.get_rank() in ranks:
self._ar_pg.append(grp)
self._ar_st = [torch.cuda.Stream() for _ in range(self._num_ar_pg)]
rs_ranks = []
for group_i in range(self._num_groups):
rs_ranks.append([group_i*self._group_size+j for j in range(self._group_size)])
self._rs_pg = []
for group_i in range(self._num_groups):
ranks = rs_ranks[group_i]
for i in range(self._num_rs_pg):
grp = torch.distributed.new_group(ranks=ranks)
if torch.distributed.get_rank() in ranks:
self._rs_pg.append(grp)
if self._verbose:
print(f"creating RS group : {ranks}")
l2_grad_norm_pg = torch.distributed.new_group(ranks=ranks)
if torch.distributed.get_rank() in ranks:
self._l2_grad_norm_pg = l2_grad_norm_pg
self._rs_st = [torch.cuda.Stream() for _ in range(self._num_rs_pg)]
if self._num_ag_pg == 0:
self._ag_pg = self._rs_pg
self._ag_st = self._rs_st
self._num_ag_pg = self._num_rs_pg
else:
self._ag_pg = []
for group_i in range(self._num_groups):
ranks = rs_ranks[group_i]
for i in range(self._num_ag_pg):
grp = torch.distributed.new_group(ranks=ranks)
if torch.distributed.get_rank() in ranks:
self._ag_pg.append(grp)
if self._verbose:
print(f"creating AG group : {ranks}")
self._ag_st = [torch.cuda.Stream() for _ in range(self._num_ag_pg)]
for ag_pg in self._ag_pg:
torch.distributed.barrier(group=ag_pg)
self._l2_grad_norm_st = torch.cuda.Stream()
self._completion_st = torch.cuda.Stream()
self._step.record_stream(self._completion_st)
self._reductions_works = [None]*self._num_blocks
self._allgather_works = [None]*self._num_blocks
self._one = torch.cuda.IntTensor([1])
self._first_step = True
self._lazy_init_stage1_done, self._lazy_init_stage2_done = False, False
self._param_order = self.AtomicCounter()
p_offset = 0
p_i = 0
self._model_params = []
self._grad_accs = []
self._group_properties = []
for group in self.param_groups:
prev = None
beta1, beta2 = group['betas']
beta3 = 1.0 - beta1 if self._grad_averaging else 1.0
bias_correction = 1 if group['bias_correction'] else 0
eps = group['eps']
weight_decay = group['weight_decay']
for p in group['params']:
if not p.requires_grad:
continue
self._model_params.append(p)
self._group_properties.append((
weight_decay,
bias_correction,
beta1,
beta2,
beta3,
eps
))
p_grads_size = p.numel()
if self._set_flat_param_view:
if param_order:
# this is executed when param_order is specified by the user
self._param_order.add(param_order[p])
else:
self._param_order.add(p_i)
p_offset += p_grads_size
# Only enforce 128b alignment (64 * fp16) for non-consecutive parameters
# RNN is one example of consecutive parameters:
# (weight_ih, weight_hh, bias_ih, bias_hh)
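                # For illustration: p_offset = 1000 would round up to
                # ((1000 + 63) // 64) * 64 = 1024, so the next parameter starts
                # on a 64-element (128-byte for fp16) boundary.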
if prev is not None and (prev.data_ptr() + prev.numel() * prev.element_size() != p.data_ptr()):
p_offset = ((p_offset + 63) // 64) * 64
prev = p
p_i += 1
if param_order:
self._param_order.order = torch.argsort(torch.tensor(self._param_order.order)).tolist()
self._grads_generated = [False]*len(self._model_params)
self._grads_fp16, self._grads_fp32 = [], []
if self._overlap_reductions:
self._current_block = self._num_blocks
self._net_total_param_size = p_offset
self._total_param_size = p_offset
dwu_min_page_size = 256 * self._num_blocks * self._num_chunks * self._group_size
self._total_param_size = ((self._total_param_size + dwu_min_page_size - 1) // dwu_min_page_size) * dwu_min_page_size
self._new_params = torch.zeros([self._total_param_size], dtype=torch.uint8 if self._e5m2_allgather else torch.float16, device='cuda')
def _lazy_init_stage1(self):
if self._lazy_init_stage1_done: return
p_i = 0
#self._model_params = []
#self._grad_accs = []
#self._group_properties = []
for group in self.param_groups:
for p in group['params']:
torch.distributed.broadcast(p, 0)
if not p.requires_grad:
continue
def wrapper(param, param_i):
param_tmp = param.expand_as(param)
grad_acc = param_tmp.grad_fn.next_functions[0][0]
def allreduce_hook(*unused):
if not self._set_flat_param_view:
if self._first_step:
# first time
self._param_order.add(param_i)
else:
idx = self._param_order.order.index(param_i)
self._do_overlapped_reduction(idx, param)
else:
if not self._first_step:
idx = self._param_order.order.index(param_i)
self._do_overlapped_reduction(idx, param)
grad_acc.register_hook(allreduce_hook)
self._grad_accs.append(grad_acc)
wrapper(p, p_i)
p_i += 1
self._block_size = self._total_param_size // self._num_blocks
self._chunk_size = self._block_size // self._num_chunks
self._shard_size = self._chunk_size // self._group_size
self._flat_grads = torch.zeros([self._total_param_size], dtype=torch.float16, device='cuda')
self._mega_shard_size = self._num_blocks * self._num_chunks * self._shard_size
# initialize master weights, moments buffers if not loaded from checkpoint
if self._fp32_p is None:
self._fp32_p = torch.zeros([self._mega_shard_size], dtype=torch.float32, device='cuda')
self._fp32_m = torch.zeros([self._mega_shard_size], dtype=torch.float32, device='cuda')
self._fp32_v = torch.zeros([self._mega_shard_size], dtype=torch.float32, device='cuda')
self._fp32_u = torch.zeros([self._mega_shard_size], dtype=torch.float32, device='cuda')
# FIXME: Rethink fp16 label since it's either uint8 or fp16
self._fp16_p = torch.zeros([self._mega_shard_size], dtype=torch.uint8 if self._e5m2_allgather else torch.float16, device='cuda')
self._fp16_g = torch.zeros([self._mega_shard_size], dtype=torch.float16, device='cuda')
def _flat_split(p):
def __blockify(p):
return [p[block_id*self._block_size:(block_id+1)*self._block_size] for block_id in range(self._num_blocks)]
def __chunkify(p):
return [p[chunk_id*self._chunk_size:(chunk_id+1)*self._chunk_size] for chunk_id in range(self._num_chunks)]
def __shardify(p):
return [p[shard_id*self._shard_size:(shard_id+1)*self._shard_size] for shard_id in range(self._group_size)]
list_of_blocks = __blockify(p)
list_of_list_of_chunks = [__chunkify(block) for block in list_of_blocks]
list_of_list_of_list_of_shards = [[__shardify(chunk) for chunk in chunks] for chunks in list_of_list_of_chunks]
return list_of_blocks, list_of_list_of_chunks, list_of_list_of_list_of_shards
# note(crcrpar): the function below doesn't seem to be used at all.
# def _flat_split_no_shards(p):
# def __blockify(p):
# return [p[block_id*self._block_size:(block_id+1)*self._block_size] for block_id in range(self._num_blocks)]
# def __chunkify(p):
# return [p[chunk_id*self._chunk_size:(chunk_id+1)*self._chunk_size] for chunk_id in range(self._num_chunks)]
# list_of_blocks = __blockify(self._flat_grads)
# list_of_list_of_chunks = [__chunkify(block) for block in list_of_blocks]
# return list_of_blocks, list_of_list_of_chunks
def _full_packed_split(p):
def __shardify(p):
return [p[mega_shard*self._mega_shard_size:(mega_shard+1)*self._mega_shard_size] for mega_shard in range(self._group_size)]
def __blockify(p):
return [p[block_id*self._num_chunks*self._shard_size:(block_id+1)*self._num_chunks*self._shard_size] for block_id in range(self._num_blocks)]
def __chunkify(p):
return [p[chunk_id*self._shard_size:(chunk_id+1)*self._shard_size] for chunk_id in range(self._num_chunks)]
list_of_mega_shards = __shardify(p)
list_of_list_of_mega_blocks = [__blockify(mega_shard) for mega_shard in list_of_mega_shards]
list_of_list_of_list_of_mega_chunks = [[__chunkify(mega_block) for mega_block in mega_blocks] for mega_blocks in list_of_list_of_mega_blocks]
return list_of_mega_shards, list_of_list_of_mega_blocks, list_of_list_of_list_of_mega_chunks
def _packed_split(p):
def __packed_blockify(p):
packed_block_size = self._num_chunks*self._shard_size
return [p[block_id*packed_block_size:(block_id+1)*packed_block_size] for block_id in range(self._num_blocks)]
def __packed_chunkify(p):
# in the packed format, each chunk contains one shard, so packed_chunk_size == self._shard_size
return [p[chunk_id*self._shard_size:(chunk_id+1)*self._shard_size] for chunk_id in range(self._num_chunks)]
list_of_blocks = __packed_blockify(p)
list_of_list_of_chunks = [__packed_chunkify(block) for block in list_of_blocks]
return list_of_blocks, list_of_list_of_chunks
def _split_assign(shards):
packed_block_size = self._num_chunks*self._shard_size
list_of_list_of_chunks=[]
for block_id in range(self._num_blocks):
list_of_chunks=[]
for chunk_id in range(self._num_chunks):
#self._fp16_g[block_id*packed_block_size+chunk_id*self._shard_size:block_id*packed_block_size+(chunk_id+1)*self._shard_size] = shards[block_id][chunk_id][self._rank_in_group]
list_of_chunks.append( shards[block_id][chunk_id][self._rank_in_group])
list_of_list_of_chunks.append(list_of_chunks)
return list_of_list_of_chunks
self._new_params_mega_shards, self._new_params_mega_blocks, self._new_params_mega_chunks = _full_packed_split(self._new_params)
# this splitting scheme is needed when allgather needs to be split into multiple chunks in a contiguous way
self._new_params2_blocks, self._new_params2_chunks, self._new_params2_shards = _flat_split(self._new_params)
self._fp32_p_blocks, self._fp32_p_chunks = _packed_split(self._fp32_p)
self._fp32_m_blocks, self._fp32_m_chunks = _packed_split(self._fp32_m)
self._fp32_v_blocks, self._fp32_v_chunks = _packed_split(self._fp32_v)
self._fp32_u_blocks, self._fp32_u_chunks = _packed_split(self._fp32_u)
self._fp16_p_blocks, self._fp16_p_chunks = _packed_split(self._fp16_p)
if self._full_ar:
# for gradient all-reduce
self._flat_grads_blocks, self._flat_grads_chunks, self._flat_grads_shards = _flat_split(self._flat_grads)
# for weight update
self._fp16_g_chunks = _split_assign(self._flat_grads_shards)
else:
self._flat_grads_blocks, self._flat_grads_chunks, self._flat_grads_shards = _flat_split(self._flat_grads)
self._fp16_g_blocks, self._fp16_g_chunks = _packed_split(self._fp16_g)
self._lazy_init_stage1_done = True
def _lazy_init_stage2(self):
if self._lazy_init_stage2_done: return
if not self._set_flat_param_view:
# reversing is needed for overlapping allreduce and backprop, but currently not supported for flat param view
self._param_order.order.reverse()
# re-order model_params, grad_accs, group_properties lists
self._model_params = [self._model_params[i] for i in self._param_order.order]
self._grad_accs = [self._grad_accs[i] for i in self._param_order.order]
self._group_properties = [self._group_properties[i] for i in self._param_order.order]
def _get_flat_view(param):
if param.is_contiguous(memory_format=torch.channels_last):
K, C, H, W = param.shape
pv = param.as_strided(size=(K,H,W,C), stride=(H*W*C, W*C, C, 1))
elif param.is_contiguous(memory_format=torch.channels_last_3d):
K, C, D, H, W = param.shape
pv = param.as_strided(size=(K,D,H,W,C), stride=(D*H*W*C, H*W*C, W*C, C, 1))
else:
pv = param
return pv.view(-1)
# re-collect grads info (size, offset) after ordering
prev = None
p_offset = 0
self._grads_info = []
self._individual_flat_grads = []
for i, p in enumerate(self._model_params):
p_grads_size = p.numel()
self._grads_info.append({"param_grads_size":p_grads_size, "param_offset":p_offset})
self._individual_flat_grads.append(self._flat_grads[p_offset:p_offset+p_grads_size].view_as(p))
# for the first iteration
self._do_overlapped_reduction(i, p)
p_offset += p_grads_size
            # Only enforce 128-byte alignment (64 fp16 elements) for non-consecutive parameters
# RNN is one example of consecutive parameters:
# (weight_ih, weight_hh, bias_ih, bias_hh)
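            # e.g. (hypothetical): p_offset = 130 is rounded up to ((130 + 63) // 64) * 64 = 192,
            # the next multiple of 64 fp16 elements (128 bytes).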
if prev is not None and (prev.data_ptr() + prev.numel() * prev.element_size() != p.data_ptr()):
p_offset = ((p_offset + 63) // 64) * 64
prev = p
self._low_param_i = [0]*self._num_blocks
for block_id in range(self._num_blocks-1,-1,-1):
p_i = len(self._grads_info)-1
while p_i > 0 and self._grads_info[p_i]["param_offset"] > block_id*self._block_size:
p_i -= 1
self._low_param_i[block_id] = p_i
#print("self._low_param_i", self._low_param_i)
        # This block does two things:
# 1) Copy model parameters into master buffer
# 2) Create tensor lists for unpacking new parameter tensor after all-gather
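        # Sketch of the overlap test used in the nested loop below: a shard covers
        # [flat_shard_start, flat_shard_end) of the flat gradient buffer and a parameter covers
        # [flat_grad_start, flat_grad_end); their intersection
        #   clipped_start = max(flat_grad_start, flat_shard_start)
        #   clipped_end   = min(flat_grad_end,   flat_shard_end)
        # is non-empty only for parameters that contribute to that shard, and its offsets are used
        # to slice both the model-parameter view and the packed shard buffers.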
self._packed_flat_to_model_params_fp16 = []
self._packed_flat_to_model_params_fp32 = []
self._model_params_num = len(self._model_params)
self._contrib_tensor_list = []
self._contrib_min_param_i, self._contrib_max_param_i = -1, -1
self._contrib_update_frag_for_norm = []
self._contrib_model_param_for_norm_fp16 = []
self._contrib_model_param_for_norm_fp32 = []
self._contrib_model_param_for_norm_is_fp16 = []
self._model_param_is_contrib = []
self._contrib_group_properties = []
for shard_id in range(self._group_size):
for block_id in range(self._num_blocks):
for chunk_id in range(self._num_chunks):
flat_shard_start = (((block_id * self._num_chunks + chunk_id) * self._group_size) + shard_id) * self._shard_size
flat_shard_end = flat_shard_start + self._shard_size
for param_i, (p, grads_info, group_props) in enumerate(zip(self._model_params, self._grads_info, self._group_properties)):
flat_grad_start = grads_info["param_offset"]
flat_grad_end = flat_grad_start + grads_info["param_grads_size"]
                        clipped_start = max(flat_grad_start, flat_shard_start)
                        clipped_end = min(flat_grad_end, flat_shard_end)
if clipped_start < clipped_end:
grad_offset = clipped_start - flat_grad_start
grad_length = clipped_end - clipped_start
shard_offset = clipped_start - flat_shard_start
pf = _get_flat_view(p)
model_param_fragment = pf[grad_offset:grad_offset+grad_length]
new_param_packed_fragment = self._new_params_mega_chunks[shard_id][block_id][chunk_id][shard_offset:shard_offset+grad_length]
if model_param_fragment.dtype == torch.float16:
self._packed_flat_to_model_params_fp16.append( (new_param_packed_fragment, model_param_fragment) )
else:
self._packed_flat_to_model_params_fp32.append( (new_param_packed_fragment, model_param_fragment) )
if shard_id == self._rank_in_group:
self._model_param_is_contrib.append(param_i)
# copy model parameters into master buffer
master_param_fragment = self._fp32_p_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]
opti_state_m_fragment = self._fp32_m_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]
opti_state_v_fragment = self._fp32_v_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]
opti_state_u_fragment = self._fp32_u_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]
opti_state_g_fragment = self._fp16_g_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]
opti_state_p_fragment = self._fp16_p_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]
#print("model_param_fragment.size()=%s, new_param_packed_fragment.size()=%s, master_param_fragment.size()=%s" % (str(model_param_fragment.size()), str(new_param_packed_fragment.size()), str(master_param_fragment.size())))
if not self._resume_from_checkpoint:
master_param_fragment.copy_(model_param_fragment)
self._contrib_group_properties.append(group_props)
self._contrib_tensor_list.append((master_param_fragment, opti_state_m_fragment, opti_state_v_fragment, opti_state_u_fragment, opti_state_g_fragment, opti_state_p_fragment)) # p, m, v, u, g, p_copy
self._contrib_update_frag_for_norm.append(opti_state_u_fragment)
if p.dtype == torch.float16:
self._contrib_model_param_for_norm_fp16.append(p)
else:
self._contrib_model_param_for_norm_fp32.append(p)
self._contrib_model_param_for_norm_is_fp16.append(True if p.dtype == torch.float16 else False)
if self._contrib_min_param_i < 0: self._contrib_min_param_i = param_i
self._contrib_max_param_i = param_i
self._contrib_model_param_for_norm_num = len(self._contrib_model_param_for_norm_is_fp16)
if len(self._contrib_model_param_for_norm_fp16) == 0: self._contrib_model_param_for_norm_fp16 = None
if len(self._contrib_model_param_for_norm_fp32) == 0: self._contrib_model_param_for_norm_fp32 = None
self._contrib_model_param_for_norm_is_fp32 = torch.tensor([not is_fp16 for is_fp16 in self._contrib_model_param_for_norm_is_fp16], dtype=torch.bool, device='cuda')
self._contrib_model_param_for_norm_is_fp16 = torch.tensor([is_fp16 for is_fp16 in self._contrib_model_param_for_norm_is_fp16], dtype=torch.bool, device='cuda')
self._offsets = torch.tensor(self._model_param_is_contrib, dtype=torch.int64, device='cuda')
p, m, v, u, g, p_copy = list(zip(*self._contrib_tensor_list))
self._contrib_compute_update_term_tensor_list = [g, p, m, v, u]
self._contrib_update_weights_tensor_list = [u, p, p_copy]
math_type = self._fp32_u.dtype
decay, bias_correction, beta1, beta2, beta3, epsilon = list(zip(*self._contrib_group_properties))
self._contrib_beta1 = torch.tensor(beta1, dtype=math_type, device='cuda')
self._contrib_beta2 = torch.tensor(beta2, dtype=math_type, device='cuda')
self._contrib_beta3 = torch.tensor(beta3, dtype=math_type, device='cuda')
self._contrib_bias_correction = torch.tensor(bias_correction, dtype=torch.int, device='cuda')
self._contrib_epsilon = torch.tensor(epsilon, dtype=math_type, device='cuda')
self._contrib_weight_decay = torch.tensor(decay, dtype=math_type, device='cuda')
self._packed_flat_to_model_params_fp16 = list(zip(*self._packed_flat_to_model_params_fp16)) if len(self._packed_flat_to_model_params_fp16) > 0 else None
self._packed_flat_to_model_params_fp32 = list(zip(*self._packed_flat_to_model_params_fp32)) if len(self._packed_flat_to_model_params_fp32) > 0 else None
self._lazy_init_stage2_done = True
self.complete_reductions()
self._first_step = False
def set_is_accumulation_step(self, is_accumulation_step):
self._is_accumulation_step = is_accumulation_step
def set_last_step(self, last_step):
self._last_step = last_step
def _get_flush_block(self):
flush_block = []
if self._current_block > 0 and self._grads_generated[self._low_param_i[self._current_block-1]]:
num_grads = len(self._grads_generated)
contiguous_idx = num_grads
while contiguous_idx > 0 and self._grads_generated[contiguous_idx-1]:
contiguous_idx -= 1
if contiguous_idx < num_grads and self._grads_info[contiguous_idx]["param_offset"] <= (self._current_block-1)*self._block_size:
self._current_block -= 1
start = self._current_block * self._block_size
end = (self._current_block+1) * self._block_size
flush_block = [start, end]
return flush_block
def _full_all_reduce_scale(self, block_id, scale):
works = [None]*self._num_chunks
if self._clip_after_ar:
for chunk_id in range(self._num_chunks):
glob_chunk_id = block_id * self._num_chunks + chunk_id
ar_stream = self._ar_st[glob_chunk_id%self._num_ar_pg]
ar_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(ar_stream):
works[chunk_id] = torch.distributed.all_reduce(self._flat_grads_chunks[block_id][chunk_id],group=self._ar_pg[glob_chunk_id%self._num_ar_pg],async_op=True,op=_make_nccl_premul_sum(scale))
else:
glob_chunk_id = block_id
ar_stream = self._ar_st[glob_chunk_id%self._num_ar_pg]
ar_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(ar_stream):
works0 = torch.distributed.all_reduce(self._flat_grads_blocks[block_id],group=self._ar_pg[glob_chunk_id%self._num_ar_pg],async_op=True,op=_make_nccl_premul_sum(scale))
for i in range(self._num_chunks):
works[i]=works0
self._reductions_works[block_id] = works
def _full_all_reduce(self, block_id):
works = [None]*self._num_chunks
for chunk_id in range(self._num_chunks):
glob_chunk_id = block_id * self._num_chunks + chunk_id
ar_stream = self._ar_st[glob_chunk_id%self._num_ar_pg]
ar_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(ar_stream):
works[chunk_id] = torch.distributed.all_reduce(self._flat_grads_chunks[block_id][chunk_id],group=self._ar_pg[glob_chunk_id%self._num_ar_pg],async_op=True)
self._reductions_works[block_id] = works
def _reduce_scatter_and_all_reduce_scale(self, block_id, scale):
# Reduction within each node
# Changes gradient format from [block * chunk * shard] to [shard * block * chunk]
# The output format is the same as the fp32 master parameters
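        # Rough picture: each flat gradient chunk is laid out as
        #   [shard_0 | shard_1 | ... | shard_{group_size-1}]
        # and the reduce-scatter below leaves this rank holding the reduced shard for its own
        # position in self._fp16_g_chunks[block_id][chunk_id], matching the packed master layout.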
works = [None]*self._num_chunks
for chunk_id in range(self._num_chunks):
glob_chunk_id = block_id * self._num_chunks + chunk_id
rs_stream = self._rs_st[glob_chunk_id%self._num_rs_pg]
rs_stream.wait_stream(torch.cuda.current_stream())
rs_stream.wait_stream(self._l2_grad_norm_st)
with torch.cuda.stream(rs_stream):
if self._reduce_scatter_no_copy:
works[chunk_id] = torch.distributed.reduce_scatter(
output=self._fp16_g_chunks[block_id][chunk_id],
input_list=self._flat_grads_shards[block_id][chunk_id],
group=self._rs_pg[glob_chunk_id%self._num_rs_pg],
async_op=True,
no_copy=True,
op=_make_nccl_premul_sum(scale),
)
else:
works[chunk_id] = torch.distributed.reduce_scatter_tensor(
output=self._fp16_g_chunks[block_id][chunk_id],
input=self._flat_grads_chunks[block_id][chunk_id],
group=self._rs_pg[glob_chunk_id%self._num_rs_pg],
async_op=True,
op=_make_nccl_premul_sum(scale),
)
# Reduction across nodes for each rank
if self._num_groups > 1:
for chunk_id in range(self._num_chunks):
glob_chunk_id = block_id * self._num_chunks + chunk_id
ar_stream = self._ar_st[glob_chunk_id%self._num_ar_pg]
with torch.cuda.stream(ar_stream):
works[chunk_id].wait()
works[chunk_id] = torch.distributed.all_reduce(self._fp16_g_chunks[block_id][chunk_id],group=self._ar_pg[glob_chunk_id%self._num_ar_pg],async_op=True)
self._reductions_works[block_id] = works
def _reduce_scatter_and_all_reduce(self, block_id):
# Reduction within each node
# Changes gradient format from [block * chunk * shard] to [shard * block * chunk]
# The output format is the same as the fp32 master parameters
works = [None]*self._num_chunks
for chunk_id in range(self._num_chunks):
glob_chunk_id = block_id * self._num_chunks + chunk_id
rs_stream = self._rs_st[glob_chunk_id%self._num_rs_pg]
rs_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(rs_stream):
if self._reduce_scatter_no_copy:
works[chunk_id] = torch.distributed.reduce_scatter(
output=self._fp16_g_chunks[block_id][chunk_id],
input_list=self._flat_grads_shards[block_id][chunk_id],
group=self._rs_pg[glob_chunk_id%self._num_rs_pg],
async_op=True,
no_copy=True,
)
else:
works[chunk_id] = torch.distributed.reduce_scatter_tensor(
output = self._fp16_g_chunks[block_id][chunk_id],
input = self._flat_grads_chunks[block_id][chunk_id],
group = self._rs_pg[glob_chunk_id%self._num_rs_pg],
async_op = True,
)
# Reduction across nodes for each rank
if self._num_groups > 1:
for chunk_id in range(self._num_chunks):
glob_chunk_id = block_id * self._num_chunks + chunk_id
ar_stream = self._ar_st[glob_chunk_id%self._num_ar_pg]
with torch.cuda.stream(ar_stream):
works[chunk_id].wait()
works[chunk_id] = torch.distributed.all_reduce(self._fp16_g_chunks[block_id][chunk_id],group=self._ar_pg[glob_chunk_id%self._num_ar_pg],async_op=True)
self._reductions_works[block_id] = works
def _pipeline_block_reductions(self, block_id):
if self._clip_after_ar:
self._flatten_grad_mt(1.0/self._world_size)
if self._full_ar:
self._full_all_reduce(block_id)
else:
self._reduce_scatter_and_all_reduce(block_id)
# Compute L2 grad norm
if block_id == 0:
with torch.cuda.stream(self._l2_grad_norm_st):
for block_id in range(self._num_blocks):
for chunk_id in range(self._num_chunks):
self._reductions_works[block_id][chunk_id].wait()
# Since the packed format is contiguous after reductions, only one norm is needed
l2_grad_norm_sq = torch.empty([1], device='cuda')
if self._full_ar:
# this flattening of lists is to keep multi_tensor_apply function happy, it wants depth=1 for l2 norm computation
flat_list = [item for sublist in self._fp16_g_chunks for item in sublist]
l2_grad_norm_sq = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [flat_list], False)[0]**2
else:
l2_grad_norm_sq = self._fp16_g.norm(dtype=torch.float32, p=2)**2
torch.distributed.all_reduce(l2_grad_norm_sq, group=self._l2_grad_norm_pg)
self._L2_grad_norm = l2_grad_norm_sq.sqrt()
else:
# Copy model grads to flat grads buffer
self._flatten_grad_mt(1.0)
# Compute L2 grad norm
self._l2_grad_norm_st.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self._l2_grad_norm_st):
if not self._fused_norm:
self._L2_grad_norm = self._flat_grads.norm(dtype=torch.float16, p=2).float()
torch.cuda.current_stream().wait_stream(self._l2_grad_norm_st)
# Apply clipping & pre-reduction scaling on grads
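            # The next few lines compute the clipping coefficient without branching on a
            # possibly-NaN value: coeff = max_grad_norm / grad_norm is clamped to at most 1,
            # index = (coeff + 1 > coeff) is 1 for finite coeff and 0 for NaN/Inf, so
            # index_select picks coeff when it is finite and falls back to 1.0 otherwise; the
            # result is then folded together with the 1/world_size pre-division.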
loss_scale = self.global_scale
max_grad_norm = loss_scale*self.defaults['max_grad_norm']
coeff = max_grad_norm /(1e-6+self.L2_grad_norm)
coeff = (coeff>1) * self._one + (coeff<=1) * coeff
tmp = torch.cat(((self._one), (coeff)))
index = (coeff+1>coeff).int()
scale = tmp.index_select(0, index).half()/self._world_size
if not self._fuse_scale:
self._flat_grads.mul_(scale)
if self._full_ar:
if self._fuse_scale:
self._full_all_reduce_scale(block_id, scale)
else:
self._full_all_reduce(block_id)
else:
if self._fuse_scale:
self._reduce_scatter_and_all_reduce_scale(block_id, scale)
else:
self._reduce_scatter_and_all_reduce(block_id)
if block_id == 0:
for block_id in range(self._num_blocks):
for chunk_id in range(self._num_chunks):
self._reductions_works[block_id][chunk_id].wait()
def __compute_contrib_param_norm(self):
if self._contrib_model_param_for_norm_fp16 is not None and self._contrib_model_param_for_norm_fp32 is not None:
gnorm_fp16 = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [self._contrib_model_param_for_norm_fp16], True)[1]
gnorm_fp32 = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [self._contrib_model_param_for_norm_fp32], True)[1]
gnorm = torch.empty(size=[self._contrib_model_param_for_norm_num], dtype=torch.bool, device='cuda')
gnorm.masked_scatter_(self._contrib_model_param_for_norm_is_fp16, gnorm_fp16)
gnorm.masked_scatter_(self._contrib_model_param_for_norm_is_fp32, gnorm_fp32)
elif self._contrib_model_param_for_norm_fp16 is not None:
gnorm = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [self._contrib_model_param_for_norm_fp16], True)[1]
elif self._contrib_model_param_for_norm_fp32 is not None:
gnorm = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [self._contrib_model_param_for_norm_fp32], True)[1]
return gnorm
def __compute_contrib_update_norm(self):
l2_norm = torch.zeros(size=[self._model_params_num], dtype=torch.float32, device='cuda')
local_contrib_l2_norm = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [self._contrib_update_frag_for_norm], True)[1] ** 2
l2_norm.scatter_(dim=0, index=self._offsets, src=local_contrib_l2_norm)
torch.distributed.all_reduce(l2_norm, group=self._ag_pg[0])
l2_norm = torch.sqrt(l2_norm)
return l2_norm
def _pipeline_step(self):
global_scale = self.global_scale
# if clip before ar, set max_grad_norm to 0
max_grad_norm = self.defaults['max_grad_norm'] * self._clip_after_ar
self._completion_st.wait_stream(self._l2_grad_norm_st)
global_grad_norm = self.L2_grad_norm
# check global_grad_norm and fill overflow_buf
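        # (x + 1 > x) is True only for finite x, so is_finite is 0 when the norm overflowed to
        # Inf/NaN; _overflow_buf is its complement (1 on overflow, 0 otherwise).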
is_finite = (global_grad_norm + 1 > global_grad_norm).int()
self._overflow_buf = self._one * (is_finite ^ self._one) # toggle between 0 and 1
if not self._clip_after_ar:
torch.distributed.all_reduce(is_finite,
op=torch.distributed.ReduceOp.MIN,
group=self._current_process_group)
torch.distributed.all_reduce(self._overflow_buf,
op=torch.distributed.ReduceOp.MAX,
group=self._current_process_group)
# increment step counter if no overflow
self._step += is_finite
self._completion_st.wait_stream(torch.cuda.current_stream())
self._completion_st.wait_stream(self._l2_grad_norm_st)
# Call step kernel once per step
# Call all-gather once per step
with torch.cuda.stream(self._completion_st):
for block_id in range(self._num_blocks):
for chunk_id in range(self._num_chunks):
self._reductions_works[block_id][chunk_id].wait()
param_norm = self.__compute_contrib_param_norm()
multi_tensor_applier(self.multi_tensor_lamb_compute_update_term,
self._overflow_buf,
self._contrib_compute_update_term_tensor_list, # g, p, m, v, u
self._contrib_beta1,
self._contrib_beta2,
self._contrib_beta3,
self._contrib_bias_correction,
self._step,
self._contrib_epsilon,
self._adam_w_mode,
self._contrib_weight_decay,
global_scale,
global_grad_norm,
max_grad_norm)
upd_norm = self.__compute_contrib_update_norm()
multi_tensor_applier(self.multi_tensor_lamb_update_weights,
self._overflow_buf,
self._contrib_update_weights_tensor_list, # u, p, p_copy
param_norm,
upd_norm,
self._offsets,
self._lr,
self._contrib_weight_decay,
global_grad_norm,
self._use_nvlamb)
if not self._skip_ag:
# allgather chunking is currently not supported for clip after allreduce
if not self._clip_after_ar:
for block in range(self._num_blocks):
for chunk in range(self._num_chunks):
if self._all_gather_no_copy:
torch.distributed.all_gather(
tensor_list = self._new_params2_shards[block][chunk],
tensor = self._fp16_p_chunks[block][chunk],
group = self._ag_pg[0],
no_copy = True,
)
else:
torch.distributed.all_gather_into_tensor(
output_tensor = self._new_params2_blocks[block],
input_tensor = self._fp16_p_chunks[block][chunk],
group = self._ag_pg[0],
)
else:
if self._all_gather_no_copy:
torch.distributed.all_gather(
tensor_list = self._new_params_mega_shards,
tensor = self._fp16_p,
group = self._ag_pg[0],
no_copy = True,
)
else:
torch.distributed.all_gather_into_tensor(
output_tensor = self._new_params,
input_tensor = self._fp16_p,
group = self._ag_pg[0],
)
def _flatten_grad_mt(self, scale):
if len(self._grads_fp16) > 0:
self._overflow_buf.zero_()
if not self._fused_norm:
multi_tensor_applier(
amp_C.multi_tensor_scale,
self._overflow_buf,
list(zip(*self._grads_fp16)),
scale)
else:
self._L2_grad_norm=multi_tensor_applier(
amp_C.multi_tensor_l2norm_scale,
self._overflow_buf,
list(zip(*self._grads_fp16)),
scale, False)[0].float()
self._grads_fp16 = []
if len(self._grads_fp32) > 0:
self._overflow_buf.zero_()
if not self._fused_norm:
multi_tensor_applier(
amp_C.multi_tensor_scale,
self._overflow_buf,
list(zip(*self._grads_fp32)),
scale)
else:
self._L2_grad_norm=multi_tensor_applier(
amp_C.multi_tensor_l2norm_scale,
self._overflow_buf,
list(zip(*self._grads_fp32)),
scale, False)[0].float()
self._grads_fp32 = []
def _do_overlapped_reduction(self, param_i, param):
if not self._is_accumulation_step:
# handle overlapped reductions
if param.dtype == torch.float16:
self._grads_fp16.append( (param.grad, self._individual_flat_grads[param_i]) )
else:
self._grads_fp32.append( (param.grad, self._individual_flat_grads[param_i]) )
self._grads_generated[param_i]=True
if not self._first_step and not self._last_step:
if self._overlap_reductions:
flush_block = self._get_flush_block()
while flush_block:
block_id = flush_block[0] // self._block_size
self._pipeline_block_reductions(block_id)
flush_block = self._get_flush_block()
def set_global_scale(self, global_scale):
"""Set global scale.
"""
self._global_scale = global_scale
@property
def global_scale(self):
return self._global_scale
@property
def L2_grad_norm(self):
torch.cuda.current_stream().wait_stream(self._l2_grad_norm_st)
return self._L2_grad_norm
def complete_reductions(self):
"""Complete reductions if full pipeline is not selected or overlap is not allowed.
"""
if self._last_step:
# zero out gradients that have not been completed yet
for param_i, grad_generated in enumerate(self._grads_generated):
if not grad_generated:
grad_info = self._grads_info[param_i]
param_offset = grad_info["param_offset"]
param_size = grad_info["param_grads_size"]
self._flat_grads[param_offset:param_offset+param_size].zero_()
self._grads_generated[param_i] = True
if self._first_step or self._last_step or not self._overlap_reductions:
# nothing done so far, run full pipeline after reductions
for block_id in range(self._num_blocks-1,-1,-1):
self._pipeline_block_reductions(block_id)
torch.cuda.current_stream().wait_stream(self._l2_grad_norm_st)
self._current_block = self._num_blocks
self._grads_generated = [False]*len(self._grads_info)
def step(self, closure=None, grad_scaler=None):
loss = None
if closure is not None:
loss = closure()
self._pipeline_step()
if grad_scaler is not None:
found_inf = self._overflow_buf.float()
optimizer_state = grad_scaler._per_optimizer_states[id(self)]
current_device = torch.device('cuda', torch.cuda.current_device())
optimizer_state["found_inf_per_device"][current_device] = found_inf
self._completion_st.wait_stream(torch.cuda.current_stream())
if not self._set_flat_param_view:
with torch.cuda.stream(self._completion_st):
# Copy self._new_params to model params
with torch.no_grad():
if self._packed_flat_to_model_params_fp16 is not None:
multi_tensor_applier(
fused_adam_cuda.maybe_cast_mt,
self._overflow_buf,
self._packed_flat_to_model_params_fp16)
if self._packed_flat_to_model_params_fp32 is not None:
multi_tensor_applier(
fused_adam_cuda.maybe_cast_mt,
self._overflow_buf,
self._packed_flat_to_model_params_fp32)
torch.cuda.current_stream().wait_stream(self._completion_st)
self._reductions_works = [None]*self._num_blocks
self._allgather_works = [None]*self._num_blocks
return loss
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`DistributedFusedAdam` instance.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
# save step, master weights and first/second moments
state_dict = {}
state_dict['step'] = self._step
state_dict['fp32_p'] = self._fp32_p
state_dict['fp32_m'] = self._fp32_m
state_dict['fp32_v'] = self._fp32_v
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict().
        If a DistributedFusedAdam instance was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``optimizer.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# restore step, master weights and first/second moments
self._step = state_dict['step']
self._fp32_p = state_dict['fp32_p'].to(device="cuda")
self._fp32_m = state_dict['fp32_m'].to(device="cuda")
self._fp32_v = state_dict['fp32_v'].to(device="cuda")
self._resume_from_checkpoint = True
|
apex-master
|
apex/contrib/optimizers/distributed_fused_lamb.py
|
import torch
import torch.distributed as dist
from torch import nn
import nccl_p2p_cuda as inc
import peer_memory_cuda as pm
# Base class for halo exchangers, plus a communication-free variant (HaloExchangerNoComm) below.
# NB! HaloExchangerNoComm does not exchange halos with its neighbors as a real exchanger should,
# it merely swaps the inputs.
# NB! It is only useful for performance testing.
# NB! Do not use it for actual production runs.
class HaloExchanger(object):
def __init__(self, ranks, rank_in_group):
self.stream1 = torch.cuda.Stream()
self.stream2 = torch.cuda.Stream()
self.stream3 = torch.cuda.Stream()
self.group_size = len(ranks)
self.ranks = ranks
self.rank_in_group = rank_in_group
self.wrap_around_left_rank_in_group = (rank_in_group + self.group_size - 1) % self.group_size
self.wrap_around_right_rank_in_group = (rank_in_group + 1) % self.group_size
self.left_rank = ranks[rank_in_group-1] if rank_in_group > 0 else -1
self.left_zero = True if rank_in_group == 0 else False
self.right_rank = ranks[rank_in_group+1] if rank_in_group < self.group_size - 1 else -1
self.right_zero = True if rank_in_group == self.group_size - 1 else False
class HaloExchangerNoComm(HaloExchanger):
def __init__(self, ranks, rank_in_group):
super(HaloExchangerNoComm, self).__init__(ranks, rank_in_group)
def left_right_halo_exchange(self, left_output_halo, right_output_halo, left_input_halo=None, right_input_halo=None):
if left_input_halo is None:
return right_output_halo, left_output_halo
else:
left_input_halo.copy_(right_output_halo)
right_input_halo.copy_(left_output_halo)
class HaloExchangerAllGather(HaloExchanger):
def __init__(self, ranks, rank_in_group, comm):
super(HaloExchangerAllGather, self).__init__(ranks, rank_in_group)
# self.comm must be NCCL process_group created with torch.distributed.new_group(ranks=ranks)
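        # e.g. (illustrative): comm = torch.distributed.new_group(ranks=ranks), created on every
        # rank with the same `ranks` list that is passed to this exchanger.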
self.comm = comm
def left_right_halo_exchange(self, left_output_halo, right_output_halo, left_input_halo=None, right_input_halo=None):
N,Hh,W,C = list(left_output_halo.shape)
send_halos = torch.empty((N,2*Hh,W,C),dtype=left_output_halo.dtype,device=left_output_halo.device)
send_halos[:,:Hh,:,:].copy_(left_output_halo)
send_halos[:,Hh:,:,:].copy_(right_output_halo)
all_halos = torch.empty((N,2*Hh*self.group_size,W,C),dtype=left_output_halo.dtype,device=left_output_halo.device)
all_halos = [all_halos[:,i*2*Hh:(i+1)*2*Hh,:,:] for i in range(self.group_size)]
torch.distributed.all_gather(all_halos,send_halos,group=self.comm,no_copy=True)
ag_left_input_halo = all_halos[self.wrap_around_left_rank_in_group][:,Hh:,:,:]
ag_right_input_halo = all_halos[self.wrap_around_right_rank_in_group][:,:Hh,:,:]
if left_input_halo is None:
if self.left_zero:
ag_left_input_halo.zero_()
if self.right_zero:
ag_right_input_halo.zero_()
return ag_left_input_halo, ag_right_input_halo
else:
if self.left_zero:
left_input_halo.zero_()
else:
left_input_halo.copy_(ag_left_input_halo)
if self.right_zero:
right_input_halo.zero_()
else:
right_input_halo.copy_(ag_right_input_halo)
class HaloExchangerSendRecv(HaloExchanger):
def __init__(self, ranks, rank_in_group):
super(HaloExchangerSendRecv, self).__init__(ranks, rank_in_group)
nccl_id = inc.get_unique_nccl_id(1).cuda()
torch.distributed.broadcast(nccl_id, 0)
nccl_id = nccl_id.cpu()
print("%d :: nccl_id = %s" % (torch.distributed.get_rank(), str(nccl_id)))
# Create another global nccl communicator in addition to the one created by torch.distributed.init_process_group("nccl")
# This is unavoidable because the underlying NCCL communicator torch.distributed creates is a protected variable, hence
# it cannot be accessed from another class.
# TODO: Figure out a way to avoid creating a second global communicator
assert(torch.distributed.get_rank() == self.ranks[self.rank_in_group]), "ranks[%d](%d) != torch.distributed.get_rank()(%d)" % (self.rank_in_group, self.ranks[self.rank_in_group], torch.distributed.get_rank())
self.handle = inc.init_nccl_comm(nccl_id, torch.distributed.get_rank(), torch.distributed.get_world_size())
def left_right_halo_exchange(self, left_output_halo, right_output_halo, left_input_halo=None, right_input_halo=None):
if left_input_halo is None:
left_input_halo, right_input_halo = inc.left_right_halo_exchange(self.handle, self.left_rank, self.right_rank , left_output_halo, right_output_halo)
return left_input_halo, right_input_halo
else:
inc.left_right_halo_exchange_inplace(self.handle, self.left_rank, self.right_rank, left_output_halo, right_output_halo, left_input_halo, right_input_halo)
class HaloExchangerPeer(HaloExchanger):
def __init__(self, ranks, rank_in_group, peer_pool, explicit_nhwc, numSM=0):
super(HaloExchangerPeer, self).__init__(ranks, rank_in_group)
self.diagnostics = False
self.explicit_nhwc = explicit_nhwc
self.numSM = numSM
self.peer_pool = peer_pool
def _allocate_peer_tensor(self, halo):
# Compute size in bytes
# Note: Pad buffer so each CUDA block gets required buffer size
size = 4 * halo.numel() * halo.element_size()
size_per_block = 128 * 2 * 16 # 128 threads each require two 128b buffers
size = (size + size_per_block - 1) // size_per_block * size_per_block
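        # e.g. (hypothetical): a fp16 halo with 1000 elements gives size = 4 * 1000 * 2 = 8000 bytes,
        # rounded up to the next multiple of size_per_block = 128 * 2 * 16 = 4096, i.e. 8192 bytes.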
# Construct dtype peer buffer with desired size
shape = [1, 1, 1, size // halo.element_size()]
return self.peer_pool.allocate_peer_tensors(shape, halo.dtype, False, True)
def left_right_halo_exchange(self, left_output_halo, right_output_halo, left_input_halo=None, right_input_halo=None):
inplace = False if left_input_halo is None and right_input_halo is None else True
if not inplace:
left_input_halo = torch.empty_like(right_output_halo)
right_input_halo = torch.empty_like(left_output_halo)
channels_last = left_output_halo.is_contiguous(memory_format=torch.channels_last) and not self.explicit_nhwc
left_tx = self._allocate_peer_tensor(left_input_halo)
right_tx = self._allocate_peer_tensor(right_input_halo)
pm.push_pull_halos_1d(
self.diagnostics, self.explicit_nhwc, self.numSM, self.rank_in_group,
self.left_zero, left_output_halo, left_tx[self.rank_in_group], right_tx[self.wrap_around_left_rank_in_group], left_input_halo,
self.right_zero, right_output_halo, right_tx[self.rank_in_group], left_tx[self.wrap_around_right_rank_in_group], right_input_halo,
)
if not inplace:
return left_input_halo, right_input_halo
# Class that combines input volume with halos from neighbors (1d).
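# For an H-split NCHW input y of shape (N, C, H, W) the padded tensor built below has shape
# (N, C, H + 2*half_halo, W): the middle H rows are a copy of y and the two half_halo-row margins
# are filled by the halo exchanger (or zeroed at the ends of the spatial group).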
class HaloPadder:
def __init__(self, halo_ex):
self.halo_ex = halo_ex
self.stream1 = torch.cuda.Stream()
self.stream2 = torch.cuda.Stream()
def __call__(self, y, half_halo, explicit_nhwc, H_split):
channels_last = not explicit_nhwc and y.is_contiguous(memory_format=torch.channels_last)
if explicit_nhwc:
N,H,W,C = list(y.shape)
if H_split:
padded_shape = [N,H+2*half_halo,W,C]
                ypad = torch.empty(padded_shape, dtype=y.dtype, device=y.device, memory_format=torch.contiguous_format)
yleft = ypad[:,:half_halo,:,:]
ymid = ypad[:,half_halo:H+half_halo,:,:]
yright = ypad[:,H+half_halo:H+2*half_halo,:,:]
oleft = y[:,:half_halo,:,:]
oright = y[:,H-half_halo:,:,:]
else:
padded_shape = [N,H,W+2*half_halo,C]
                ypad = torch.empty(padded_shape, dtype=y.dtype, device=y.device, memory_format=torch.contiguous_format)
yleft = ypad[:,:,:half_halo,:]
ymid = ypad[:,:,half_halo:W+half_halo,:]
yright = ypad[:,:,W+half_halo:W+2*half_halo,:]
oleft = y[:,:,:half_halo,:]
oright = y[:,:,W-half_halo:,:]
else:
N,C,H,W = list(y.shape)
if H_split:
padded_shape = [N,C,H+2*half_halo,W]
                ypad = torch.empty(padded_shape, dtype=y.dtype, device=y.device, memory_format=torch.channels_last)
yleft = ypad[:,:,:half_halo,:]
ymid = ypad[:,:,half_halo:H+half_halo,:]
yright = ypad[:,:,H+half_halo:H+2*half_halo,:]
oleft = y[:,:,:half_halo,:]
oright = y[:,:,H-half_halo:,:]
else:
padded_shape = [N,C,H,W+2*half_halo]
                ypad = torch.empty(padded_shape, dtype=y.dtype, device=y.device, memory_format=torch.channels_last)
yleft = ypad[:,:,:,:half_halo]
ymid = ypad[:,:,:,half_halo:W+half_halo]
yright = ypad[:,:,:,W+half_halo:W+2*half_halo]
oleft = y[:,:,:,:half_halo]
oright = y[:,:,:,W-half_halo:]
with torch.cuda.stream(self.stream1):
self.halo_ex(oleft, oright, yleft, yright)
with torch.cuda.stream(self.stream2):
ymid.copy_(y)
return ypad
def wait(self):
current_stream = torch.cuda.current_stream()
current_stream.wait_stream(self.stream1)
current_stream.wait_stream(self.stream2)
|
apex-master
|
apex/contrib/bottleneck/halo_exchangers.py
|
from .bottleneck import Bottleneck, SpatialBottleneck
from .halo_exchangers import HaloExchangerNoComm, HaloExchangerAllGather, HaloExchangerSendRecv, HaloExchangerPeer
|
apex-master
|
apex/contrib/bottleneck/__init__.py
|
import torch
from bottleneck import Bottleneck
torch.manual_seed(23337)
# use True to print layerwise sum for all outputs in reference code path
DEBUG = False
for stride, o_channel in [(1,32), (1,128), (2,32)]:
print("testing stride ==", stride, ", in_channel == 32 , out_channel ==", o_channel)
a_ = torch.randn(17,32,28,28)
a = a_.cuda().half().to(memory_format=torch.channels_last).requires_grad_()
model = Bottleneck(32,8,o_channel,stride=stride).cuda().half().to(memory_format=torch.channels_last)
# test model
b = model(a)
b.mean().backward()
d_grad = a.grad.float()
a.grad = None
torch.cuda.synchronize()
if DEBUG:
print("[DEBUG] ref dx :", d_grad.sum().item())
        # print wgrad. No need to reset since the cpp path later prints before accumulation
for i, w in enumerate(model.w_conv):
print("[DEBUG] ref wgrad{} :".format(i+1), w.grad.sum().item())
wgrads = []
for w in model.w_conv:
wgrads.append(w.grad.float())
model.use_cudnn = True
model.zero_grad()
c = model(a)
c.mean().backward()
torch.cuda.synchronize()
print("comparing native and channels_last:")
print("max error fprop:", (b-c).abs().max().item(), "max elem:", b.abs().max().item())
print("max error dgrad:", (d_grad-a.grad.float()).abs().max().item(), "max elem:", d_grad.abs().max().item())
for i, (w, wgrad) in enumerate(zip(model.w_conv, wgrads)):
print("max error wgrad{}:".format(i+1), (wgrad - w.grad.float()).abs().max().item(), "max elem:", wgrad.abs().max().item())
nhwc_a = a_.permute(0,2,3,1).contiguous().cuda().half().requires_grad_()
nhwc_model = Bottleneck(32,8,o_channel,stride=stride,explicit_nhwc=True, use_cudnn=True).cuda().half()
for p,q in zip(model.parameters(), nhwc_model.parameters()):
# model's storage is already in nhwc, we clone and assign to explicit nhwc model
q.data.copy_(p.data.permute(0,2,3,1).contiguous())
for p,q in zip(model.buffers(), nhwc_model.buffers()):
q.data.copy_(p.data)
d = nhwc_model(nhwc_a)
d.mean().backward()
torch.cuda.synchronize()
# reset reference to cudnn channels_last permute
#c_s = c.storage().tolist()
#d_s = d.storage().tolist()
#print(max([x-y for x,y in zip(c_s,d_s)]))
c = c.contiguous(memory_format=torch.contiguous_format).permute(0,2,3,1).contiguous()
d_grad = a.grad.float().permute(0,2,3,1).contiguous()
wgrads = []
for w in model.w_conv:
wgrads.append(w.grad.float().permute(0,2,3,1).contiguous())
torch.cuda.synchronize()
print("comparing nhwc and channels_last:")
print("max error fprop:", (d-c).abs().max().item(), "max elem:", c.abs().max().item())
print("max error dgrad:", (d_grad-nhwc_a.grad.float()).abs().max().item(), "max elem:", d_grad.abs().max().item())
for i, (w, wgrad) in enumerate(zip(nhwc_model.w_conv, wgrads)):
print("max error wgrad{}:".format(i+1), (wgrad - w.grad.float()).abs().max().item(), "max elem:", wgrad.abs().max().item())
|
apex-master
|
apex/contrib/bottleneck/test.py
|
import functools as func
import torch
import torch.distributed as dist
from torch import nn
from apex import check_cudnn_version_and_warn
import fast_bottleneck
import nccl_p2p_cuda as inc
assert check_cudnn_version_and_warn(__name__, 8400)
def kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
weight_tensor_nchw = tensor
nn.init.kaiming_uniform_(weight_tensor_nchw, a=a, mode=mode, nonlinearity=nonlinearity)
def compute_scale_bias_one(nhwc, weight, bias, running_mean, running_var, w_scale, w_bias):
scale = weight * running_var.rsqrt()
bias = bias - running_mean * scale
w_scale.copy_(scale)
w_bias.copy_(bias)
def compute_scale_bias_method(nhwc, args):
for arg in args:
# arg is tuple of (weight, bias, running_mean, running_var, w_scale, w_bias)
compute_scale_bias_one(nhwc, *arg)
class FrozenBatchNorm2d(torch.jit.ScriptModule):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
@torch.jit.script_method
def get_scale_bias(self, nhwc):
# type: (bool) -> List[torch.Tensor]
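        # With frozen statistics the batch norm collapses to an affine map:
        #   y = (x - running_mean) * rsqrt(running_var) * weight + bias
        #     = x * scale + (bias - running_mean * scale)
        # (no eps term, matching the buffers registered above).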
scale = self.weight * self.running_var.rsqrt()
bias = self.bias - self.running_mean * scale
if nhwc:
scale = scale.reshape(1, 1, 1, -1)
bias = bias.reshape(1, 1, 1, -1)
else:
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return scale, bias
@torch.jit.script_method
def forward(self, x):
scale, bias = self.get_scale_bias(False)
return x * scale + bias
@torch.jit.script
def drelu_dscale1(grad_o, output, scale1):
relu_mask = (output>0)
dx_relu = relu_mask * grad_o
g1 = dx_relu * scale1
return g1, dx_relu
@torch.jit.script
def drelu_dscale2(grad_o, output, scale1, scale2):
relu_mask = (output>0)
dx_relu = relu_mask * grad_o
g1 = dx_relu * scale1
g2 = dx_relu * scale2
return g1, g2
class BottleneckFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, nhwc, stride_1x1, scale, bias, x, *conv):
# TODO: clean up order of tensors
args = [x, *conv[0:3], *scale[0:3], *bias[0:3]]
ctx.downsample = len(conv) > 3
if ctx.downsample:
args.append(conv[3])
args.append(scale[3])
args.append(bias[3])
# weight buffers are always in nhwc while shape can be nhwc or channels_last
# here we pass in flag and let c++ handle it
# alternatively, we can put all sizes into a fixed format and pass it in
outputs = fast_bottleneck.forward(nhwc, stride_1x1, args)
ctx.save_for_backward(*(args+outputs))
# save relu outputs for drelu
ctx.nhwc = nhwc
ctx.stride_1x1 = stride_1x1
return outputs[2]
# backward relu is not exposed, MUL with mask used now
# only support dgrad
@staticmethod
def backward(ctx, grad_o):
outputs = ctx.saved_tensors[-3:]
if ctx.downsample:
grad_conv3, grad_conv4 = drelu_dscale2(grad_o, outputs[2], ctx.saved_tensors[6], ctx.saved_tensors[11])
else:
grad_conv3, grad_conv4 = drelu_dscale1(grad_o, outputs[2], ctx.saved_tensors[6])
# create input vector for backward
t_list = [*ctx.saved_tensors[0:10]]
t_list.append(grad_conv3)
t_list.append(grad_conv4)
# outputs used for wgrad and generating drelu mask
t_list.append(outputs[0])
t_list.append(outputs[1])
# in case there is downsample
if ctx.downsample:
t_list.append(ctx.saved_tensors[10])
grads = fast_bottleneck.backward(ctx.nhwc, ctx.stride_1x1, t_list)
return (None, None, None, None, *grads)
bottleneck_function = BottleneckFunction.apply
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class Bottleneck(torch.nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at the 3x3 convolution (self.conv2),
    # while the original implementation places the stride at the first 1x1 convolution (self.conv1),
    # following "Deep Residual Learning for Image Recognition" (https://arxiv.org/abs/1512.03385).
    # The torchvision variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    # Here the stride is placed at the 1x1 convolution.
def __init__(self, in_channels, bottleneck_channels, out_channels, stride=1, groups=1,
dilation=1, norm_func=None, use_cudnn=False, explicit_nhwc=False):
super(Bottleneck, self).__init__()
if groups != 1:
raise RuntimeError('Only support groups == 1')
if dilation != 1:
raise RuntimeError('Only support dilation == 1')
        if norm_func is None:
norm_func = FrozenBatchNorm2d
else:
raise RuntimeError('Only support frozen BN now.')
if stride != 1 or in_channels != out_channels:
self.downsample = nn.Sequential(
conv1x1(in_channels, out_channels, stride),
norm_func(out_channels),
)
else:
self.downsample = None
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(in_channels, bottleneck_channels, stride)
self.conv2 = conv3x3(bottleneck_channels, bottleneck_channels)
self.conv3 = conv1x1(bottleneck_channels, out_channels)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
self.bn1 = norm_func(bottleneck_channels)
self.bn2 = norm_func(bottleneck_channels)
self.bn3 = norm_func(out_channels)
self.w_scale = None
self.use_cudnn = use_cudnn
# setup conv weights
self.w_conv = [self.conv1.weight, self.conv2.weight, self.conv3.weight]
if self.downsample is not None:
self.w_conv.append(self.downsample[0].weight)
# init weight in nchw format before possible transpose
for w in self.w_conv:
kaiming_uniform_(w, a=1)
# TODO: prevent unsupported case usage
# support cases
# native cudnn
# normal yes no
# channel_last yes yes
# explicit_nhwc no yes
self.explicit_nhwc = explicit_nhwc
if self.explicit_nhwc:
for p in self.parameters():
with torch.no_grad():
p.data = p.data.permute(0,2,3,1).contiguous()
return
    # Returns a single callable that recomputes scale and bias for all frozen batch norms.
    # This method must be called before CUDA graph capture.
    # The callable it returns can be called at any time.
    # Calling this method prevents scale and bias from being recomputed on every forward call.
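    # Illustrative usage (hypothetical names):
    #   recompute_scale_bias = block.get_scale_bias_callable()
    #   recompute_scale_bias()   # refresh w_scale / w_bias, e.g. right before CUDA graph capture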
def get_scale_bias_callable(self):
self.w_scale, self.w_bias, args = [], [], []
batch_norms = [self.bn1, self.bn2, self.bn3]
if self.downsample is not None:
batch_norms.append(self.downsample[1])
for bn in batch_norms:
s = torch.empty_like(bn.weight)
b = torch.empty_like(s)
args.append( (bn.weight, bn.bias, bn.running_mean, bn.running_var, s, b) )
if self.explicit_nhwc:
self.w_scale.append( s.reshape(1, 1, 1, -1) )
self.w_bias.append( b.reshape(1, 1, 1, -1) )
else:
self.w_scale.append( s.reshape(1, -1, 1, 1) )
self.w_bias.append( b.reshape(1, -1, 1, 1) )
return func.partial(compute_scale_bias_method, self.explicit_nhwc, args)
def forward(self, x):
if self.use_cudnn:
if self.w_scale is None:
# calculate scale/bias from registered buffers
# TODO: make this better
s1, b1 = self.bn1.get_scale_bias(self.explicit_nhwc)
s2, b2 = self.bn2.get_scale_bias(self.explicit_nhwc)
s3, b3 = self.bn3.get_scale_bias(self.explicit_nhwc)
w_scale = [s1, s2, s3]
w_bias = [b1, b2, b3]
if self.downsample is not None:
s4, b4 = self.downsample[1].get_scale_bias(self.explicit_nhwc)
w_scale.append(s4)
w_bias.append(b4)
out = bottleneck_function(self.explicit_nhwc, self.stride, w_scale, w_bias, x, *self.w_conv)
else:
out = bottleneck_function(self.explicit_nhwc, self.stride, self.w_scale, self.w_bias, x, *self.w_conv)
return out
if self.explicit_nhwc:
raise RuntimeError('explicit nhwc with native ops is not supported.')
# fallback to native ops
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class SpatialBottleneckFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, spatial_group_size, spatial_group_rank, spatial_communicator, spatial_halo_exchanger, spatial_method, use_delay_kernel, explicit_nhwc, stride_1x1, scale, bias, thresholdTop, thresholdBottom, x, *conv):
if spatial_group_size > 1:
stream1 = spatial_halo_exchanger.stream1
stream2 = spatial_halo_exchanger.stream2
stream3 = spatial_halo_exchanger.stream3
# TODO: clean up order of tensors
args = [x, *conv[0:3], *scale[0:3], *bias[0:3]]
ctx.downsample = len(conv) > 3
if ctx.downsample:
args.append(conv[3])
args.append(scale[3])
args.append(bias[3])
# weight buffers are always in explicit_nhwc while shape can be explicit_nhwc or channels_last
# here we pass in flag and let c++ handle it
# alternatively, we can put all sizes into a fixed format and pass it in
outputs = fast_bottleneck.forward_init(explicit_nhwc, stride_1x1, args)
fast_bottleneck.forward_out1(explicit_nhwc, stride_1x1, args, outputs)
if spatial_group_size > 1:
out1 = outputs[0]
if explicit_nhwc:
N,Hs,W,C = list(out1.shape)
memory_format = torch.contiguous_format
out1_pad = torch.empty([N,Hs+2,W,C], dtype=out1.dtype, device='cuda')
else:
N,C,Hs,W = list(out1.shape)
memory_format = torch.channels_last if out1.is_contiguous(memory_format=torch.channels_last) else torch.contiguous_format
out1_pad = torch.empty([N,C,Hs+2,W], dtype=out1.dtype, device='cuda', memory_format=memory_format)
stream1.wait_stream(torch.cuda.current_stream())
if spatial_method != 2: stream3.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream1):
if explicit_nhwc:
top_out1_halo = out1_pad[:,:1,:,:]
btm_out1_halo = out1_pad[:,Hs+1:Hs+2,:,:]
spatial_halo_exchanger.left_right_halo_exchange(out1[:,:1,:,:], out1[:,Hs-1:,:,:], top_out1_halo, btm_out1_halo)
else:
top_out1_halo = out1_pad[:,:,:1,:]
btm_out1_halo = out1_pad[:,:,Hs+1:Hs+2,:]
spatial_halo_exchanger.left_right_halo_exchange(out1[:,:,:1,:], out1[:,:,Hs-1:,:], top_out1_halo, btm_out1_halo)
if spatial_method == 1:
# overlap mid convolution with halo transfer
if spatial_group_rank < spatial_group_size-1:
stream2.wait_stream(stream1)
with torch.cuda.stream(stream2):
if explicit_nhwc:
btm_fat_halo = torch.empty((N,3,W,C),dtype=out1.dtype,device=out1.device)
btm_fat_halo[:,0:2,:,:].copy_(out1[:,Hs-2:,:,:])
btm_fat_halo[:,2:,:,:].copy_(btm_out1_halo)
else:
btm_fat_halo = torch.empty((N,C,3,W),dtype=out1.dtype,device=out1.device)
btm_fat_halo[:,:,0:2,:].copy_(out1[:,:,Hs-2:,:])
btm_fat_halo[:,:,2:,:].copy_(btm_out1_halo)
btm_out2 = fast_bottleneck.forward_out2_halo(explicit_nhwc, btm_fat_halo, args)
if spatial_group_rank > 0:
with torch.cuda.stream(stream1):
if explicit_nhwc:
top_fat_halo = torch.empty((N,3,W,C),dtype=out1.dtype,device=out1.device)
top_fat_halo[:,:1,:,:].copy_(top_out1_halo)
top_fat_halo[:,1:3,:,:].copy_(out1[:,:2,:,:])
else:
top_fat_halo = torch.empty((N,C,3,W),dtype=out1.dtype,device=out1.device)
top_fat_halo[:,:,:1,:].copy_(top_out1_halo)
top_fat_halo[:,:,1:3,:].copy_(out1[:,:,:2,:])
top_out2 = fast_bottleneck.forward_out2_halo(explicit_nhwc, top_fat_halo, args)
if use_delay_kernel: inc.add_delay(10)
elif spatial_method != 2 and spatial_method != 3:
assert(False), "spatial_method must be 1, 2 or 3"
if spatial_group_size <= 1:
fast_bottleneck.forward_out2(explicit_nhwc, stride_1x1, args, outputs)
elif spatial_method == 1:
fast_bottleneck.forward_out2(explicit_nhwc, stride_1x1, args, outputs)
with torch.cuda.stream(stream3):
if explicit_nhwc:
out1_pad[:,1:Hs+1,:,:].copy_(out1)
else:
out1_pad[:,:,1:Hs+1,:].copy_(out1)
elif spatial_method == 2:
# wait for halo transfer to finish before doing a full convolution of padded x
if explicit_nhwc:
out1_pad[:,1:Hs+1,:,:].copy_(out1)
else:
out1_pad[:,:,1:Hs+1,:].copy_(out1)
torch.cuda.current_stream().wait_stream(stream1)
fast_bottleneck.forward_out2_pad(explicit_nhwc, stride_1x1, args, outputs, out1_pad)
elif spatial_method == 3:
fast_bottleneck.forward_out2_mask(explicit_nhwc, stride_1x1, args, outputs, thresholdTop, thresholdBottom)
with torch.cuda.stream(stream3):
if explicit_nhwc:
out1_pad[:,1:Hs+1,:,:].copy_(out1)
else:
out1_pad[:,:,1:Hs+1,:].copy_(out1)
# compute halo cells for outputs[1] (out2)
if spatial_group_size > 1:
out2 = outputs[1]
if explicit_nhwc:
top_out2_halo = out2[:,:1,:,:]
btm_out2_halo = out2[:,Hs-1:,:,:]
else:
top_out2_halo = out2[:,:,:1,:]
btm_out2_halo = out2[:,:,Hs-1:,:]
if spatial_method == 1:
if spatial_group_rank > 0:
torch.cuda.current_stream().wait_stream(stream1)
top_out2_halo.copy_(top_out2)
if spatial_group_rank < spatial_group_size-1:
torch.cuda.current_stream().wait_stream(stream2)
btm_out2_halo.copy_(btm_out2)
elif spatial_method == 3:
# Note
# out2 halo correction cannot overlap with anything since it has
# to wait for out2_mask to finish, but itself has to finish before
# the first kernel of _forward_rest can launch.
# At least we can overlap the two halo correction kernels.
if spatial_group_rank < spatial_group_size-1:
stream2.wait_stream(stream1) # wait for halo transfers to finish
stream2.wait_stream(torch.cuda.current_stream()) # wait for *_out2_mask to finish
with torch.cuda.stream(stream2):
w1by3 = args[2][:,2:3,:,:].clone()
btm_out1_halo = btm_out1_halo.clone()
btm_out2 = fast_bottleneck.forward_out2_halo_corr(explicit_nhwc, btm_out1_halo, args, w1by3, btm_out2_halo.clone())
btm_out2_halo.copy_(btm_out2)
if spatial_group_rank > 0:
stream1.wait_stream(torch.cuda.current_stream()) # wait for *_out2_mask to finish
with torch.cuda.stream(stream1):
w1by3 = args[2][:,:1,:,:].clone()
top_out1_halo = top_out1_halo.clone()
top_out2 = fast_bottleneck.forward_out2_halo_corr(explicit_nhwc, top_out1_halo, args, w1by3, top_out2_halo.clone())
top_out2_halo.copy_(top_out2)
if spatial_group_rank < spatial_group_size-1:
torch.cuda.current_stream().wait_stream(stream2)
if spatial_group_rank > 0:
torch.cuda.current_stream().wait_stream(stream1)
fast_bottleneck.forward_rest(explicit_nhwc, stride_1x1, args, outputs)
# save halos for backward pass
if spatial_group_size > 1:
if spatial_method != 2:
# make sure copy of mid-section of out1 into out1_pad is done before exiting
torch.cuda.current_stream().wait_stream(stream3)
ctx.save_for_backward(*(args+outputs+[out1_pad,]))
else:
ctx.save_for_backward(*(args+outputs))
# save relu outputs for drelu
ctx.explicit_nhwc = explicit_nhwc
ctx.stride_1x1 = stride_1x1
ctx.spatial_group_size = spatial_group_size
if spatial_group_size > 1:
ctx.spatial_group_rank = spatial_group_rank
ctx.spatial_halo_exchanger = spatial_halo_exchanger
ctx.spatial_method = spatial_method
ctx.use_delay_kernel = use_delay_kernel
ctx.thresholdTop = thresholdTop
ctx.thresholdBottom = thresholdBottom
ctx.stream1 = stream1
ctx.stream2 = stream2
ctx.stream3 = stream3
return outputs[2]
# backward relu is not exposed, MUL with mask used now
# only support dgrad
@staticmethod
def backward(ctx, grad_o):
if ctx.spatial_group_size > 1:
out1_pad = ctx.saved_tensors[-1]
outputs = ctx.saved_tensors[-4:-1]
else:
outputs = ctx.saved_tensors[-3:]
if ctx.downsample:
grad_conv3, grad_conv4 = drelu_dscale2(grad_o, outputs[2], ctx.saved_tensors[6], ctx.saved_tensors[11])
else:
grad_conv3, grad_conv4 = drelu_dscale1(grad_o, outputs[2], ctx.saved_tensors[6])
# create input vector for backward
t_list = [*ctx.saved_tensors[0:10]]
t_list.append(grad_conv3)
t_list.append(grad_conv4)
# outputs used for wgrad and generating drelu mask
t_list.append(outputs[0])
t_list.append(outputs[1])
# in case there is downsample
if ctx.downsample:
t_list.append(ctx.saved_tensors[10])
grads = fast_bottleneck.backward_init(ctx.explicit_nhwc, ctx.stride_1x1, t_list)
wgrad3_stream = torch.cuda.Stream()
wgrad3_stream.wait_stream(torch.cuda.current_stream())
grad_out2 = fast_bottleneck.backward_grad_out2(ctx.explicit_nhwc, ctx.stride_1x1, t_list, grads)
wgrad2_stream = torch.cuda.Stream()
wgrad2_stream.wait_stream(torch.cuda.current_stream())
# do halo exchange of grad_out2 here
# compute halo cells for grad_out1
if ctx.spatial_group_size > 1:
if ctx.explicit_nhwc:
N,Hs,W,C = list(grad_out2.shape)
else:
N,C,Hs,W = list(grad_out2.shape)
relu1 = t_list[12]
ctx.stream1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(ctx.stream1):
top_halo, btm_halo = ctx.spatial_halo_exchanger.left_right_halo_exchange(grad_out2[:,:1,:,:], grad_out2[:,Hs-1:,:,:])
# copy halos to send buffer
if ctx.spatial_method == 1 or ctx.spatial_method == 2:
# 1 -> halo recompute approach
# 2 -> wait for concatenated halos, then do single conv on full input (not implemented yet for bprop)
if ctx.spatial_group_rank < ctx.spatial_group_size-1:
ctx.stream2.wait_stream(ctx.stream1)
with torch.cuda.stream(ctx.stream2):
if ctx.explicit_nhwc:
btm_fat_halo = torch.empty((N,3,W,C),dtype=grad_out2.dtype,device=grad_out2.device)
btm_fat_halo[:,:2,:,:].copy_(grad_out2[:,Hs-2:,:,:])
btm_fat_halo[:,2:,:,:].copy_(btm_halo)
btm_fat_relu_halo = torch.empty((N,3,W,C),dtype=grad_out2.dtype,device=grad_out2.device)
btm_fat_relu_halo[:,:2,:,:].copy_(relu1[:,Hs-2:,:,:])
btm_fat_relu_halo[:,2:,:,:].zero_()
else:
btm_fat_halo = torch.empty((N,C,3,W),dtype=grad_out2.dtype,device=grad_out2.device)
btm_fat_halo[:,:,:2,:].copy_(grad_out2[:,:,Hs-2:,:])
btm_fat_halo[:,:,2:,:].copy_(btm_halo)
btm_fat_relu_halo = torch.empty((N,C,3,W),dtype=grad_out2.dtype,device=grad_out2.device)
btm_fat_relu_halo[:,:,:2,:].copy_(relu1[:,:,Hs-2:,:])
btm_fat_relu_halo[:,:,2:,:].zero_()
btm_grad_out1_halo = fast_bottleneck.backward_grad_out1_halo(ctx.explicit_nhwc, ctx.stride_1x1, t_list, grads, btm_fat_halo, btm_fat_relu_halo)
if ctx.explicit_nhwc:
btm_grad_out1_halo = btm_grad_out1_halo[:,1:2,:,:]
else:
btm_grad_out1_halo = btm_grad_out1_halo[:,:,1:2,:]
if ctx.spatial_group_rank > 0:
with torch.cuda.stream(ctx.stream1):
if ctx.explicit_nhwc:
top_fat_halo = torch.empty((N,3,W,C),dtype=grad_out2.dtype,device=grad_out2.device)
top_fat_halo[:,:1,:,:].copy_(top_halo)
top_fat_halo[:,1:,:,:].copy_(grad_out2[:,:2,:,:])
top_fat_relu_halo = torch.empty((N,3,W,C),dtype=grad_out2.dtype,device=grad_out2.device)
top_fat_relu_halo[:,:1,:,:].zero_()
top_fat_relu_halo[:,1:,:,:].copy_(relu1[:,:2,:,:])
else:
top_fat_halo = torch.empty((N,C,3,W),dtype=grad_out2.dtype,device=grad_out2.device)
top_fat_halo[:,:,:1,:].copy_(top_halo)
top_fat_halo[:,:,1:,:].copy_(grad_out2[:,:,:2,:])
top_fat_relu_halo = torch.empty((N,C,3,W),dtype=grad_out2.dtype,device=grad_out2.device)
top_fat_relu_halo[:,:,:1,:].zero_()
top_fat_relu_halo[:,:,1:,:].copy_(relu1[:,:,:2,:])
top_grad_out1_halo = fast_bottleneck.backward_grad_out1_halo(ctx.explicit_nhwc, ctx.stride_1x1, t_list, grads, top_fat_halo, top_fat_relu_halo)
if ctx.explicit_nhwc:
top_grad_out1_halo = top_grad_out1_halo[:,1:2,:,:]
else:
top_grad_out1_halo = top_grad_out1_halo[:,:,1:2,:]
if ctx.use_delay_kernel: inc.add_delay(10)
elif ctx.spatial_method != 3:
assert(False), "spatial_method must be 1, 2 or 3"
# compute grad_out1 for internal cells
if ctx.spatial_group_size <= 1 or ctx.spatial_method == 1 or ctx.spatial_method == 2:
grad_out1 = fast_bottleneck.backward_grad_out1(ctx.explicit_nhwc, ctx.stride_1x1, t_list, grads, grad_out2)
elif ctx.spatial_group_size > 1 and ctx.spatial_method == 3:
grad_out1 = fast_bottleneck.backward_grad_out1_mask(ctx.explicit_nhwc, ctx.stride_1x1, t_list, grads, grad_out2, ctx.thresholdTop, ctx.thresholdBottom)
# apply halo cells to grad_out1
if ctx.spatial_group_size > 1:
w = t_list[2]
z = t_list[4]
relu1 = t_list[12]
#print("w.shape = %s, z.shape = %s, relu1.shape = %s" % (str(list(w.shape)), str(list(z.shape)), str(list(relu1.shape))))
if ctx.spatial_method == 1 or ctx.spatial_method == 2:
if ctx.spatial_group_rank < ctx.spatial_group_size-1:
torch.cuda.current_stream().wait_stream(ctx.stream2)
if ctx.explicit_nhwc:
grad_out1[:,Hs-1:,:,:].copy_(btm_grad_out1_halo)
else:
grad_out1[:,:,Hs-1:,:].copy_(btm_grad_out1_halo)
#print("ctx.spatial_group_rank = %d, apply grad_out1 btm halo (grad_out1.shape = %s)" % (ctx.spatial_group_rank, str(list(grad_out1.shape))))
if ctx.spatial_group_rank > 0:
torch.cuda.current_stream().wait_stream(ctx.stream1)
if ctx.explicit_nhwc:
grad_out1[:,:1,:,:].copy_(top_grad_out1_halo)
else:
grad_out1[:,:,:1,:].copy_(top_grad_out1_halo)
#print("ctx.spatial_group_rank = %d, apply grad_out1 top halo (grad_out1.shape = %s)" % (ctx.spatial_group_rank, str(list(grad_out1.shape))))
elif ctx.spatial_method == 3:
if ctx.spatial_group_rank < ctx.spatial_group_size-1:
if ctx.explicit_nhwc:
btm_relu_halo = relu1[:,Hs-1:,:,:].clone()
btm_grad_out1 = grad_out1[:,Hs-1:,:,:]
else:
btm_relu_halo = relu1[:,:,Hs-1:,:].clone()
btm_grad_out1 = grad_out1[:,:,Hs-1:,:]
w1by3 = w[:,:1,:,:].clone()
ctx.stream2.wait_stream(ctx.stream1) # wait for halo transfers to finish
ctx.stream2.wait_stream(torch.cuda.current_stream()) # wait for backward_grad_out1_mask to finish before launching halo correction kernel
with torch.cuda.stream(ctx.stream2):
btm_grad_out1_halo = fast_bottleneck.backward_grad_out1_halo_corr(ctx.explicit_nhwc, ctx.stride_1x1, t_list, w1by3, grads, btm_halo, btm_relu_halo, btm_grad_out1.clone())
btm_grad_out1.copy_(btm_grad_out1_halo)
if ctx.spatial_group_rank > 0:
if ctx.explicit_nhwc:
top_relu_halo = relu1[:,:1,:,:].clone()
top_grad_out1 = grad_out1[:,:1,:,:]
else:
top_relu_halo = relu1[:,:,:1,:].clone()
top_grad_out1 = grad_out1[:,:,:1,:]
w1by3 = w[:,2:,:,:].clone()
ctx.stream1.wait_stream(torch.cuda.current_stream()) # wait for backward_grad_out1_mask to finish before launching halo correction kernel
with torch.cuda.stream(ctx.stream1):
top_grad_out1_halo = fast_bottleneck.backward_grad_out1_halo_corr(ctx.explicit_nhwc, ctx.stride_1x1, t_list, w1by3, grads, top_halo, top_relu_halo, top_grad_out1.clone())
top_grad_out1.copy_(top_grad_out1_halo)
if ctx.spatial_group_rank < ctx.spatial_group_size-1:
torch.cuda.current_stream().wait_stream(ctx.stream2) # wait for halo correction to finish
if ctx.spatial_group_rank > 0:
torch.cuda.current_stream().wait_stream(ctx.stream1)
wgrad1_stream = torch.cuda.Stream()
wgrad1_stream.wait_stream(torch.cuda.current_stream())
fast_bottleneck.backward_rest(ctx.explicit_nhwc, ctx.stride_1x1, t_list, grads, grad_out2, grad_out1)
with torch.cuda.stream(wgrad3_stream):
fast_bottleneck.backward_wgrad3(ctx.explicit_nhwc, ctx.stride_1x1, t_list, grads)
with torch.cuda.stream(wgrad2_stream):
if ctx.spatial_group_size > 1:
fast_bottleneck.backward_wgrad2_pad(ctx.explicit_nhwc, ctx.stride_1x1, t_list, grads, out1_pad, grad_out2)
else:
fast_bottleneck.backward_wgrad2(ctx.explicit_nhwc, ctx.stride_1x1, t_list, grads, grad_out2)
with torch.cuda.stream(wgrad1_stream):
fast_bottleneck.backward_wgrad1(ctx.explicit_nhwc, ctx.stride_1x1, t_list, grads, grad_out1)
torch.cuda.current_stream().wait_stream(wgrad3_stream)
torch.cuda.current_stream().wait_stream(wgrad2_stream)
torch.cuda.current_stream().wait_stream(wgrad1_stream)
return (None, None, None, None, None, None, None, None, None, None, None, None, *grads)
spatial_bottleneck_function = SpatialBottleneckFunction.apply
class SpatialBottleneck(torch.nn.Module):
# Bottleneck in torchvision places the stride for downsampling at the 3x3 convolution (self.conv2),
# while the original implementation places the stride at the first 1x1 convolution (self.conv1),
# according to "Deep Residual Learning for Image Recognition" (https://arxiv.org/abs/1512.03385).
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
# Here we put the stride at the 1x1 convolution.
def __init__(self, in_channels, bottleneck_channels, out_channels, stride=1, groups=1,
dilation=1, norm_func=None, use_cudnn=False, explicit_nhwc=False,
spatial_parallel_args=None):
super(SpatialBottleneck, self).__init__()
if groups != 1:
raise RuntimeError('Only support groups == 1')
if dilation != 1:
raise RuntimeError('Only support dilation == 1')
if norm_func == None:
norm_func = FrozenBatchNorm2d
else:
raise RuntimeError('Only support frozen BN now.')
if stride != 1 or in_channels != out_channels:
self.downsample = nn.Sequential(
conv1x1(in_channels, out_channels, stride),
norm_func(out_channels),
)
else:
self.downsample = None
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(in_channels, bottleneck_channels, stride)
self.conv2 = conv3x3(bottleneck_channels, bottleneck_channels)
self.conv3 = conv1x1(bottleneck_channels, out_channels)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
self.bn1 = norm_func(bottleneck_channels)
self.bn2 = norm_func(bottleneck_channels)
self.bn3 = norm_func(out_channels)
self.w_scale = None
self.use_cudnn = use_cudnn
# setup conv weights
self.w_conv = [self.conv1.weight, self.conv2.weight, self.conv3.weight]
if self.downsample is not None:
self.w_conv.append(self.downsample[0].weight)
# init weight in nchw format before possible transpose
for w in self.w_conv:
kaiming_uniform_(w, a=1)
self.thresholdTop, self.thresholdBottom = None, None
# TODO: prevent unsupported case usage
# Supported cases:
#                  native    cudnn
# normal           yes       no
# channel_last     yes       yes
# explicit_nhwc    no        yes
self.explicit_nhwc = explicit_nhwc
if self.explicit_nhwc:
for p in self.parameters():
with torch.no_grad():
p.data = p.data.permute(0,2,3,1).contiguous()
# spatial communicator
if spatial_parallel_args is None:
self.spatial_parallel_args = (1, 0, None, None, 0, False)
else:
self.spatial_parallel_args = spatial_parallel_args
return
# Returns a single callable that recomputes scale and bias for all frozen batch-norms.
# This method must be called before CUDA graphing.
# The callable it returns can be called at any time.
# Calling this method prevents scale and bias from being recomputed on every forward call.
def get_scale_bias_callable(self):
self.w_scale, self.w_bias, args = [], [], []
batch_norms = [self.bn1, self.bn2, self.bn3]
if self.downsample is not None:
batch_norms.append(self.downsample[1])
for bn in batch_norms:
s = torch.empty_like(bn.weight)
b = torch.empty_like(s)
args.append( (bn.weight, bn.bias, bn.running_mean, bn.running_var, s, b) )
if self.explicit_nhwc:
self.w_scale.append( s.reshape(1, 1, 1, -1) )
self.w_bias.append( b.reshape(1, 1, 1, -1) )
else:
self.w_scale.append( s.reshape(1, -1, 1, 1) )
self.w_bias.append( b.reshape(1, -1, 1, 1) )
return func.partial(compute_scale_bias_method, self.explicit_nhwc, args)
def forward(self, x):
if self.use_cudnn:
if self.thresholdTop is None:
spatial_group_size, spatial_group_rank, _, _, _, _ = self.spatial_parallel_args
if self.explicit_nhwc:
N,H,W,C = list(x.shape)
else:
N,C,H,W = list(x.shape)
self.thresholdTop = torch.tensor([1 if spatial_group_rank > 0 else 0], dtype=torch.int32, device='cuda')
self.thresholdBottom = torch.tensor([H-2 if spatial_group_rank < spatial_group_size - 1 else H-1], dtype=torch.int32, device='cuda')
if self.w_scale is None:
# calculate scale/bias from registered buffers
# TODO: make this better
s1, b1 = self.bn1.get_scale_bias(self.explicit_nhwc)
s2, b2 = self.bn2.get_scale_bias(self.explicit_nhwc)
s3, b3 = self.bn3.get_scale_bias(self.explicit_nhwc)
w_scale = [s1, s2, s3]
w_bias = [b1, b2, b3]
if self.downsample is not None:
s4, b4 = self.downsample[1].get_scale_bias(self.explicit_nhwc)
w_scale.append(s4)
w_bias.append(b4)
out = spatial_bottleneck_function(*self.spatial_parallel_args, self.explicit_nhwc, self.stride, w_scale, w_bias, self.thresholdTop, self.thresholdBottom, x, *self.w_conv)
else:
out = spatial_bottleneck_function(*self.spatial_parallel_args, self.explicit_nhwc, self.stride, self.w_scale, self.w_bias, self.thresholdTop, self.thresholdBottom, x, *self.w_conv)
return out
if self.explicit_nhwc:
raise RuntimeError('explicit nhwc with native ops is not supported.')
# fallback to native ops
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
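# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A minimal, hypothetical example of running SpatialBottleneck through its native-op
# fallback path (use_cudnn=False). It assumes the conv1x1/conv3x3 helpers and the
# FrozenBatchNorm2d default defined earlier in this file; all sizes are illustrative.
# The cuDNN/spatial-parallel path additionally requires the fast_bottleneck extension
# and a spatial halo-exchange communicator.
def _spatial_bottleneck_example():
    block = SpatialBottleneck(in_channels=256, bottleneck_channels=64,
                              out_channels=256, stride=1, use_cudnn=False).cuda()
    x = torch.randn(2, 256, 56, 56, device='cuda')  # assumed NCHW input
    out = block(x)  # residual output, same spatial shape as x
    return out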
|
apex-master
|
apex/contrib/bottleneck/bottleneck.py
|
import pdb
import torch
from torch.autograd import gradcheck
from apex import check_cudnn_version_and_warn
import fused_conv_bias_relu
check_cudnn_version_and_warn(__name__, 8400)
class ConvBiasReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
class ConvBiasMaskReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, mask, padding, stride):
outputs = fused_conv_bias_relu.forward_mask([x, weight, bias, mask], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None, None
class ConvBias_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward_no_relu([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight)
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward_no_relu(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
class ConvFrozenScaleBiasReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, scale, bias, padding, stride):
output = fused_conv_bias_relu.forward_cscale_cbias_relu([x, weight, scale, bias], padding, stride)
ctx.save_for_backward(x, weight, scale, output)
ctx.padding = padding
ctx.stride = stride
return output
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward_cscale_cbias_relu(bwd_args, padding, stride)
return grads[0], grads[1], None, None, None, None
ConvBiasReLU = ConvBiasReLU_.apply
ConvBiasMaskReLU = ConvBiasMaskReLU_.apply
ConvBias = ConvBias_.apply
ConvFrozenScaleBiasReLU = ConvFrozenScaleBiasReLU_.apply
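# --- Illustrative call-shape sketch (editor's addition, not part of the original file) ---
# A hedged sketch of the positional argument order these autograd Functions expect.
# The exact tensor layout and dtype requirements of the fused_conv_bias_relu extension
# are not documented in this file, so treat the parameter names below as assumptions;
# custom_fwd(cast_inputs=torch.half) casts floating-point inputs to half precision.
def _conv_bias_relu_call_shape(x, weight, bias, padding=1, stride=1):
    # x: input activations, weight: convolution filter, bias: per-channel bias,
    # padding/stride: forwarded unchanged to the fused kernel
    return ConvBiasReLU(x, weight, bias, padding, stride)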
|
apex-master
|
apex/contrib/conv_bias_relu/conv_bias_relu.py
|
from .conv_bias_relu import ConvBiasReLU, ConvBias, ConvBiasMaskReLU, ConvFrozenScaleBiasReLU
|
apex-master
|
apex/contrib/conv_bias_relu/__init__.py
|
import torch
import fast_multihead_attn
class FastEncdecAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
pad_mask,
dropout_prob,
):
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
use_mask = pad_mask is not None
(
input_lin_q_results,
input_lin_kv_results,
softmax_results,
dropout_results,
dropout_mask,
matmul2_results,
outputs,
) = fast_multihead_attn.encdec_multihead_attn_forward(
use_mask,
use_time_mask,
is_training,
heads,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
pad_mask if use_mask else null_tensor,
dropout_prob,
)
ctx.save_for_backward(
heads_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_prob_t,
)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
(
heads_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_prob_t,
) = ctx.saved_tensors
(
input_q_grads,
input_kv_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads,
) = fast_multihead_attn.encdec_multihead_attn_backward(
heads_t[0],
output_grads,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_prob_t[0],
)
return (
None,
None,
None,
input_q_grads,
input_kv_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads,
None,
None,
)
fast_encdec_attn_func = FastEncdecAttnFunc.apply
|
apex-master
|
apex/contrib/multihead_attn/fast_encdec_multihead_attn_func.py
|
import torch
import fast_multihead_attn
class MaskSoftmaxDropout(torch.autograd.Function):
@staticmethod
def forward(ctx, is_training, heads, inputs, pad_mask, mask_additive, dropout_prob):
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
use_mask = pad_mask is not None
use_mask_t = torch.tensor([use_mask])
mask_additive_t = torch.tensor([mask_additive])
if mask_additive:
dropout_results, dropout_mask, softmax_results = fast_multihead_attn.additive_mask_softmax_dropout_forward(
use_mask, is_training, heads, inputs, pad_mask if use_mask else null_tensor, dropout_prob
)
# fast_additive_mask_softmax_dropout.forward( \
else:
dropout_results, dropout_mask, softmax_results = fast_multihead_attn.mask_softmax_dropout_forward(
use_mask, is_training, heads, inputs, pad_mask if use_mask else null_tensor, dropout_prob
)
# fast_mask_softmax_dropout.forward( \
ctx.save_for_backward(
use_mask_t,
heads_t,
softmax_results,
dropout_mask,
pad_mask if use_mask else null_tensor,
mask_additive_t,
dropout_prob_t,
)
return dropout_results.detach()
@staticmethod
def backward(ctx, output_grads):
(
use_mask_t,
heads_t,
softmax_results,
dropout_mask,
pad_mask,
mask_additive_t,
dropout_prob_t,
) = ctx.saved_tensors
if mask_additive_t[0]:
input_grads = fast_multihead_attn.additive_mask_softmax_dropout_backward(
use_mask_t[0], heads_t[0], output_grads, softmax_results, dropout_mask, dropout_prob_t[0]
)
# fast_additive_mask_softmax_dropout.backward( \
else:
input_grads = fast_multihead_attn.mask_softmax_dropout_backward(
use_mask_t[0], heads_t[0], output_grads, softmax_results, dropout_mask, pad_mask, dropout_prob_t[0]
)
# fast_mask_softmax_dropout.backward( \
return None, None, input_grads, None, None, None
fast_mask_softmax_dropout_func = MaskSoftmaxDropout.apply
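# --- Illustrative call-shape sketch (editor's addition, not part of the original file) ---
# A hedged sketch of the positional argument order of the fused mask+softmax+dropout
# function above; it requires the fast_multihead_attn CUDA extension at runtime. The
# meaning of each argument follows MaskSoftmaxDropout.forward; the exact layout the
# kernel expects for `scores` and `pad_mask` is not shown here and is left to the caller.
def _mask_softmax_dropout_call_shape(scores, pad_mask, heads, dropout_prob,
                                     is_training=True, mask_additive=False):
    return fast_mask_softmax_dropout_func(is_training, heads, scores, pad_mask,
                                          mask_additive, dropout_prob)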
|
apex-master
|
apex/contrib/multihead_attn/mask_softmax_dropout_func.py
|
import torch
import torch.nn.functional as F
class SelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
scale,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
mask,
is_additive_mask,
dropout_prob,
):
use_biases_t = torch.tensor([input_biases is not None])
heads_t = torch.tensor([heads])
scale_t = torch.tensor([scale])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs.size(2) // heads
# Input Linear GEMM
# input1: (activations) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)] (transpose [0,1])
# output: [seql_q, seqs, embed_dim*3]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (seql_q*seqs x embed_dim*3)
if use_biases_t[0]:
input_lin_results = torch.addmm(
input_biases,
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
input_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
input_lin_results = torch.mm(
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)), input_weights.transpose(0, 1)
)
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
# Slice out q,k,v from one big Input Linear output (should only impact metadata, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
# Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul1_results = torch.empty(
(queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype, device=torch.device("cuda")
)
matmul1_results = torch.baddbmm(
matmul1_results,
queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results,
beta=0.0,
alpha=scale_t[0],
)
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert len(mask.size()) == 2, "Timing mask is not 2D!"
assert mask.size(0) == mask.size(1), "Sequence length should match!"
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask, float("-inf"))
# Key Padding Mask
else:
batches, seql_q, seql_k = matmul1_results.size()
seqs = int(batches / heads)
matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
if is_additive_mask:
matmul1_results = matmul1_results + mask.unsqueeze(1).unsqueeze(2)
else:
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float("-inf"))
matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
softmax_results = F.softmax(matmul1_results, dim=-1)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1.0 - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
# The output tensor is specified here because the output layout is non-standard.
# Since PyTorch cannot currently perform autograd when an output tensor is specified,
# a custom backward pass must be provided.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
matmul2_results = torch.empty(
(dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype,
device=torch.device("cuda"),
).transpose(1, 0)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
matmul2_results = (
matmul2_results.transpose(0, 1).contiguous().view(inputs.size(0), inputs.size(1), inputs.size(2))
)
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
if use_biases_t[0]:
outputs = torch.addmm(
output_biases,
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
output_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
outputs = torch.mm(
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)), output_weights.transpose(0, 1)
)
outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0))
ctx.save_for_backward(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
) = ctx.saved_tensors
head_dim = inputs.size(2) // heads_t[0]
# Slice out q,k,v from one big Input Linear output (should only impact metadata, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
# Slice out q,k,v from one big set of gradients entering the input linear's bprop (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared beforehand to properly slice out query, key, and value grads.
input_lin_results_grads = torch.empty_like(input_lin_results)
queries_grads = input_lin_results_grads[:, :, 0, :]
keys_grads = input_lin_results_grads[:, :, 1, :]
values_grads = input_lin_results_grads[:, :, 2, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights
)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
# Output: [ embed_dim, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)),
)
output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
if use_biases_t[0]:
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0
)
else:
output_bias_grads = None
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input1: (dropout results) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_k, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
# Mask and Scaling for Dropout (not a publicly documented op)
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
# Softmax Grad (not a publicly documented op)
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results.dtype)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
queries_grads = torch.baddbmm(
queries_grads.transpose(0, 1),
softmax_grads,
keys.transpose(0, 1),
out=queries_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
keys_grads = torch.baddbmm(
keys_grads.transpose(0, 1),
softmax_grads.transpose(1, 2),
queries.transpose(0, 1),
out=keys_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Input Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, 3*embed_dim(3072)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
input_lin_results_grads = input_lin_results_grads.view(
inputs.size(0) * inputs.size(1), heads_t[0] * 3 * head_dim
)
input_grads = torch.mm(input_lin_results_grads, input_weights)
input_grads = input_grads.view(inputs.size(0), inputs.size(1), inputs.size(2))
# Input Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, 3*embed_dim(3072)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [3*embed_dim, embed_dim]
# GEMM: ( 3*embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (3*embed_dim x embed_dim)
input_weight_grads = torch.mm(
input_lin_results_grads.transpose(0, 1), inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2))
)
if use_biases_t[0]:
input_bias_grads = torch.sum(input_lin_results_grads, 0)
else:
input_bias_grads = None
return (
None,
None,
None,
None,
input_grads,
input_weight_grads,
output_weight_grads,
input_bias_grads,
output_bias_grads,
None,
None,
None,
)
self_attn_func = SelfAttnFunc.apply
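# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A minimal, hypothetical forward call of this pure-PyTorch reference path. The sizes
# (seql=32, seqs=4, embed_dim=1024, heads=16) are assumptions chosen to match the shape
# comments above; the function allocates its work buffers on CUDA, so inputs must be
# CUDA tensors. Inference mode (is_training=False) is used so the fused dropout is skipped.
def _self_attn_func_example():
    seql, seqs, embed_dim, heads = 32, 4, 1024, 16
    scale = (embed_dim // heads) ** -0.5
    inputs = torch.randn(seql, seqs, embed_dim, device='cuda')
    input_weights = 0.02 * torch.randn(3 * embed_dim, embed_dim, device='cuda')  # packed q,k,v projection
    output_weights = 0.02 * torch.randn(embed_dim, embed_dim, device='cuda')     # output projection
    # positional args: use_time_mask, is_training, heads, scale, inputs, input_weights,
    #                  output_weights, input_biases, output_biases, mask, is_additive_mask, dropout_prob
    return self_attn_func(False, False, heads, scale, inputs,
                          input_weights, output_weights, None, None, None, False, 0.0)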
|
apex-master
|
apex/contrib/multihead_attn/self_multihead_attn_func.py
|
from .self_multihead_attn import SelfMultiheadAttn
from .encdec_multihead_attn import EncdecMultiheadAttn
from .mask_softmax_dropout_func import fast_mask_softmax_dropout_func
|
apex-master
|
apex/contrib/multihead_attn/__init__.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .self_multihead_attn_func import self_attn_func
from .fast_self_multihead_attn_func import fast_self_attn_func
from .fast_self_multihead_attn_norm_add_func import fast_self_attn_norm_add_func
from apex.normalization.fused_layer_norm import FusedLayerNorm
@torch.jit.script
def jit_dropout_add(x, residual, prob, is_training):
# type: (Tensor, Tensor, float, bool) -> Tensor
out = F.dropout(x, p=prob, training=True)
out = residual + out
return out
class SelfMultiheadAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=False,
include_norm_add=False,
impl="fast",
separate_qkv_params=False,
mask_additive=False,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = bias
self.include_norm_add = include_norm_add
self.impl = impl
self.scaling = self.head_dim ** -0.5
self.separate_qkv_params = separate_qkv_params
self.mask_additive = mask_additive
if mask_additive:
assert self.include_norm_add == False, "additive mask not supported with layer norm"
assert impl == "default" or (
impl == "fast" and bias
), "additive mask not supported for fast mode without bias"
if separate_qkv_params:
self.q_weight = Parameter(torch.empty(embed_dim, embed_dim))
self.k_weight = Parameter(torch.empty(embed_dim, embed_dim))
self.v_weight = Parameter(torch.empty(embed_dim, embed_dim))
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.empty(embed_dim, embed_dim))
if self.bias:
if separate_qkv_params:
self.q_bias = Parameter(torch.empty(embed_dim))
self.k_bias = Parameter(torch.empty(embed_dim))
self.v_bias = Parameter(torch.empty(embed_dim))
else:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
self.out_proj_bias = Parameter(torch.empty(embed_dim))
else:
if separate_qkv_params:
self.register_parameter("q_bias", None)
self.register_parameter("k_bias", None)
self.register_parameter("v_bias", None)
self.q_bias = None
self.k_bias = None
self.v_bias = None
else:
self.register_parameter("in_proj_bias", None)
self.in_proj_bias = None
self.register_parameter("out_proj_bias", None)
self.out_proj_bias = None
if self.include_norm_add:
if impl == "fast":
self.lyr_nrm_gamma_weights = Parameter(torch.empty(embed_dim))
self.lyr_nrm_beta_weights = Parameter(torch.empty(embed_dim))
self.lyr_nrm = None
else:
self.register_parameter("lyr_norm_gamma_weights", None)
self.register_parameter("lyr_norm_beta_weights", None)
self.lyr_nrm_gamma_weights = None
self.lyr_nrm_beta_weights = None
self.lyr_nrm = FusedLayerNorm(embed_dim)
self.reset_parameters()
if self.include_norm_add:
if impl == "fast":
self.attn_func = fast_self_attn_norm_add_func
elif impl == "default":
self.attn_func = self_attn_func
else:
assert False, "Unsupported impl: {} !".format(impl)
else:
if impl == "fast":
self.attn_func = fast_self_attn_func
elif impl == "default":
self.attn_func = self_attn_func
else:
assert False, "Unsupported impl: {} !".format(impl)
def reset_parameters(self):
if self.separate_qkv_params:
nn.init.xavier_uniform_(self.q_weight)
nn.init.xavier_uniform_(self.k_weight)
nn.init.xavier_uniform_(self.v_weight)
else:
# in_proj_weight has shape [3 * hidden, hidden] but it should be
# initialized like a [hidden, hidden] matrix.
# sqrt(6 / (hidden + hidden)) / sqrt(6 / (3 * hidden + hidden)) = sqrt(2)
# therefore xavier_uniform gain should be set to sqrt(2).
nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
nn.init.xavier_uniform_(self.out_proj_weight)
if self.bias:
if self.separate_qkv_params:
nn.init.constant_(self.q_bias, 0.0)
nn.init.constant_(self.k_bias, 0.0)
nn.init.constant_(self.v_bias, 0.0)
else:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj_bias, 0.0)
if self.include_norm_add:
if self.impl == "fast":
nn.init.ones_(self.lyr_nrm_gamma_weights)
nn.init.zeros_(self.lyr_nrm_beta_weights)
else:
self.lyr_nrm.reset_parameters()
def forward(self, query, key, value, key_padding_mask=None, need_weights=False, attn_mask=None, is_training=True):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Future timesteps can be masked with the
`attn_mask` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
if self.separate_qkv_params:
input_weights = (
torch.cat(
[
self.q_weight.view(self.num_heads, 1, self.head_dim, self.embed_dim),
self.k_weight.view(self.num_heads, 1, self.head_dim, self.embed_dim),
self.v_weight.view(self.num_heads, 1, self.head_dim, self.embed_dim),
],
dim=1,
)
.reshape(3 * self.embed_dim, self.embed_dim)
.contiguous()
)
else:
input_weights = self.in_proj_weight
if self.bias:
if self.separate_qkv_params:
input_bias = (
torch.cat(
[
self.q_bias.view(self.num_heads, 1, self.head_dim),
self.k_bias.view(self.num_heads, 1, self.head_dim),
self.v_bias.view(self.num_heads, 1, self.head_dim),
],
dim=1,
)
.reshape(3 * self.embed_dim)
.contiguous()
)
else:
input_bias = self.in_proj_bias
else:
input_bias = None
if key_padding_mask is not None:
assert attn_mask is None, "ERROR attn_mask and key_padding_mask should not be both defined!"
mask = key_padding_mask
elif attn_mask is not None:
assert self.mask_additive == False, "additive mask not supported for time mask"
mask = attn_mask
else:
mask = None
if self.include_norm_add:
if self.impl == "fast":
outputs = self.attn_func(
attn_mask is not None,
is_training,
self.num_heads,
query,
self.lyr_nrm_gamma_weights,
self.lyr_nrm_beta_weights,
input_weights,
self.out_proj_weight,
mask,
self.dropout,
)
else:
lyr_nrm_results = self.lyr_nrm(query)
outputs = self.attn_func(
attn_mask is not None,
is_training,
self.num_heads,
self.scaling,
lyr_nrm_results,
input_weights,
self.out_proj_weight,
input_bias,
self.out_proj_bias,
mask,
self.mask_additive,
self.dropout,
)
if is_training:
outputs = jit_dropout_add(outputs, query, self.dropout, is_training)
else:
outputs = outputs + query
else:
if self.impl == "fast":
outputs = self.attn_func(
attn_mask is not None,
is_training,
self.num_heads,
query,
input_weights,
self.out_proj_weight,
input_bias,
self.out_proj_bias,
mask,
self.mask_additive,
self.dropout,
)
else:
outputs = self.attn_func(
attn_mask is not None,
is_training,
self.num_heads,
self.scaling,
query,
input_weights,
self.out_proj_weight,
input_bias,
self.out_proj_bias,
mask,
self.mask_additive,
self.dropout,
)
return outputs, None
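# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A minimal, hypothetical example using the pure-PyTorch reference path (impl="default"),
# which avoids calling the fast_multihead_attn kernels at forward time. Sizes are
# assumptions; inputs follow the Time x Batch x Channel layout described in forward().
def _self_multihead_attn_example():
    attn = SelfMultiheadAttn(embed_dim=1024, num_heads=16, dropout=0.1,
                             bias=True, impl="default").cuda()
    q = torch.randn(32, 4, 1024, device='cuda')  # [time, batch, channel]
    out, _ = attn(q, q, q, key_padding_mask=None, is_training=False)
    return out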
|
apex-master
|
apex/contrib/multihead_attn/self_multihead_attn.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import fast_multihead_attn
class FastEncdecAttnNormAddFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
inputs_q,
inputs_kv,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights_q,
input_weights_kv,
output_weights,
pad_mask,
dropout_prob,
):
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
use_mask = pad_mask is not None
(
lyr_nrm_results,
lyr_nrm_mean,
lyr_nrm_invvar,
input_lin_q_results,
input_lin_kv_results,
softmax_results,
dropout_results,
dropout_mask,
matmul2_results,
dropout_add_mask,
outputs,
) = fast_multihead_attn.encdec_multihead_attn_norm_add_forward(
use_mask,
use_time_mask,
is_training,
heads,
inputs_q,
inputs_kv,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights_q,
input_weights_kv,
output_weights,
pad_mask if use_mask else null_tensor,
dropout_prob,
)
ctx.save_for_backward(
heads_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
lyr_nrm_results,
lyr_nrm_mean,
lyr_nrm_invvar,
inputs_q,
inputs_kv,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_add_mask,
dropout_prob_t,
)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
(
heads_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
lyr_nrm_results,
lyr_nrm_mean,
lyr_nrm_invvar,
inputs_q,
inputs_kv,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_add_mask,
dropout_prob_t,
) = ctx.saved_tensors
(
input_q_grads,
input_kv_grads,
lyr_nrm_gamma_grads,
lyr_nrm_beta_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads,
) = fast_multihead_attn.encdec_multihead_attn_norm_add_backward(
heads_t[0],
output_grads,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
lyr_nrm_results,
lyr_nrm_mean,
lyr_nrm_invvar,
inputs_q,
inputs_kv,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_add_mask,
dropout_prob_t[0],
)
# import pdb; pdb.set_trace()
return (
None,
None,
None,
input_q_grads,
input_kv_grads,
lyr_nrm_gamma_grads,
lyr_nrm_beta_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads,
None,
None,
)
fast_encdec_attn_norm_add_func = FastEncdecAttnNormAddFunc.apply
|
apex-master
|
apex/contrib/multihead_attn/fast_encdec_multihead_attn_norm_add_func.py
|
import torch
import torch.nn.functional as F
class EncdecAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
scale,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
input_biases_q,
input_biases_kv,
output_biases,
mask,
dropout_prob,
):
use_biases_t = torch.tensor([input_biases_q is not None])
heads_t = torch.tensor([heads])
scale_t = torch.tensor([scale])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs_q.size(2) // heads
# Input Linear GEMM Q
# input1: (activations) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim (1024), embed_dim (1024)] (transpose [0,1])
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
if use_biases_t[0]:
input_lin_q_results = torch.addmm(
input_biases_q,
inputs_q.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)),
input_weights_q.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
input_lin_q_results = torch.mm(
inputs_q.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)), input_weights_q.transpose(0, 1)
)
input_lin_q_results = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1), input_weights_q.size(0))
# Input Linear GEMM KV
# input1: (activations) [seql_k, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim*2 (2048), embed_dim (1024)] (transpose [0,1])
# output: [seql_k, seqs, embed_dim*2]
# GEMM: ( (seql_k*seqs) x embed_dim ) x ( embed_dim x embed_dim*2 ) = (seql_k*seqs x embed_dim*2)
if use_biases_t[0]:
input_lin_kv_results = torch.addmm(
input_biases_kv,
inputs_kv.view(inputs_kv.size(0) * inputs_kv.size(1), inputs_kv.size(2)),
input_weights_kv.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
input_lin_kv_results = torch.mm(
inputs_kv.view(inputs_kv.size(0) * inputs_kv.size(1), inputs_kv.size(2)),
input_weights_kv.transpose(0, 1),
)
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1), input_weights_kv.size(0))
# Slice out k,v from one big Input Linear output (should only impact metadata, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_kv_results: [seql_k, seqs, heads(16), 2, head_dim(64)]
# input_lin_kv_results: [seql_k, batches=seqs*heads, 2, head_dim]
queries = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1) * heads, head_dim)
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1) * heads, 2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
# Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul1_results = torch.empty(
(queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype, device=torch.device("cuda")
)
matmul1_results = torch.baddbmm(
matmul1_results,
queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results,
beta=0.0,
alpha=scale_t[0],
)
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert len(mask.size()) == 2, "Timing mask is not 2D!"
assert mask.size(0) == mask.size(1), "Sequence length should match!"
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask, float("-inf"))
# Key Padding Mask
else:
batches, seql_q, seql_k = matmul1_results.size()
seqs = int(batches / heads)
matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float("-inf"))
matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
softmax_results = F.softmax(matmul1_results, dim=-1)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1.0 - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
# The output tensor is specified here because the output layout is non-standard.
# Since PyTorch cannot currently perform autograd when an output tensor is specified,
# a custom backward pass must be provided.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
matmul2_results = torch.empty(
(dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype,
device=torch.device("cuda"),
).transpose(1, 0)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
matmul2_results = (
matmul2_results.transpose(0, 1).contiguous().view(inputs_q.size(0), inputs_q.size(1), inputs_q.size(2))
)
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
if use_biases_t[0]:
outputs = torch.addmm(
output_biases,
matmul2_results.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)),
output_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
outputs = torch.mm(
matmul2_results.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)),
output_weights.transpose(0, 1),
)
outputs = outputs.view(inputs_q.size(0), inputs_q.size(1), output_weights.size(0))
ctx.save_for_backward(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_prob_t,
)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_prob_t,
) = ctx.saved_tensors
head_dim = inputs_q.size(2) // heads_t[0]
# Slice out k,v from one big Input Linear output (should only impact metadata, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_kv_results: [seql_k, seqs, heads(16), 2, head_dim(64)]
# input_lin_kv_results: [seql_k, batches=seqs*heads, 2, head_dim]
queries = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1) * heads_t[0], head_dim)
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1) * heads_t[0], 2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
# Slice out k,v from one big set of gradients entering the input linear's bprop (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared beforehand to properly slice out query, key, and value grads.
input_lin_kv_results_grads = torch.empty_like(input_lin_kv_results)
queries_grads = torch.empty_like(queries)
keys_grads = input_lin_kv_results_grads[:, :, 0, :]
values_grads = input_lin_kv_results_grads[:, :, 1, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights
)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
# Output: [ embed_dim, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)),
)
output_lin_grads = output_lin_grads.view(
output_grads.size(0), output_grads.size(1) * heads_t[0], head_dim
).transpose(0, 1)
if use_biases_t[0]:
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0
)
else:
output_bias_grads = None
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input1: (dropout results) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_k, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
# Mask and Scaling for Dropout (not a publicly documented op)
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
# Softmax Grad (not a publicly documented op)
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results.dtype)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
queries_grads = torch.baddbmm(
queries_grads.transpose(0, 1),
softmax_grads,
keys.transpose(0, 1),
out=queries_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
keys_grads = torch.baddbmm(
keys_grads.transpose(0, 1),
softmax_grads.transpose(1, 2),
queries.transpose(0, 1),
out=keys_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Input Q Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim (1024), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
queries_grads = queries_grads.transpose(0, 1).view(inputs_q.size(0) * inputs_q.size(1), heads_t[0] * head_dim)
input_q_grads = torch.mm(queries_grads, input_weights_q)
input_q_grads = input_q_grads.view(inputs_q.size(0), inputs_q.size(1), inputs_q.size(2))
# Input KV Linear GEMM - DGRAD
# input1: (data grads) [seql_k, seqs, 2*embed_dim(2048)]
# input2: (weights) [embed_dim*2 (2048), embed_dim (1024)]
# output: [seql_k, seqs, embed_dim]
# GEMM: ( (seql_k*seqs) x 2*embed_dim ) x ( 2*embed_dim x embed_dim ) = (seql_k*seqs x embed_dim)
input_lin_kv_results_grads = input_lin_kv_results_grads.view(
inputs_kv.size(0) * inputs_kv.size(1), heads_t[0] * 2 * head_dim
)
input_kv_grads = torch.mm(input_lin_kv_results_grads, input_weights_kv)
input_kv_grads = input_kv_grads.view(inputs_kv.size(0), inputs_kv.size(1), inputs_kv.size(2))
# Input Q Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, embed_dim(1024)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [embed_dim, embed_dim]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (embed_dim x embed_dim)
input_weight_q_grads = torch.mm(
queries_grads.transpose(0, 1), inputs_q.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2))
)
# Input KV Linear GEMM - WGRAD
# input1: (data grads) [seql_k*seqs, 2*embed_dim(2048)]
# input2: (activations) [seql_k*seqs, embed_dim(1024)]
# output: [2*embed_dim, embed_dim]
# GEMM: ( 2*embed_dim x seql_k*seqs ) x ( seql_k*seqs x embed_dim ) = (2*embed_dim x embed_dim)
input_weight_kv_grads = torch.mm(
input_lin_kv_results_grads.transpose(0, 1),
inputs_kv.view(inputs_kv.size(0) * inputs_kv.size(1), inputs_kv.size(2)),
)
if use_biases_t[0]:
input_bias_grads_q = torch.sum(queries_grads, 0)
input_bias_grads_kv = torch.sum(input_lin_kv_results_grads, 0)
else:
input_bias_grads_q = None
input_bias_grads_kv = None
return (
None,
None,
None,
None,
input_q_grads,
input_kv_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads,
input_bias_grads_q,
input_bias_grads_kv,
output_bias_grads,
None,
None,
)
encdec_attn_func = EncdecAttnFunc.apply
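# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A minimal, hypothetical forward call of this pure-PyTorch reference path. The sizes
# (seql_q=32, seql_k=48, seqs=4, embed_dim=1024, heads=16) are assumptions matching the
# shape comments above; inputs must be CUDA tensors because the work buffers are
# allocated on CUDA. Inference mode (is_training=False) skips the fused dropout.
def _encdec_attn_func_example():
    seql_q, seql_k, seqs, embed_dim, heads = 32, 48, 4, 1024, 16
    scale = (embed_dim // heads) ** -0.5
    inputs_q = torch.randn(seql_q, seqs, embed_dim, device='cuda')   # decoder states
    inputs_kv = torch.randn(seql_k, seqs, embed_dim, device='cuda')  # encoder states
    weights_q = 0.02 * torch.randn(embed_dim, embed_dim, device='cuda')
    weights_kv = 0.02 * torch.randn(2 * embed_dim, embed_dim, device='cuda')  # packed k,v projection
    weights_out = 0.02 * torch.randn(embed_dim, embed_dim, device='cuda')
    # positional args: use_time_mask, is_training, heads, scale, inputs_q, inputs_kv,
    #                  input_weights_q, input_weights_kv, output_weights,
    #                  input_biases_q, input_biases_kv, output_biases, mask, dropout_prob
    return encdec_attn_func(False, False, heads, scale, inputs_q, inputs_kv,
                            weights_q, weights_kv, weights_out,
                            None, None, None, None, 0.0)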
|
apex-master
|
apex/contrib/multihead_attn/encdec_multihead_attn_func.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .encdec_multihead_attn_func import encdec_attn_func
from .fast_encdec_multihead_attn_func import fast_encdec_attn_func
from .fast_encdec_multihead_attn_norm_add_func import fast_encdec_attn_norm_add_func
from apex.normalization.fused_layer_norm import FusedLayerNorm
@torch.jit.script
def jit_dropout_add(x, residual, prob, is_training):
# type: (Tensor, Tensor, float, bool) -> Tensor
out = F.dropout(x, p=prob, training=True)
out = residual + out
return out
class EncdecMultiheadAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=False, include_norm_add=False, impl="fast"):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = bias
self.include_norm_add = include_norm_add
self.impl = impl
self.scaling = self.head_dim ** -0.5
self.in_proj_weight_q = Parameter(torch.empty(embed_dim, embed_dim))
self.in_proj_weight_kv = Parameter(torch.empty(2 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.empty(embed_dim, embed_dim))
if self.bias:
assert impl != "fast", "ERROR! The Fast implementation does not support biases!"
self.in_proj_bias_q = Parameter(torch.empty(embed_dim))
self.in_proj_bias_kv = Parameter(torch.empty(2 * embed_dim))
self.out_proj_bias = Parameter(torch.empty(embed_dim))
else:
self.register_parameter("in_proj_bias_q", None)
self.register_parameter("in_proj_bias_kv", None)
self.in_proj_bias_q = None
self.in_proj_bias_kv = None
self.out_proj_bias = None
if self.include_norm_add:
if impl == "fast":
self.lyr_nrm_gamma_weights = Parameter(torch.empty(embed_dim))
self.lyr_nrm_beta_weights = Parameter(torch.empty(embed_dim))
self.lyr_nrm = None
else:
self.register_parameter("lyr_norm_gamma_weights", None)
self.register_parameter("lyr_norm_beta_weights", None)
self.lyr_nrm_gamma_weights = None
self.lyr_nrm_beta_weights = None
self.lyr_nrm = FusedLayerNorm(embed_dim)
self.reset_parameters()
if self.include_norm_add:
if impl == "fast":
self.attn_func = fast_encdec_attn_norm_add_func
elif impl == "default":
self.attn_func = encdec_attn_func
else:
assert False, "Unsupported impl: {} !".format(impl)
else:
if impl == "fast":
self.attn_func = fast_encdec_attn_func
elif impl == "default":
self.attn_func = encdec_attn_func
else:
assert False, "Unsupported impl: {} !".format(impl)
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight_q)
# in_proj_weight_kv has shape [2 * hidden, hidden] but it should be
# initialized like a [hidden, hidden] matrix.
# sqrt(6 / (hidden + hidden)) / sqrt(6 / (2 * hidden + hidden)) = sqrt(1.5)
# therefore xavier_uniform gain should be set to sqrt(1.5).
nn.init.xavier_uniform_(self.in_proj_weight_kv, gain=math.sqrt(1.5))
nn.init.xavier_uniform_(self.out_proj_weight)
if self.bias:
nn.init.constant_(self.in_proj_bias_q, 0.0)
nn.init.constant_(self.in_proj_bias_kv, 0.0)
nn.init.constant_(self.out_proj_bias, 0.0)
if self.include_norm_add:
if self.impl == "fast":
nn.init.ones_(self.lyr_nrm_gamma_weights)
nn.init.zeros_(self.lyr_nrm_beta_weights)
else:
self.lyr_nrm.reset_parameters()
def forward(self, query, key, value, key_padding_mask=None, need_weights=False, attn_mask=None, is_training=True):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Future timesteps can be masked with the
`attn_mask` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
if key_padding_mask is not None:
assert attn_mask is None, "ERROR attn_mask and key_padding_mask should not be both defined!"
mask = key_padding_mask
elif attn_mask is not None:
mask = attn_mask
else:
mask = None
if self.include_norm_add:
if self.impl == "fast":
outputs = self.attn_func(
attn_mask is not None,
is_training,
self.num_heads,
query,
key,
self.lyr_nrm_gamma_weights,
self.lyr_nrm_beta_weights,
self.in_proj_weight_q,
self.in_proj_weight_kv,
self.out_proj_weight,
mask,
self.dropout,
)
else:
lyr_nrm_results = self.lyr_nrm(query)
outputs = self.attn_func(
attn_mask is not None,
is_training,
self.num_heads,
self.scaling,
lyr_nrm_results,
key,
self.in_proj_weight_q,
self.in_proj_weight_kv,
self.out_proj_weight,
self.in_proj_bias_q,
self.in_proj_bias_kv,
self.out_proj_bias,
mask,
self.dropout,
)
if is_training:
outputs = jit_dropout_add(outputs, query, self.dropout, is_training)
else:
outputs = outputs + query
else:
if self.impl == "fast":
outputs = self.attn_func(
attn_mask is not None,
is_training,
self.num_heads,
query,
key,
self.in_proj_weight_q,
self.in_proj_weight_kv,
self.out_proj_weight,
mask,
self.dropout,
)
else:
outputs = self.attn_func(
attn_mask is not None,
is_training,
self.num_heads,
self.scaling,
query,
key,
self.in_proj_weight_q,
self.in_proj_weight_kv,
self.out_proj_weight,
self.in_proj_bias_q,
self.in_proj_bias_kv,
self.out_proj_bias,
mask,
self.dropout,
)
return outputs, None
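# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A minimal, hypothetical example using the pure-PyTorch reference path (impl="default"),
# which routes forward() through encdec_attn_func instead of the fused kernels. Sizes
# are assumptions; inputs follow the Time x Batch x Channel layout described in forward().
def _encdec_multihead_attn_example():
    attn = EncdecMultiheadAttn(embed_dim=1024, num_heads=16, dropout=0.1,
                               bias=False, impl="default").cuda()
    q = torch.randn(32, 4, 1024, device='cuda')   # decoder states [time, batch, channel]
    kv = torch.randn(48, 4, 1024, device='cuda')  # encoder states [time, batch, channel]
    out, _ = attn(q, kv, kv, is_training=False)
    return out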
|
apex-master
|
apex/contrib/multihead_attn/encdec_multihead_attn.py
|
import torch
import fast_multihead_attn
class FastSelfAttnNormAddFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
inputs,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights,
output_weights,
pad_mask,
dropout_prob,
):
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
use_mask = pad_mask is not None
(
lyr_nrm_results,
lyr_nrm_mean,
lyr_nrm_invvar,
input_lin_results,
softmax_results,
dropout_results,
dropout_mask,
matmul2_results,
dropout_add_mask,
outputs,
) = fast_multihead_attn.self_attn_norm_add_forward(
use_mask,
use_time_mask,
is_training,
heads,
inputs,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights,
output_weights,
pad_mask if use_mask else null_tensor,
dropout_prob,
)
# fast_self_multihead_attn_norm_add.forward( \
ctx.save_for_backward(
heads_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
lyr_nrm_results,
lyr_nrm_mean,
lyr_nrm_invvar,
inputs,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights,
output_weights,
dropout_mask,
dropout_add_mask,
dropout_prob_t,
)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
(
heads_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
lyr_nrm_results,
lyr_nrm_mean,
lyr_nrm_invvar,
inputs,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights,
output_weights,
dropout_mask,
dropout_add_mask,
dropout_prob_t,
) = ctx.saved_tensors
(
input_grads,
lyr_nrm_gamma_grads,
lyr_nrm_beta_grads,
input_weight_grads,
output_weight_grads,
) = fast_multihead_attn.self_attn_norm_add_backward(
heads_t[0],
output_grads,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
lyr_nrm_results,
lyr_nrm_mean,
lyr_nrm_invvar,
inputs,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights,
output_weights,
dropout_mask,
dropout_add_mask,
dropout_prob_t[0],
)
# fast_self_multihead_attn_norm_add.backward( \
return (
None,
None,
None,
input_grads,
lyr_nrm_gamma_grads,
lyr_nrm_beta_grads,
input_weight_grads,
output_weight_grads,
None,
None,
)
fast_self_attn_norm_add_func = FastSelfAttnNormAddFunc.apply
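# Descriptive note (added): the wrapper above is invoked positionally as
#   fast_self_attn_norm_add_func(use_time_mask, is_training, heads, inputs,
#                                lyr_nrm_gamma_weights, lyr_nrm_beta_weights,
#                                input_weights, output_weights, pad_mask, dropout_prob)
# It returns the detached output of the fused layer-norm + self-attention + dropout-add
# kernel; backward() produces gradients only for inputs, the layer-norm gamma/beta
# weights, and the input/output projection weights (all other positions receive None).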
|
apex-master
|
apex/contrib/multihead_attn/fast_self_multihead_attn_norm_add_func.py
|
import torch
import fast_multihead_attn
class FastSelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
pad_mask,
mask_additive,
dropout_prob,
):
use_biases_t = torch.tensor([input_biases is not None])
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
use_mask = pad_mask is not None
mask_additive_t = torch.tensor([mask_additive])
if use_biases_t[0]:
if not mask_additive:
(
input_lin_results,
softmax_results,
dropout_results,
dropout_mask,
matmul2_results,
outputs,
) = fast_multihead_attn.self_attn_bias_forward(
use_mask,
use_time_mask,
is_training,
heads,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
pad_mask if use_mask else null_tensor,
dropout_prob,
)
# fast_self_multihead_attn_bias.forward() \
ctx.save_for_backward(
use_biases_t,
heads_t,
matmul2_results,
dropout_results,
softmax_results,
null_tensor,
null_tensor,
mask_additive_t,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
)
else:
(
input_lin_results,
bmm1_results,
dropout_results,
dropout_mask,
matmul2_results,
outputs,
) = fast_multihead_attn.self_attn_bias_additive_mask_forward(
use_mask,
use_time_mask,
is_training,
heads,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
pad_mask if use_mask else null_tensor,
dropout_prob,
)
# fast_self_multihead_attn_bias_additive_mask.forward( \
ctx.save_for_backward(
use_biases_t,
heads_t,
matmul2_results,
dropout_results,
null_tensor,
bmm1_results,
pad_mask,
mask_additive_t,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
)
else:
(
input_lin_results,
softmax_results,
dropout_results,
dropout_mask,
matmul2_results,
outputs,
) = fast_multihead_attn.self_attn_forward(
use_mask,
use_time_mask,
is_training,
heads,
inputs,
input_weights,
output_weights,
pad_mask if use_mask else null_tensor,
dropout_prob,
)
# fast_self_multihead_attn.forward( \
ctx.save_for_backward(
use_biases_t,
heads_t,
matmul2_results,
dropout_results,
softmax_results,
null_tensor,
null_tensor,
mask_additive_t,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
(
use_biases_t,
heads_t,
matmul2_results,
dropout_results,
softmax_results,
bmm1_results,
pad_mask,
mask_additive_t,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
) = ctx.saved_tensors
if use_biases_t[0]:
if not mask_additive_t[0]:
(
input_grads,
input_weight_grads,
output_weight_grads,
input_bias_grads,
output_bias_grads,
) = fast_multihead_attn.self_attn_bias_backward(
heads_t[0],
output_grads,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t[0],
)
# fast_self_multihead_attn_bias.backward( \
else:
(
input_grads,
input_weight_grads,
output_weight_grads,
input_bias_grads,
output_bias_grads,
) = fast_multihead_attn.self_attn_bias_additive_mask_backward(
heads_t[0],
output_grads,
matmul2_results,
dropout_results,
bmm1_results,
pad_mask,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t[0],
)
# fast_self_multihead_attn_bias_additive_mask.backward( \
else:
input_bias_grads = None
output_bias_grads = None
input_grads, input_weight_grads, output_weight_grads = fast_multihead_attn.self_attn_backward(
heads_t[0],
output_grads,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t[0],
)
# fast_self_multihead_attn.backward( \
return (
None,
None,
None,
input_grads,
input_weight_grads,
output_weight_grads,
input_bias_grads,
output_bias_grads,
None,
None,
None,
)
fast_self_attn_func = FastSelfAttnFunc.apply
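# Descriptive note (added): the wrapper above is invoked positionally as
#   fast_self_attn_func(use_time_mask, is_training, heads, inputs, input_weights,
#                       output_weights, input_biases, output_biases, pad_mask,
#                       mask_additive, dropout_prob)
# Passing input_biases=None selects the bias-free kernels; when biases are used,
# mask_additive chooses between an additive float mask and a padding mask for pad_mask.
# backward() returns gradients for inputs, the projection weights, and (if used) the
# biases; all other positions receive None.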
|
apex-master
|
apex/contrib/multihead_attn/fast_self_multihead_attn_func.py
|
import torch
import torch.nn.functional as F
import argparse
from apex.contrib.multihead_attn import SelfMultiheadAttn
from apex.contrib.multihead_attn import EncdecMultiheadAttn
parser = argparse.ArgumentParser(description='Multihead Attention Standalone Test')
parser.add_argument('--seq-length', default=64, type=int, help='Sequence Length of Input')
parser.add_argument('--num-seqs-start', default=10, type=int, help='Start Range of Number of Sequences')
parser.add_argument('--num-seqs-stop', default=120, type=int, help='Stop Range of Number of Sequences')
parser.add_argument('--num-seqs-inc', default=5, type=int, help='Range Increment of Number of Sequences')
parser.add_argument('--trials', default=20, type=int, help='Number of Trials to Execute')
parser.add_argument('--warmup-trials', default=5, type=int, help='Warmup Trials to discard')
parser.add_argument('--layers', default=18, type=int, help='Attention Layers to Execute to Gain CPU/GPU Time Overlap')
parser.add_argument('--hidden-dim', default=1024, type=int, help='Multihead Attention hidden dimension')
parser.add_argument('--heads', default=16, type=int, help='Number of Multihead Attention heads')
parser.add_argument('--encdec-attn', action='store_true', help='Use Encoder-Decoder Attention instead of Self Attention.')
parser.add_argument('--norm-add', action='store_true', help='Include Layer Norm and Dropout-Add in Multihead Attention block.')
parser.add_argument('--ref', action='store_true', help='Reference implementation in Python/PyTorch.')
parser.add_argument('--native', action='store_true', help='torch.nn.MultiheadAttention Version.')
parser.add_argument('--fwd', action='store_true', help='Only execute Fwd Pass.')
parser.add_argument('--biases', action='store_true', help='Execute multihead attention with Linear Biases.')
args = parser.parse_args()
if not torch.cuda.is_available():
raise NotImplementedError('Running on CPU is not supported')
torch.cuda.set_device(0)
torch.manual_seed(111)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(111)
attn_layers = []
for idx in range(0, args.layers) :
if args.encdec_attn :
if args.ref :
attn_layers.append(EncdecMultiheadAttn(args.hidden_dim, args.heads, dropout=0.1, bias=args.biases, include_norm_add=False, impl='default'))
else :
attn_layers.append(EncdecMultiheadAttn(args.hidden_dim, args.heads, dropout=0.1, bias=args.biases, include_norm_add=args.norm_add, impl='fast'))
else :
if args.native :
attn_layers.append(torch.nn.MultiheadAttention(args.hidden_dim, args.heads, dropout=0.1, bias=args.biases))
elif args.ref :
attn_layers.append(SelfMultiheadAttn(args.hidden_dim, args.heads, dropout=0.1, bias=args.biases, include_norm_add=args.norm_add, impl='default'))
else :
attn_layers.append(SelfMultiheadAttn(args.hidden_dim, args.heads, dropout=0.1, bias=args.biases, include_norm_add=args.norm_add, impl='fast'))
attn_layers[idx].cuda()
attn_layers[idx].half()
if not args.native :
attn_layers[idx].reset_parameters()
start_evt_fwd = []
start_evt_bwd = []
stop_evt_bwd = []
for recorded_trial in range(0, args.trials) :
start_evt_fwd.append(torch.cuda.Event(enable_timing=True))
start_evt_bwd.append(torch.cuda.Event(enable_timing=True))
stop_evt_bwd.append(torch.cuda.Event(enable_timing=True))
for sequences in range(args.num_seqs_start, args.num_seqs_stop + args.num_seqs_inc, args.num_seqs_inc) :
inputs = torch.randn(args.seq_length, sequences, args.hidden_dim, dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
grads = torch.randn_like(inputs)
for trial in range(0, args.trials + args.warmup_trials) :
layer_inputs = inputs
evt_idx = trial - args.warmup_trials
if evt_idx >= 0 :
start_evt_fwd[evt_idx].record()
for lyr_idx in range(0, args.layers) :
if args.native :
outputs,_ = attn_layers[lyr_idx].forward(layer_inputs,
layer_inputs,
layer_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=None)
else :
outputs,_ = attn_layers[lyr_idx].forward(layer_inputs,
layer_inputs,
layer_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True)
layer_inputs = outputs
if evt_idx >= 0 :
start_evt_bwd[evt_idx].record()
if not args.fwd :
layer_inputs.backward(grads)
if evt_idx >= 0 :
stop_evt_bwd[evt_idx].record()
torch.cuda.synchronize()
elapsed_time_fwd = 0.0
elapsed_time_bwd = 0.0
for evt_idx in range(0, args.trials) :
elapsed_time_fwd += start_evt_fwd[evt_idx].elapsed_time(start_evt_bwd[evt_idx])
elapsed_time_bwd += start_evt_bwd[evt_idx].elapsed_time(stop_evt_bwd[evt_idx])
print("[ {} Attn {} ]Total Tokens: {:4d} Sequences: {:3d} Sequence Length: {:3d} Fwd Time / Layer: {:.3f} ms Bwd Time / Layer: {:.3f} ms".format(
'Encdec' if args.encdec_attn else 'Self', \
'Norm&Add' if args.norm_add else '', \
sequences*args.seq_length, \
sequences, \
args.seq_length, \
elapsed_time_fwd / ( args.trials * args.layers ), \
elapsed_time_bwd / ( args.trials * args.layers )))
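# Example invocations (illustrative; all flags are defined by the argparse setup above):
#   python perf_test_multihead_attn.py                    # fast self-attention
#   python perf_test_multihead_attn.py --norm-add         # fused layer norm + dropout-add
#   python perf_test_multihead_attn.py --encdec-attn      # encoder-decoder attention
#   python perf_test_multihead_attn.py --ref --fwd        # reference impl, forward only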
|
apex-master
|
apex/contrib/examples/multihead_attn/perf_test_multihead_attn.py
|
import torch
import torch.nn.functional as F
import argparse
from apex.contrib.multihead_attn import SelfMultiheadAttn
from apex.contrib.multihead_attn import EncdecMultiheadAttn
parser = argparse.ArgumentParser(description='Multihead Attention Standalone Test')
parser.add_argument('--seq-length', default=64, type=int, help='Sequence Length of Input')
parser.add_argument('--num-seqs-start', default=5, type=int, help='Start Range of Number of Sequences')
parser.add_argument('--num-seqs-stop', default=80, type=int, help='Stop Range of Number of Sequences')
parser.add_argument('--num-seqs-inc', default=5, type=int, help='Range Increment of Number of Sequences')
parser.add_argument('--trials', default=20, type=int, help='Number of Trials to Execute')
parser.add_argument('--warmup-trials', default=5, type=int, help='Warmup Trials to discard')
parser.add_argument('--layers', default=18, type=int, help='Attention Layers to Execute to Gain CPU/GPU Time Overlap')
parser.add_argument('--seed-start', default=1, type=int, help='First Random Seed to Test')
parser.add_argument('--seed-end', default=100, type=int, help='Last Random Seed to Test (inclusive)')
parser.add_argument('--hidden-dim', default=1024, type=int, help='Multihead Attention hidden dimension')
parser.add_argument('--heads', default=16, type=int, help='Number of Multihead Attention heads')
parser.add_argument('--encdec-attn', action='store_true', help='Use Encoder-Decoder Attention instead of Self Attention.')
parser.add_argument('--norm-add', action='store_true', help='Include Layer Norm and Dropout-Add in Multihead Attention block.')
parser.add_argument('--ref', action='store_true', help='Reference implementation in Python/PyTorch.')
parser.add_argument('--native', action='store_true', help='torch.nn.MultiheadAttention Version.')
parser.add_argument('--fwd', action='store_true', help='Only execute Fwd Pass.')
parser.add_argument('--eval', action='store_true', help='Inference only, no backward pass.')
args = parser.parse_args()
assert args.seq_length % 64 == 0, "Sequence Length should be a multiple of 64!"
if not torch.cuda.is_available():
raise NotImplementedError('Running on CPU is not supported')
torch.cuda.set_device(0)
dropout_prob = 0.1
for seed in range(args.seed_start, args.seed_end+1) :
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
ref_layer = None
if args.encdec_attn :
ref_layer = EncdecMultiheadAttn(args.hidden_dim, args.heads, dropout=dropout_prob, bias=False, include_norm_add=args.norm_add, impl='default')
else :
ref_layer = SelfMultiheadAttn(args.hidden_dim, args.heads, dropout=dropout_prob, bias=False, include_norm_add=args.norm_add, impl='default')
ref_layer.cuda()
ref_layer.half()
ref_layer.reset_parameters()
ref_inputs = torch.randn(args.seq_length, args.num_seqs_start, args.hidden_dim, dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
ref_inputs_kv = None
if args.encdec_attn :
ref_inputs_kv = torch.randn(args.seq_length, args.num_seqs_start, args.hidden_dim, dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
ref_grads = torch.randn_like(ref_inputs)
ref_outputs,_ = ref_layer.forward(ref_inputs,
ref_inputs_kv,
ref_inputs_kv,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=(not args.eval))
ref_outputs.backward(ref_grads)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
tst_layer = None
if args.encdec_attn :
tst_layer = EncdecMultiheadAttn(args.hidden_dim, args.heads, dropout=dropout_prob, bias=False, include_norm_add=args.norm_add, impl='fast')
else:
tst_layer = SelfMultiheadAttn(args.hidden_dim, args.heads, dropout=dropout_prob, bias=False, include_norm_add=args.norm_add, impl='fast')
tst_layer.cuda()
tst_layer.half()
tst_layer.reset_parameters()
tst_inputs = torch.randn(args.seq_length, args.num_seqs_start, args.hidden_dim, dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
tst_inputs_kv = None
if args.encdec_attn :
tst_inputs_kv = torch.randn(args.seq_length, args.num_seqs_start, args.hidden_dim, dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
assert torch.equal(ref_inputs,tst_inputs), "ERROR: Inputs are different!"
tst_grads = torch.randn_like(tst_inputs)
tst_outputs,_ = tst_layer.forward(tst_inputs,
tst_inputs_kv,
tst_inputs_kv,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=(not args.eval))
tst_outputs.backward(tst_grads)
fwd_close = torch.equal(ref_outputs, tst_outputs)
bwd_close = torch.equal(ref_inputs.grad, tst_inputs.grad)
diff_fwd = ref_outputs - tst_outputs
diff_cnt_fwd = diff_fwd.ne(0.0).sum()
diff_accum_fwd = diff_fwd.abs().sum()
diff_bwd = ref_inputs.grad - tst_inputs.grad
diff_cnt_bwd = diff_bwd.ne(0.0).sum()
diff_accum_bwd = diff_bwd.abs().sum()
print(">>> Seed: ", seed, fwd_close, diff_cnt_fwd.item(), diff_accum_fwd.item(), bwd_close, diff_cnt_bwd.item(), diff_accum_bwd.item())
|
apex-master
|
apex/contrib/examples/multihead_attn/func_test_multihead_attn.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are not permit-
# ted.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn.functional as F
import torch.nn.init as init
import group_norm_cuda
from torch import Tensor
from torch.nn.parameter import Parameter
from torch._dynamo import disable
from functools import partial
__all__ = ['GroupNorm']
# pytorch group norm requires the input and weight to have the same dtype
def torch_group_norm(x, g, w, b, eps, act=""):
xdtype, wdtype = x.dtype, w.dtype
if xdtype != wdtype:
x = x.to(dtype=wdtype)
y = torch.nn.functional.group_norm(x, g, w, b, eps)
if act in ["silu", "swish"]:
y = torch.nn.functional.silu(y)
if xdtype != wdtype and y.dtype != xdtype:
y = y.to(dtype=xdtype)
return y
class GroupNormNHWC(torch.autograd.Function):
@staticmethod
@disable # This shouldn't be captured by TorchDynamo
def forward(ctx,
x,
G,
weight,
bias,
eps,
act="",
algo=group_norm_cuda.OnePass):
# sanity check
act = act.lower()
assert x.is_contiguous(memory_format=torch.channels_last), \
"Only support NHWC layout."
assert weight.numel() == x.shape[1], "Unexpected parameter count."
assert bias.numel() == x.shape[1], "Unexpected parameter count."
assert x.shape[1] % G == 0, "C % G != 0."
assert act in ["", "silu", "swish"], "Unsupported activation."
with_swish = (act in ["silu", "swish"])
# enqueue fprop kernel
y, sums = group_norm_cuda.forward(x, G, weight, bias, eps, algo,
with_swish)
# save for backward
ctx.save_for_backward(x, weight, bias, sums)
ctx.G = G
ctx.eps = eps
ctx.algo = algo
ctx.with_swish = with_swish
return y
@staticmethod
def backward(ctx, dy):
# sanity check
assert dy.is_contiguous(memory_format=torch.channels_last), \
"Only support NHWC layout."
        # retrieve saved info
x, w, b, sums = ctx.saved_tensors
G = ctx.G
eps = ctx.eps
algo = ctx.algo
with_swish = ctx.with_swish
# enqueue bprop kernel
dx, dw, db = group_norm_cuda.backward(dy, sums, x, G, w, b, eps, algo,
with_swish)
return dx, None, dw, db, None, None, None
class GroupNormOnePass(GroupNormNHWC):
@staticmethod
@disable
def forward(ctx, x, G, weight, bias, eps, act=""):
return super(GroupNormOnePass,
GroupNormOnePass).forward(ctx, x, G, weight, bias, eps,
act, group_norm_cuda.OnePass)
class GroupNormTwoPass(GroupNormNHWC):
@staticmethod
@disable
def forward(ctx, x, G, weight, bias, eps, act=""):
return super(GroupNormTwoPass,
GroupNormTwoPass).forward(ctx, x, G, weight, bias, eps,
act, group_norm_cuda.TwoPass)
cuda_group_norm_nhwc = GroupNormNHWC.apply
cuda_group_norm_nhwc_one_pass = GroupNormOnePass.apply
cuda_group_norm_nhwc_two_pass = GroupNormTwoPass.apply
# We do not directly inherit from torch.nn.GroupNorm since several fusers don't
# support inheritance. Extends:
# https://github.com/pytorch/pytorch/blob/main/torch/nn/modules/normalization.py
class GroupNorm(torch.nn.Module):
"""Optimized GroupNorm for NHWC layout with optional Swish/SiLU fusion.
There are two versions of the CUDA kernels under the hood: one pass and two
passes. This operator contains a simple heuristic to choose between the two algorithms.
Limitations:
* Designed for 32 groups and also tested with 16 groups; some other numbers
of groups may also work, but this is not guaranteed;
* Supported number of channels C are:
128, 256, 320, 448, 512, 640, 768, 896, 960, 1024, 1280, 1344, 1536,
1792, 1920, 2048, 2240, 2560, 2688, 3072, 3136, 3584, 4096.
The one-pass algorithm supports only the channels mentioned above; the two-pass
algorithm might automatically support some other channel counts as well.
* N/H/W have no lower bound (other than > 0) and no upper bound;
All unsupported cases are forwarded to the PyTorch implementation.
"""
__constants__ = [
'num_groups', 'num_channels', 'eps', 'affine', 'act',
'SUPPORTED_CHANNELS', 'SUPPORTED_GROUPS'
]
num_groups: int
num_channels: int
eps: float
affine: bool
act: str
SUPPORTED_CHANNELS = {
128,
256,
320,
448,
512,
640,
768,
896,
960,
1024,
1280,
1344,
1536,
1792,
1920,
2048,
2240,
2560,
2688,
3072,
3136,
3584,
4096,
}
SUPPORTED_GROUPS = {16, 32}
def __init__(self,
num_groups: int,
num_channels: int,
eps: float = 1e-5,
affine: bool = True,
device=None,
dtype=None,
act="") -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if num_channels % num_groups != 0:
raise ValueError('num_channels must be divisible by num_groups')
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
self.act = act.lower()
if self.affine:
self.weight = Parameter(torch.empty(num_channels,
**factory_kwargs))
self.bias = Parameter(torch.empty(num_channels, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
sm = torch.cuda.get_device_capability(device)
self.sm = sm[0] * 10 + sm[1]
def reset_parameters(self) -> None:
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def _check_legality(self, input: Tensor) -> bool:
is_nhwc = input.is_contiguous(memory_format=torch.channels_last)
is_legal_groups = self.num_groups in self.SUPPORTED_GROUPS
is_legal_channels = self.num_channels in self.SUPPORTED_CHANNELS
is_half_or_float_or_bf16 = input.dtype in [
torch.float16, torch.bfloat16, torch.float32
]
is_legal_act = self.act in ['', 'silu', 'swish']
if is_nhwc and is_half_or_float_or_bf16 and is_legal_act and \
self.affine and is_legal_groups and is_legal_channels:
return True
else:
return False
def forward(self, input: Tensor) -> Tensor:
can_use_nhwc_group_norm = self._check_legality(input)
if can_use_nhwc_group_norm:
channels = input.shape[1]
hw = 1
for i in range(2, len(input.shape)):
hw *= input.shape[i]
max_hw_one_pass = 1024 if self.sm >= 80 else 256
if (hw >= 512 and channels
in (3136, 3584, 4096)) or hw > max_hw_one_pass:
algo = group_norm_cuda.TwoPass
else:
algo = group_norm_cuda.OnePass
return cuda_group_norm_nhwc(input, self.num_groups, self.weight,
self.bias, self.eps, self.act, algo)
else:
return torch_group_norm(input, self.num_groups, self.weight,
self.bias, self.eps, self.act)
def extra_repr(self) -> str:
if self.act:
return '{num_groups}, {num_channels}, eps={eps}, ' \
'affine={affine}, act={act}'.format(**self.__dict__)
else:
return '{num_groups}, {num_channels}, eps={eps}, ' \
'affine={affine}'.format(**self.__dict__)
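# --- Illustrative usage sketch (added; not part of the upstream module) ---
# A minimal, hedged example of the NHWC GroupNorm with fused SiLU. It assumes the
# group_norm_cuda extension is built and a CUDA device is available; unsupported
# configurations fall back to the PyTorch implementation automatically.
def _example_group_norm_usage():
    gn = GroupNorm(32, 320, act='silu').cuda().half()
    x = torch.randn(2, 320, 32, 32, dtype=torch.float16, device='cuda')
    x = x.to(memory_format=torch.channels_last)   # NHWC layout is required for the fast path
    y = gn(x)
    return y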
|
apex-master
|
apex/contrib/group_norm/group_norm.py
|
from .group_norm import *
|
apex-master
|
apex/contrib/group_norm/__init__.py
|
from .clip_grad import clip_grad_norm_
|
apex-master
|
apex/contrib/clip_grad/__init__.py
|
from typing import Union, Iterable
import torch
_kernel_import_succeeded = False
try:
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
_kernel_import_succeeded = True
except ImportError:
_kernel_import_succeeded = False
_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tensor]]
def clip_grad_norm_(
parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.0,
error_if_nonfinite: bool = False) -> torch.Tensor:
r"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
This is identical to torch.nn.utils.clip_grad_norm_, except it
uses a fused CUDA kernel when computing the 2-norm of GPU tensors
in float32 and float16.
Args:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
error_if_nonfinite (bool): if True, an error is thrown if the total
norm of the gradients from :attr:`parameters` is ``nan``,
``inf``, or ``-inf``. Default: False (will switch to True in the future)
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
max_norm = float(max_norm)
norm_type = float(norm_type)
# Trivial case
if len(parameters) == 0:
return torch.tensor(0.)
# Fallback implementation
if not (_kernel_import_succeeded
and norm_type == 2.0
and any(p.is_cuda for p in parameters)):
return torch.nn.utils.clip_grad_norm_(
parameters,
max_norm,
norm_type=norm_type,
error_if_nonfinite = error_if_nonfinite,
)
# Find fp32 and fp16 gradients on GPU
device = next(p.device for p in parameters if p.is_cuda)
grads_fp32, grads_fp16, grads_misc = [], [], []
for p in parameters:
grad = p.grad.detach()
if p.dtype == torch.float32 and p.device == device:
grads_fp32.append(grad)
elif p.dtype == torch.float16 and p.device == device:
grads_fp16.append(grad)
else:
grads_misc.append(grad)
# Compute gradient L2 norms
norms = []
dummy_overflow_buf = torch.zeros([1], dtype=torch.int32, device=device)
if grads_fp32:
norms.append(
multi_tensor_applier(
amp_C.multi_tensor_l2norm,
dummy_overflow_buf,
[grads_fp32],
False,
)[0]
)
if grads_fp16:
norms.append(
multi_tensor_applier(
amp_C.multi_tensor_l2norm,
dummy_overflow_buf,
[grads_fp16],
False,
)[0],
)
for g in grads_misc:
norms.append(torch.linalg.norm(g).unsqueeze(0).to(device))
total_norm = torch.linalg.norm(torch.cat(norms))
# Check for non-finite values
if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()):
raise RuntimeError(
f'The total norm of order {norm_type} for gradients from '
'`parameters` is non-finite, so it cannot be clipped. To disable '
'this error and scale the gradients by the non-finite norm anyway, '
'set `error_if_nonfinite=False`')
# Scale gradients
clip_coef = max_norm / (total_norm + 1e-6)
clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
if grads_fp32:
multi_tensor_applier(
amp_C.multi_tensor_scale,
dummy_overflow_buf,
[grads_fp32, grads_fp32],
clip_coef_clamped,
)
if grads_fp16:
multi_tensor_applier(
amp_C.multi_tensor_scale,
dummy_overflow_buf,
[grads_fp16, grads_fp16],
clip_coef_clamped,
)
for g in grads_misc:
g.mul_(clip_coef_clamped.to(g.device))
return total_norm
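# --- Illustrative usage sketch (added; not part of the upstream module) ---
# A minimal, hedged example of clipping gradients after a backward pass. It assumes a
# CUDA device; when the amp_C extension is unavailable the call simply falls back to
# torch.nn.utils.clip_grad_norm_ as described above.
def _example_clip_grad_norm():
    model = torch.nn.Linear(64, 64).cuda()
    out = model(torch.randn(8, 64, device='cuda'))
    out.pow(2).mean().backward()
    total_norm = clip_grad_norm_(model.parameters(), max_norm=1.0)
    return total_norm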
|
apex-master
|
apex/contrib/clip_grad/clip_grad.py
|
import torch
import transducer_loss_cuda
import transducer_joint_cuda
class TransducerJoint(torch.nn.Module):
"""Transducer joint
Detail of this loss function can be found in: Sequence Transduction with Recurrent Neural
Networks
Arguments:
pack_output (bool, optional): whether to pack the output in a compact form with don't-care
data being removed. (default: False)
relu (bool, optional): apply ReLU to the output of the joint operation. Requires opt=1
(default: False)
dropout (bool, optional): apply dropout to the output of the joint operation. Requires opt=1
(default: False)
opt (int, optional): pick the optimization level in [0, 1]. opt=1 picks a tiled algorithm.
(default: 1)
fwd_tile_size (int, optional): tile size used in forward operation. This argument will be
ignored if opt != 1. (default: 4)
dropout_prob (float, optional): dropout probability. (default: 0.0)
probe_mask (bool, optional): a flag used to probe the mask generated by ReLU and/or dropout
operation. When this argument is set to True, the mask can be accessed through
self.mask_probe. (default: False)
"""
def __init__(self, pack_output=False, relu=False, dropout=False, opt=1, fwd_tile_size=4,
dropout_prob=0, probe_mask=False):
super(TransducerJoint, self).__init__()
self.pack_output = pack_output
self.relu = relu
self.dropout = dropout
self.dropout_prob = dropout_prob
self.opt = opt
self.fwd_tile_size = fwd_tile_size
self.dummy_batch_offset = torch.empty(0)
masked = self.relu or self.dropout
self.mask_probe = [] if masked and probe_mask else None
if masked and opt != 1:
raise NotImplementedError("ReLU and dropout fusion is only supported with opt=1")
def forward(self, f, g, f_len, g_len, batch_offset=None, packed_batch=0):
"""Forward operation of transducer joint
Arguments:
f (tensor): transcription vector from encode block of shape (B, T, H).
g (tensor): prediction vector from the predict block of shape (B, U, H).
f_len (tensor): length of transcription vector for each batch.
g_len (tensor): length of prediction vector minus 1 for each batch.
batch_offset (tensor, optional): tensor containing the offset of each batch
in the results. For example, batch offset can be obtained from:
batch_offset = torch.cumsum(f_len*g_len, dim=0)
This argument is required if pack_output == True, and is ignored if
pack_output == False. (default: None)
packed_batch (int, optional): the batch size after packing. This argument is
ignored if pack_output == False. (default: 0)
"""
my_batch_offset = batch_offset if self.pack_output else self.dummy_batch_offset
if self.pack_output and (batch_offset is None or packed_batch == 0):
raise Exception("Please specify batch_offset and packed_batch when packing is enabled")
dropout = self.dropout and self.training # only dropout for training
return TransducerJointFunc.apply(f, g, f_len, g_len, self.pack_output, self.relu, dropout,
my_batch_offset, packed_batch, self.opt,
self.fwd_tile_size, self.dropout_prob, self.mask_probe)
class TransducerLoss(torch.nn.Module):
"""Transducer loss
Detail of this loss function can be found in: Sequence Transduction with Recurrent Neural
Networks
Arguments:
fuse_softmax_backward (bool, optional): whether to fuse the backward of transducer loss with
softmax. (default: True)
opt (int, optional): pick the optimization level in [0, 1]. opt=1 picks a more optimized
algorithm. In some cases, opt=1 might fall back to opt=0. (default: 1)
packed_input (bool, optional): whether the input is packed in a compact form with don't-care
data removed. (default: False)
"""
def __init__(self, fuse_softmax_backward=True, opt=1, packed_input=False):
super(TransducerLoss, self).__init__()
self.fuse_softmax_backward = fuse_softmax_backward
self.opt = opt
self.packed_input = packed_input
self.dummy_batch_offset = torch.empty(0)
def forward(self, x, label, f_len, y_len, blank_idx, batch_offset=None, max_f_len=None,
debug_list=None):
"""Forward operation of transducer joint
Arguments:
x (tensor): input tensor to the loss function with a shape of (B, T, U, H).
label (tensor): labels for the input data.
f_len (tensor): lengths of the inputs in the time dimension for each batch.
y_len (tensor): lengths of the labels for each batch.
blank_idx (int): index for the null symbol.
batch_offset (tensor, optional): tensor containing the offset of each batch
in the input. For example, batch offset can be obtained from:
batch_offset = torch.cumsum(f_len*(y_len+1), dim=0)
This argument is required if packed_input == True, and is ignored if
packed_input == False. (default: None)
max_f_len (int, optional): maximum length of the input in the time dimension.
For example, it can be obtained as
max_f_len = max(f_len)
This argument is required if packed_input == True, and is ignored if
packed_input == False. (default: None)
debug_list (list, optional): when an empty list is supplied, Alpha and Beta generated
in the forward operation will be attached to this list for debug purpose.
(default: None)
"""
if self.packed_input:
if batch_offset is None or max_f_len is None:
raise Exception("Please specify batch_offset and max_f_len when packing is \
enabled")
my_batch_offset = batch_offset
my_max_f_len = max_f_len
else:
my_batch_offset = self.dummy_batch_offset
my_max_f_len = x.size(1)
return TransducerLossFunc.apply(x, label, f_len, y_len, my_batch_offset, my_max_f_len,
blank_idx, self.fuse_softmax_backward, debug_list,
self.opt, self.packed_input)
class TransducerLossFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x, label, f_len, y_len, batch_offset, max_f_len, blank_idx,
fuse_softmax_backward, debug_list, opt, packed_input):
if fuse_softmax_backward == False:
with torch.enable_grad():
x = torch.nn.functional.log_softmax(x, dim=-1)
else:
x = torch.nn.functional.log_softmax(x, dim=-1)
alpha, beta, loss = transducer_loss_cuda.forward( x, label, f_len, y_len, batch_offset,
max_f_len, blank_idx, opt, packed_input)
if debug_list == []:
debug_list += [alpha, beta]
ctx.save_for_backward(x, alpha, beta, f_len, y_len, label, batch_offset)
ctx.blank_idx = blank_idx
ctx.fuse_softmax_backward = fuse_softmax_backward
ctx.opt = opt
ctx.packed_input = packed_input
ctx.max_f_len = max_f_len
return loss
@staticmethod
def backward(ctx, loss_grad):
x, alpha, beta, f_len, y_len, label, batch_offset = ctx.saved_tensors
x_grad = transducer_loss_cuda.backward( x, loss_grad, alpha, beta, f_len, y_len, label,
batch_offset, ctx.max_f_len, ctx.blank_idx, ctx.opt,
ctx.fuse_softmax_backward, ctx.packed_input)
if ctx.fuse_softmax_backward == False:
x_grad = x.backward(x_grad)
return x_grad, None, None, None, None, None, None, None, None, None, None
class TransducerJointFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, f, g, f_len, g_len, pack_output, relu, dropout, batch_offset, packed_batch,
opt, fwd_tile_size, dropout_prob, mask_probe):
h = transducer_joint_cuda.forward(f, g, f_len, g_len, batch_offset, packed_batch, opt,
pack_output, relu, dropout, dropout_prob, fwd_tile_size)
masked = relu or dropout
if masked:
ctx.save_for_backward(h[1], f_len, g_len, batch_offset)
if mask_probe is not None:
mask_probe.append(h[1])
else:
ctx.save_for_backward(f_len, g_len, batch_offset)
ctx.pack_output = pack_output
ctx.masked = relu or dropout
ctx.max_f_len = f.size(1)
ctx.max_g_len = g.size(1)
ctx.scale = 1 / (1-dropout_prob) if dropout and dropout_prob != 1 else 1
return h[0]
@staticmethod
def backward(ctx, loss_grad):
if ctx.masked:
mask, f_len, g_len, batch_offset = ctx.saved_tensors
inp = [loss_grad, mask]
else:
f_len, g_len, batch_offset = ctx.saved_tensors
inp = [loss_grad]
f_grad, g_grad = transducer_joint_cuda.backward( inp, f_len, g_len, batch_offset,
ctx.max_f_len, ctx.max_g_len,
ctx.pack_output, ctx.scale)
return f_grad, g_grad, None, None, None, None, None, None, None, None, None, None, None, \
None, None, None
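# --- Illustrative usage sketch (added; not part of the upstream module) ---
# A minimal, hedged sketch of chaining TransducerJoint and TransducerLoss. It assumes the
# transducer CUDA extensions are built and a GPU is available; the sizes, the blank index,
# and the vocab projection below are illustrative assumptions, not requirements.
def _example_transducer_usage():
    B, T, U, H, V = 2, 8, 4, 16, 12          # U = max label length + 1
    joint = TransducerJoint(pack_output=False)
    loss_fn = TransducerLoss(packed_input=False)
    f = torch.randn(B, T, H, device='cuda', requires_grad=True)      # encoder output
    g = torch.randn(B, U, H, device='cuda', requires_grad=True)      # predictor output
    f_len = torch.tensor([T, T - 2], dtype=torch.int32, device='cuda')
    y_len = torch.tensor([U - 1, U - 2], dtype=torch.int32, device='cuda')   # label lengths
    label = torch.randint(1, V, (B, U - 1), dtype=torch.int32, device='cuda')
    h = joint(f, g, f_len, y_len)                                     # (B, T, U, H)
    proj = torch.nn.Linear(H, V).cuda()                               # hypothetical vocab projection
    loss = loss_fn(proj(h), label, f_len, y_len, blank_idx=0)
    return loss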
|
apex-master
|
apex/contrib/transducer/transducer.py
|
from .transducer import TransducerJoint
from .transducer import TransducerLoss
from . import _transducer_ref
|
apex-master
|
apex/contrib/transducer/__init__.py
|
import torch
def transducer_loss_reference(x, label, f_len, y_len, blank_idx, loss_grad):
def log_sum_exp(a, b):
if (a >= b):
return a + torch.log(1 + torch.exp(b-a))
else:
return b + torch.log(1 + torch.exp(a-b))
def forward_alpha(x, label, f_len, y_len, blank_idx):
B, T, U, V = x.size()
acc_t = torch.float32 if x.dtype in [torch.float16, torch.float32] else x.dtype
alpha = torch.zeros((B, T, U), dtype=acc_t, device=x.device)
for b in range(B):
alpha[b, 0, 0] = 0
for t in range(1, f_len[b]):
alpha[b, t, 0] = alpha[b, t-1, 0] + x[b, t-1, 0, blank_idx]
for u in range(1, y_len[b]+1):
alpha[b, 0, u] = alpha[b, 0, u-1] + x[b, 0, u-1, label[b, u-1]]
for t in range(1, f_len[b]):
for u in range(1, y_len[b]+1):
curr_ = alpha[b, t-1, u] + x[b, t-1, u, blank_idx]
next_ = alpha[b, t, u-1] + x[b, t, u-1, label[b, u-1]]
alpha[b, t, u] = log_sum_exp(curr_, next_)
return alpha
def forward_beta(x, label, f_len, y_len, blank_idx):
B, T, U, V = x.shape
acc_t = torch.float32 if x.dtype in [torch.float16, torch.float32] else x.dtype
beta = torch.zeros((B, T, U), dtype=acc_t, device=x.device)
for b in range(B):
beta[b, f_len[b]-1, y_len[b]] = x[b, f_len[b]-1, y_len[b], blank_idx]
for t in range(f_len[b]-2, -1, -1):
beta[b, t, y_len[b]] = beta[b, t+1, y_len[b]] + x[b, t, y_len[b], blank_idx]
for u in range(y_len[b]-1, -1, -1):
beta[b, f_len[b]-1, u] = beta[b, f_len[b]-1, u+1] + x[b, f_len[b]-1, u, label[b, u]]
for t in range(f_len[b]-2, -1, -1):
for u in range(y_len[b]-1, -1, -1):
curr_ = beta[b, t+1, u] + x[b, t, u, blank_idx]
next_ = beta[b, t, u+1] + x[b, t, u, label[b, u]]
beta[b, t, u] = log_sum_exp(curr_, next_)
return beta
def backward(x, label, f_len, y_len, alpha, beta, loss_grad, blank_idx):
grad = torch.zeros_like(x)
B, T, U, V = x.size()
for b in range(B):
common_factor = torch.log(loss_grad[b]) + alpha - beta[b, 0, 0]
# next
for u in range(y_len[b]):
grad[b, :f_len[b], u, label[b, u]] = -torch.exp(common_factor[b, :f_len[b], u]
+ beta[b, :f_len[b], u+1]
+ x[b, :f_len[b], u, label[b, u]])
# current
grad[b, :f_len[b]-1, :y_len[b]+1, blank_idx] \
= -torch.exp(common_factor[b, :f_len[b]-1, :y_len[b]+1]
+ beta[b, 1:f_len[b], :y_len[b]+1]
+ x[b, :f_len[b]-1, :y_len[b]+1, blank_idx])
grad[b, f_len[b]-1, y_len[b], blank_idx] = -torch.exp(common_factor[b, f_len[b]-1, y_len[b]]
+ x[b, f_len[b]-1, y_len[b], blank_idx])
return grad
x_log = torch.nn.functional.log_softmax(x, dim=-1)
alpha = forward_alpha(x_log, label, f_len, y_len, blank_idx)
beta = forward_beta(x_log, label, f_len, y_len, blank_idx)
grad = backward(x_log, label, f_len, y_len, alpha, beta,
loss_grad, blank_idx)
x_log.backward(grad)
loss = -beta[:, 0, 0]
loss = loss.to(x.dtype)
return alpha, beta, x.grad, loss
def transducer_joint_reference(f, g, h_grad, f_len, g_len, pack_output, relu, dropout,
dropout_prob=0, mask=None):
    if dropout and mask is None:
        raise NotImplementedError("mask needs to be supplied to test dropout.")
B, T, H = f.size()
U = g.size(1)
f_expand = f.unsqueeze(dim=2)
g_expand = g.unsqueeze(dim=1)
h = f_expand + g_expand
if relu:
h = torch.nn.functional.relu(h)
if dropout:
h *= mask
scale = 1/(1-dropout_prob)
h *= scale
h.backward(h_grad)
if pack_output == False:
        # intentionally set the don't-care region to -1 to test whether the transducer joint
        # writes these regions to avoid NaN and inf
for b in range(B):
h[b, f_len[b]:] = -1
h[b, :, g_len[b]:] = -1
return h, f.grad, g.grad
# packing
list_to_pack = []
for b in range(B):
list_to_pack.append(h[b, :f_len[b], :g_len[b], :].reshape(-1, H))
h_packed = torch.cat(list_to_pack)
return h_packed, f.grad, g.grad
|
apex-master
|
apex/contrib/transducer/_transducer_ref.py
|
import torch
from apex.contrib.peer_memory import PeerMemoryPool
import peer_memory_cuda as pm
class PeerHaloExchanger1d:
def __init__(self, ranks, rank_in_group, peer_pool, half_halo):
self.peer_group_size = len(ranks)
self.ranks = ranks
self.peer_rank = rank_in_group
self.low_neighbor = (self.peer_rank + self.peer_group_size - 1) % self.peer_group_size
self.high_neighbor = (self.peer_rank + 1) % self.peer_group_size
self.low_zero = True if self.peer_rank == 0 else False
self.high_zero = True if self.peer_rank == self.peer_group_size - 1 else False
self.peer_pool = peer_pool
self.half_halo = half_halo
def _allocate_peer_tensor(self, halo):
# Compute size in bytes
# Note: Pad buffer so each CUDA block gets required buffer size
size = 4 * halo.numel() * halo.element_size()
size_per_block = 128 * 2 * 16 # 128 threads each require two 128b buffers
size = (size + size_per_block - 1) // size_per_block * size_per_block
# Construct dtype peer buffer with desired size
shape = [1, 1, 1, size // halo.element_size()]
return self.peer_pool.allocate_peer_tensors(shape, halo.dtype, False, True)
def __call__(self, y, H_split=True, explicit_nhwc=False, numSM=0, diagnostics=False):
channels_last = y.is_contiguous(memory_format=torch.channels_last) and not explicit_nhwc
if H_split:
if explicit_nhwc:
_, Hs, _, _ = list(y.shape)
H = Hs - 2*self.half_halo
low_out_halo = y[:,self.half_halo:2*self.half_halo,:,:]
low_tx = self._allocate_peer_tensor(low_out_halo)
low_inp_halo = y[:,:self.half_halo,:,:]
high_out_halo = y[:,H:H+self.half_halo,:,:]
high_tx = self._allocate_peer_tensor(high_out_halo)
high_inp_halo = y[:,H+self.half_halo:H+2*self.half_halo,:,:]
else:
_, _, Hs, _ = list(y.shape)
H = Hs - 2*self.half_halo
low_out_halo = y[:,:,self.half_halo:2*self.half_halo,:]
low_tx = self._allocate_peer_tensor(low_out_halo)
low_inp_halo = y[:,:,:self.half_halo,:]
high_out_halo = y[:,:,H:H+self.half_halo,:]
high_tx = self._allocate_peer_tensor(high_out_halo)
high_inp_halo = y[:,:,H+self.half_halo:H+2*self.half_halo,:]
else:
if explicit_nhwc:
_, _, Ws, _ = list(y.shape)
W = Ws - 2*self.half_halo
low_out_halo = y[:,:,self.half_halo:2*self.half_halo,:]
low_tx = self._allocate_peer_tensor(low_out_halo)
low_inp_halo = y[:,:,:self.half_halo,:]
high_out_halo = y[:,:,W:W+self.half_halo,:]
high_tx = self._allocate_peer_tensor(high_out_halo)
high_inp_halo = y[:,:,W+self.half_halo:W+2*self.half_halo,:]
else:
_, _, _, Ws = list(y.shape)
W = Ws - 2*self.half_halo
low_out_halo = y[:,:,:,self.half_halo:2*self.half_halo]
low_tx = self._allocate_peer_tensor(low_out_halo)
low_inp_halo = y[:,:,:,:self.half_halo]
high_out_halo = y[:,:,:,W:W+self.half_halo]
high_tx = self._allocate_peer_tensor(high_out_halo)
high_inp_halo = y[:,:,:,W+self.half_halo:W+2*self.half_halo]
pm.push_pull_halos_1d(
diagnostics, explicit_nhwc, numSM, self.peer_rank,
self.low_zero, low_out_halo, low_tx[self.peer_rank], high_tx[self.low_neighbor], low_inp_halo,
self.high_zero, high_out_halo, high_tx[self.peer_rank], low_tx[self.high_neighbor], high_inp_halo,
)
|
apex-master
|
apex/contrib/peer_memory/peer_halo_exchanger_1d.py
|
from .peer_memory import PeerMemoryPool
from .peer_halo_exchanger_1d import PeerHaloExchanger1d
|
apex-master
|
apex/contrib/peer_memory/__init__.py
|
import torch
import numpy as np
import peer_memory_cuda as pm
class PeerMemoryPool(object):
def __init__(self, static_size, dynamic_size, peer_ranks=None):
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
ngpus = min(torch.cuda.device_count(), world_size)
peer_group_size = ngpus
peer_group = rank // ngpus
peer_rank_base = peer_group * ngpus
peer_rank = rank - peer_rank_base
if peer_ranks is None:
peer_ranks = [i+peer_rank_base for i in range(peer_group_size)]
peer_rank_start = peer_rank_base
peer_rank_end = peer_rank_start + peer_group_size - 1
for pr in peer_ranks:
assert(pr >= peer_rank_start and pr <= peer_rank_end), "%d :: peer_rank %d not on same node (ranks=[%d,%d])" % (rank, pr, peer_rank_start, peer_rank_end)
self.alignment = 256
self.static_size = ((static_size + self.alignment - 1) // self.alignment) * self.alignment
self.dynamic_size = ((dynamic_size + self.alignment - 1) // self.alignment) * self.alignment
# allocate giant pool of device memory
self.raw = pm.allocate_raw(self.static_size+self.dynamic_size)
# exchange peer pointers with nccl
raw_ipc = pm.get_raw_ipc_address(self.raw).cuda()
peer_raw_ipcs = [torch.empty_like(raw_ipc) for _ in range(world_size)]
torch.distributed.all_gather(peer_raw_ipcs, raw_ipc)
peer_raw_ipcs = torch.stack(peer_raw_ipcs).cpu()
# extract IPC pointers for ranks on same node
peer_raw = pm.get_raw_peers(peer_raw_ipcs[peer_rank_base:peer_rank_base+ngpus], peer_rank, self.raw)
self.peer_raw = [peer_raw[peer_rank-peer_rank_base] for peer_rank in peer_ranks]
self.static_offset = 0
self.dynamic_offset = 0
self.peer_ranks = peer_ranks
def __del__(self):
pm.free_raw(self.raw)
def reset(self):
self.dynamic_offset = 0
def allocate_peer_tensors(self, shape, dtype, channels_last, dynamic):
nels = np.prod(shape)
if dtype == torch.float16:
elem_size = 2
if dynamic:
start = ((self.dynamic_offset + self.alignment - 1) // self.alignment) * self.alignment
self.dynamic_offset = start + nels * elem_size
assert(self.dynamic_offset < self.dynamic_size), "Dynamic peer memory pool exhausted"
return [pm.blob_view_half(pr + self.static_size + start, shape, channels_last) for pr in self.peer_raw]
else:
start = ((self.static_offset + self.alignment - 1) // self.alignment) * self.alignment
self.static_offset = start + nels * elem_size
assert(self.static_offset < self.static_size), "Static peer memory pool exhausted"
return [pm.blob_view_half(pr + start, shape, channels_last) for pr in self.peer_raw]
if dtype == torch.float32:
elem_size = 4
if dynamic:
start = ((self.dynamic_offset + self.alignment - 1) // self.alignment) * self.alignment
self.dynamic_offset = start + nels * elem_size
assert(self.dynamic_offset < self.dynamic_size), "Dynamic peer memory pool exhausted"
return [pm.blob_view_float(pr + self.static_size + start, shape, channels_last) for pr in self.peer_raw]
else:
start = ((self.static_offset + self.alignment - 1) // self.alignment) * self.alignment
self.static_offset = start + nels * elem_size
assert(self.static_offset < self.static_size), "Static peer memory pool exhausted"
return [pm.blob_view_float(pr + start, shape, channels_last) for pr in self.peer_raw]
if dtype == torch.int32:
elem_size = 4
if dynamic:
start = ((self.dynamic_offset + self.alignment - 1) // self.alignment) * self.alignment
self.dynamic_offset = start + nels * elem_size
assert(self.dynamic_offset < self.dynamic_size), "Dynamic peer memory pool exhausted"
return [pm.blob_view_int(pr + self.static_size + start, shape, channels_last) for pr in self.peer_raw]
else:
start = ((self.static_offset + self.alignment - 1) // self.alignment) * self.alignment
self.static_offset = start + nels * elem_size
assert(self.static_offset < self.static_size), "Static peer memory pool exhausted"
return [pm.blob_view_int(pr + start, shape, channels_last) for pr in self.peer_raw]
else:
assert(False), "dtype %s not supported" % (str(dtype))
|
apex-master
|
apex/contrib/peer_memory/peer_memory.py
|
###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
###############################################################################
import torch
import torch.nn.functional as F
import fmhalib as mha
class FMHAFun(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cu_seqlens, p_dropout, max_s, is_training, zero_tensors):
batch_size = cu_seqlens.numel() - 1
if batch_size < 4:
max_s = 512
context, S_dmask = mha.fwd_nl(qkv, cu_seqlens, p_dropout, max_s, is_training, True, zero_tensors, None)
else:
context, S_dmask = mha.fwd(qkv, cu_seqlens, p_dropout, max_s, is_training, False, zero_tensors, None)
ctx.save_for_backward(qkv, S_dmask)
ctx.cu_seqlens = cu_seqlens
ctx.p_dropout = p_dropout
ctx.max_s = max_s
ctx.zero_tensors = zero_tensors
return context
@staticmethod
def backward(ctx, dout):
qkv, S_dmask = ctx.saved_tensors
batch_size = ctx.cu_seqlens.numel() - 1
if batch_size < 4:
dqkv, dp, _ = mha.bwd_nl(dout, qkv, S_dmask, ctx.cu_seqlens, ctx.p_dropout, ctx.max_s, ctx.zero_tensors)
else:
dqkv, dp = mha.bwd(dout, qkv, S_dmask, ctx.cu_seqlens, ctx.p_dropout, ctx.max_s, ctx.zero_tensors)
return dqkv, None, None, None, None, None
class FMHA(torch.nn.Module):
def __init__(self, config):
super(FMHA, self).__init__()
self.p_dropout = config.attention_probs_dropout_prob
self.h = config.num_attention_heads
self.hidden_size = config.hidden_size
self.d = self.hidden_size // self.h
assert self.d * self.h == self.hidden_size, "Invalid hidden size/num_heads"
def forward(self, qkv, cu_seqlens, max_s, is_training=True, zero_tensors=False):
ctx = FMHAFun.apply(qkv.view(-1, 3, self.h, self.d), cu_seqlens, self.p_dropout, max_s, is_training, zero_tensors)
return ctx.view(-1, self.hidden_size)
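# --- Illustrative usage sketch (added; not part of the upstream module) ---
# A minimal, hedged example of the packed-QKV interface. It assumes the fmhalib extension
# is built, a GPU is available, and fp16 inputs; the tiny config object stands in for a
# BERT-style config and is purely illustrative.
def _example_fmha_usage():
    from types import SimpleNamespace
    cfg = SimpleNamespace(attention_probs_dropout_prob=0.1,
                          num_attention_heads=16,
                          hidden_size=1024)
    mha_layer = FMHA(cfg)
    seqlens = [128, 64]                         # two variable-length sequences, packed
    cu_seqlens = torch.tensor([0, 128, 192], dtype=torch.int32, device='cuda')
    qkv = torch.randn(sum(seqlens), 3 * cfg.hidden_size, dtype=torch.float16, device='cuda')
    out = mha_layer(qkv, cu_seqlens, max_s=max(seqlens), is_training=True)
    return out                                  # (total_tokens, hidden_size)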
|
apex-master
|
apex/contrib/fmha/fmha.py
|
from .fmha import FMHAFun
|
apex-master
|
apex/contrib/fmha/__init__.py
|
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedNovoGrad(torch.optim.Optimizer):
"""Implements NovoGrad algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This version of fused NovoGrad implements 2 fusions.
* Fusion of the NovoGrad update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.optimizers.FusedNovoGrad`'s usage is identical to any Pytorch optimizer::
opt = apex.optimizers.FusedNovoGrad(model.parameters(), lr = ....)
...
opt.step()
:class:`apex.optimizers.FusedNovoGrad` may be used with or without Amp. If you wish to use :class:`FusedNovoGrad` with Amp,
you may choose any ``opt_level``::
opt = apex.optimizers.FusedNovoGrad(model.parameters(), lr = ....)
model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
...
opt.step()
In general, ``opt_level="O1"`` is recommended.
It has been proposed in `Jasper: An End-to-End Convolutional Neural Acoustic Model`_.
More info: https://nvidia.github.io/OpenSeq2Seq/html/optimizers.html#novograd
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its norm. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
NOT SUPPORTED now! (default: False)
reg_inside_moment (bool, optional): whether to do regularization (norm and L2)
    inside the momentum calculation. True to include it there, False to apply
    it only to the update term. (default: False)
grad_averaging (bool, optional): whether to apply (1-beta1) to the grad when
    calculating running averages of the gradient. (default: True)
norm_type (int, optional): which norm to calculate for each layer.
    2 for L2 norm and 0 for infinity norm; these are the only supported
    types for now. (default: 2)
init_zero (bool, optional): whether to init the norm with 0 (start averaging on
    the 1st step) or with the first step's norm (start averaging on the 2nd
    step). True to init with 0. (default: False)
set_grad_none (bool, optional): whether to set grads to None when the zero_grad()
    method is called. (default: True)
.. _Jasper - An End-to-End Convolutional Neural Acoustic Model:
https://arxiv.org/abs/1904.03288
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, bias_correction=True,
betas=(0.9, 0.999), eps=1e-8, weight_decay=0.,
amsgrad=False, reg_inside_moment=False,
grad_averaging=True, norm_type=2, init_zero=False,
set_grad_none=True):
if amsgrad:
raise RuntimeError('FusedNovoGrad does not support the AMSGrad variant.')
defaults = dict(lr=lr, bias_correction=bias_correction,
betas=betas, eps=eps, weight_decay=weight_decay,
grad_averaging=grad_averaging, norm_type=norm_type,
init_zero=init_zero)
super(FusedNovoGrad, self).__init__(params, defaults)
if multi_tensor_applier.available:
import amp_C
# Skip buffer
# Creating the overflow buffer on the same device as the params tensors.
self._dummy_overflow_buf = torch.tensor([0], dtype=torch.int, device=self.param_groups[0]["params"][0].device)
self.multi_tensor_novograd = amp_C.multi_tensor_novograd
else:
raise RuntimeError('apex.optimizers.FusedNovoGrad requires cuda extensions')
self.moment_mode = 0 if reg_inside_moment else 1
self.set_grad_none = set_grad_none
def zero_grad(self):
if self.set_grad_none:
for group in self.param_groups:
for p in group['params']:
p.grad = None
else:
super(FusedNovoGrad, self).zero_grad()
def load_state_dict(self, state_dict):
super(FusedNovoGrad, self).load_state_dict(state_dict)
# in case exp_avg_sq is not on the same device as params, move it there
for group in self.param_groups:
if len(group['params']) > 0:
group['exp_avg_sq'][0] = group['exp_avg_sq'][0].to(group['params'][0].device)
group['exp_avg_sq'][1] = group['exp_avg_sq'][1].to(group['params'][0].device)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
grad_averaging = 1 if group['grad_averaging'] else 0
# assume same step across group now to simplify things
            # a per-parameter step can easily be supported by making it a tensor, or by passing a list into the kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
# create lists for multi-tensor apply
g_16, p_16, m_16 = [], [], []
g_32, p_32, m_32 = [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError('FusedNovoGrad does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
if p.dtype == torch.float16:
g_16.append(p.grad.data)
p_16.append(p.data)
m_16.append(state['exp_avg'])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state['exp_avg'])
else:
raise RuntimeError('FusedNovoGrad only support fp16 and fp32.')
# we store per weight norm as one tensor for one group/precision combination
            # different from optim.Adam, we store the norm here (not the squared norm) so we can unify the calculation across norm types
if 'exp_avg_sq' not in group:
group['exp_avg_sq'] = [None, None]
if group['init_zero']:
# Creating the following parameters on the same device as the params tensors.
group['exp_avg_sq'][0] = torch.cuda.FloatTensor(len(g_16), device=self.param_groups[0]["params"][0].device).contiguous().fill_(0)
group['exp_avg_sq'][1] = torch.cuda.FloatTensor(len(g_32), device=self.param_groups[0]["params"][0].device).contiguous().fill_(0)
else: # init with first step norm, so first blend have no effect
if group['norm_type'] == 0:
v_16 = [torch.max(torch.abs(g.to(torch.float32))).item() for g in g_16]
v_32 = [torch.max(torch.abs(g)).item() for g in g_32]
elif group['norm_type'] == 2:
v_16 = [torch.sum(torch.pow(g.to(torch.float32), 2)).sqrt().item() for g in g_16]
v_32 = [torch.sum(torch.pow(g, 2)).sqrt().item() for g in g_32]
else:
raise RuntimeError('FusedNovoGrad only support l2/inf norm now.')
# Creating the following parameters on the same device as the params tensors.
group['exp_avg_sq'][0] = torch.cuda.FloatTensor(v_16, device=self.param_groups[0]["params"][0].device)
group['exp_avg_sq'][1] = torch.cuda.FloatTensor(v_32, device=self.param_groups[0]["params"][0].device)
else:
assert(len(g_16) == group['exp_avg_sq'][0].numel())
assert(len(g_32) == group['exp_avg_sq'][1].numel())
if(len(g_16) > 0):
multi_tensor_applier(self.multi_tensor_novograd,
self._dummy_overflow_buf,
[g_16, p_16, m_16],
group['exp_avg_sq'][0],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
bias_correction,
group['weight_decay'],
grad_averaging,
self.moment_mode,
group['norm_type'])
if(len(g_32) > 0):
multi_tensor_applier(self.multi_tensor_novograd,
self._dummy_overflow_buf,
[g_32, p_32, m_32],
group['exp_avg_sq'][1],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
bias_correction,
group['weight_decay'],
grad_averaging,
self.moment_mode,
group['norm_type'])
return loss
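# --- Illustrative usage sketch (added; not part of the upstream module) ---
# A minimal, hedged training-step sketch. It assumes apex is installed with its CUDA
# extensions (amp_C) and a GPU is available; the model and data below are placeholders.
def _example_fused_novograd_step():
    model = torch.nn.Linear(1024, 1024).cuda()
    opt = FusedNovoGrad(model.parameters(), lr=1e-3, betas=(0.95, 0.98), weight_decay=0.001)
    x = torch.randn(32, 1024, device='cuda')
    loss = model(x).pow(2).mean()
    loss.backward()
    opt.step()
    opt.zero_grad()
    return loss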
|
apex-master
|
apex/optimizers/fused_novograd.py
|
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This version of fused Adam implements 2 fusions.
* Fusion of the Adam update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
or ``torch.optim.Adam`` with ``adam_w_mode=False``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
...
opt.step()
:class:`apex.optimizers.FusedAdam` may be used with or without Amp. If you wish to use :class:`FusedAdam` with Amp,
you may choose any ``opt_level``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
...
opt.step()
In general, ``opt_level="O1"`` is recommended.
.. warning::
A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``. These additional arguments
are now deprecated and unnecessary.
Adam was proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
adam_w_mode (boolean, optional): Apply L2 regularization or weight decay
True for decoupled weight decay (also known as AdamW) (default: True)
set_grad_none (bool, optional): whether to set grad to None when zero_grad()
method is called. (default: True)
capturable (bool, optional): whether to use the version of the optimizer
that can be used with CUDA Graphs. (default: False)
master_weights (bool, optional): whether to maintain FP32 master weights
in the optimizer with FP16 mixed precision training, currently can
only be used with capturable set to True. (default: False)
.. _Adam - A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, bias_correction=True,
betas=(0.9, 0.999), eps=1e-8, adam_w_mode=True,
weight_decay=0., amsgrad=False, set_grad_none=True,
capturable=False, master_weights=False):
if amsgrad:
raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
if master_weights and not capturable:
raise RuntimeError('Master weights is currently only supported with the capturable version.')
# If the optimizer is capturable then LR should be a tensor (on GPU)
lr = torch.tensor(lr, dtype=torch.float32) if capturable else lr
defaults = dict(lr=lr, bias_correction=bias_correction,
betas=betas, eps=eps, weight_decay=weight_decay)
super(FusedAdam, self).__init__(params, defaults)
self.adam_w_mode = 1 if adam_w_mode else 0
self.set_grad_none = set_grad_none
self.capturable = capturable
self.master_weights = master_weights
# Create full precision master weights
self.param_groups_master = []
for i, pg in enumerate(self.param_groups):
param_list = pg['params']
self.param_groups_master.append({
'params': [
p.clone().detach().float() if self.master_weights else None
for p in param_list
],
})
if capturable:
for idx, group in enumerate(self.param_groups):
if len(group['params']) == 0:
continue
device = group['params'][0].device
for item in ['lr']:
self.param_groups[idx][item] = group[item].to(device=device)
self._step_supports_amp_scaling = True
if multi_tensor_applier.available:
import amp_C
# Skip buffer
self._dummy_overflow_buf = torch.cuda.IntTensor([0])
self.multi_tensor_adam = amp_C.multi_tensor_adam
self.multi_tensor_adam_capturable = amp_C.multi_tensor_adam_capturable
self.multi_tensor_adam_capturable_master = amp_C.multi_tensor_adam_capturable_master
else:
raise RuntimeError('apex.optimizers.FusedAdam requires cuda extensions')
def zero_grad(self):
if self.set_grad_none:
for group in self.param_groups:
for p in group['params']:
p.grad = None
else:
super(FusedAdam, self).zero_grad()
def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
"""
if any(p is not None for p in [grads, output_params, scale, grad_norms]):
raise RuntimeError('FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.')
loss = None
if closure is not None:
loss = closure()
for group, group_master in zip(self.param_groups, self.param_groups_master):
if len(group['params']) == 0:
continue
device = group['params'][0].device
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
# assume the same step across the group for now to simplify things
# a per-parameter step could easily be supported by making it a tensor, or by passing a list into the kernel
if 'step' in group:
group['step'] += 1 if not self.capturable else (self._dummy_overflow_buf != 1).to(torch.int)
else:
group['step'] = 1 if not self.capturable else torch.tensor([1], dtype=torch.int, device=device)
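# Editorial note: in the capturable path the step counter is a GPU int tensor and
# advances by 1 only when no overflow was flagged (self._dummy_overflow_buf stays 0),
# which keeps the update replayable in a CUDA graph without host synchronization.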
# create lists for multi-tensor apply
g_16, p_16, m_16, v_16 = [], [], [], []
g_bf, p_bf, m_bf, v_bf = [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
p_16_master = []
p_32_master = []
for p, p_master in zip(group['params'], group_master['params']):
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError('FusedAdam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data).float()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data).float()
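# Editorial note: the .float() above keeps both moment buffers in fp32 even when
# the parameter itself is fp16 or bf16.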
if p.dtype == torch.float16:
if self.master_weights:
p_16_master.append(p_master.data)
g_16.append(p.grad.data)
p_16.append(p.data)
m_16.append(state['exp_avg'])
v_16.append(state['exp_avg_sq'])
elif p.dtype == torch.bfloat16:
g_bf.append(p.grad)
p_bf.append(p)
m_bf.append(state['exp_avg'])
v_bf.append(state['exp_avg_sq'])
elif p.dtype == torch.float32:
if self.master_weights:
p_32_master.append(p_master.data)
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state['exp_avg'])
v_32.append(state['exp_avg_sq'])
else:
raise RuntimeError('FusedAdam only supports fp16, bf16, and fp32.')
# If the optimizer is capturable, the grad scaler (if any) works on the GPU
# and a different multi_tensor_applier kernel should be called
if self.capturable:
# overflow check of gradients
found_inf = (
grad_scaler._check_inf_per_device(self)[device]
if grad_scaler is not None else torch.zeros((1,), device=device)
)
self._dummy_overflow_buf.copy_(found_inf)
# get unscale scale factor
scale, inv_scale = None, None
if grad_scaler:
scale = grad_scaler._get_scale_async()
inv_scale = scale.double().reciprocal().float()
else:
scale = torch.ones((1,), device=device)
inv_scale = torch.ones((1,), device=device)
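# Editorial note: the reciprocal is computed in double precision before casting back
# to float to limit rounding error; inv_scale is then passed to the fused capturable
# kernel, which presumably uses it to unscale gradients on the fly.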
if len(g_16) > 0:
multi_tensor_applier(self.multi_tensor_adam_capturable_master if self.master_weights
else self.multi_tensor_adam_capturable,
self._dummy_overflow_buf,
[g_16, p_16, m_16, v_16, p_16_master] if self.master_weights
else [g_16, p_16, m_16, v_16],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
self.adam_w_mode,
bias_correction,
group['weight_decay'],
inv_scale)
if len(g_bf) > 0:
multi_tensor_applier(
self.multi_tensor_adam_capturable,
self._dummy_overflow_buf,
[g_bf, p_bf, m_bf, v_bf],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
self.adam_w_mode,
bias_correction,
group['weight_decay'],
inv_scale)
if len(g_32) > 0:
multi_tensor_applier(self.multi_tensor_adam_capturable_master if self.master_weights
else self.multi_tensor_adam_capturable,
self._dummy_overflow_buf,
[g_32, p_32, m_32, v_32, p_32_master] if self.master_weights
else [g_32, p_32, m_32, v_32],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
self.adam_w_mode,
bias_correction,
group['weight_decay'],
inv_scale)
else:
if len(g_16) > 0:
multi_tensor_applier(self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_16, p_16, m_16, v_16],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
self.adam_w_mode,
bias_correction,
group['weight_decay'])
if len(g_bf) > 0:
multi_tensor_applier(
self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_bf, p_bf, m_bf, v_bf],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
self.adam_w_mode,
bias_correction,
group['weight_decay'])
if len(g_32) > 0:
multi_tensor_applier(self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_32, p_32, m_32, v_32],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
self.adam_w_mode,
bias_correction,
group['weight_decay'])
return loss
|
apex-master
|
apex/optimizers/fused_adam.py
|
from .fused_sgd import FusedSGD
from .fused_adam import FusedAdam
from .fused_novograd import FusedNovoGrad
from .fused_lamb import FusedLAMB
from .fused_adagrad import FusedAdagrad
from .fused_mixed_precision_lamb import FusedMixedPrecisionLamb
|
apex-master
|
apex/optimizers/__init__.py
|
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdagrad(torch.optim.Optimizer):
"""Implements Adagrad algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This version of fused Adagrad implements 2 fusions.
* Fusion of the Adagrad update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.optimizers.FusedAdagrad`'s usage is identical to any ordinary Pytorch optimizer::
opt = apex.optimizers.FusedAdagrad(model.parameters(), lr = ....)
...
opt.step()
:class:`apex.optimizers.FusedAdagrad` may be used with or without Amp. If you wish to use :class:`FusedAdagrad` with Amp,
you may choose any ``opt_level``::
opt = apex.optimizers.FusedAdagrad(model.parameters(), lr = ....)
model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
...
opt.step()
In general, ``opt_level="O1"`` is recommended.
It has been proposed in `Adaptive Subgradient Methods for Online Learning
and Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-10)
adagrad_w_mode (boolean, optional): Apply L2 regularization or weight decay.
True for decoupled weight decay (analogous to AdamW) (default: False)
.. _Adaptive Subgradient Methods for Online Learning and Stochastic
Optimization: http://jmlr.org/papers/v12/duchi11a.html
"""
def __init__(self, params, lr=1e-2, eps=1e-10,
weight_decay=0., set_grad_none=True, adagrad_w_mode=False):
defaults = dict(lr=lr, eps=eps, weight_decay=weight_decay)
super(FusedAdagrad, self).__init__(params, defaults)
self.adagrad_w_mode = 1 if adagrad_w_mode else 0
self.set_grad_none = set_grad_none
if multi_tensor_applier.available:
import amp_C
# Skip buffer
self._dummy_overflow_buf = torch.cuda.IntTensor([0])
self.multi_tensor_adagrad = amp_C.multi_tensor_adagrad
else:
raise RuntimeError('apex.optimizers.FusedAdagrad requires cuda extensions')
def zero_grad(self):
if self.set_grad_none:
for group in self.param_groups:
for p in group['params']:
p.grad = None
else:
super(FusedAdagrad, self).zero_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
# create lists for multi-tensor apply
g_16, p_16, h_16 = [], [], []
g_32, p_32, h_32 = [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError('FusedAdagrad does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
# Running sum of squared gradient values
state['sum'] = torch.zeros_like(p.data)
if p.dtype == torch.float16:
g_16.append(p.grad.data)
p_16.append(p.data)
h_16.append(state['sum'])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
h_32.append(state['sum'])
else:
raise RuntimeError('FusedAdagrad only supports fp16 and fp32.')
if(len(g_16) > 0):
multi_tensor_applier(self.multi_tensor_adagrad,
self._dummy_overflow_buf,
[g_16, p_16, h_16],
group['lr'],
group['eps'],
self.adagrad_w_mode,
group['weight_decay'])
if(len(g_32) > 0):
multi_tensor_applier(self.multi_tensor_adagrad,
self._dummy_overflow_buf,
[g_32, p_32, h_32],
group['lr'],
group['eps'],
self.adagrad_w_mode,
group['weight_decay'])
return loss
|
apex-master
|
apex/optimizers/fused_adagrad.py
|
import torch
from copy import deepcopy
from itertools import chain
from collections import defaultdict, abc as container_abcs
from apex.multi_tensor_apply import multi_tensor_applier
class FusedMixedPrecisionLamb(torch.optim.Optimizer):
def __init__(self, params, lr=1e-3, step=0, bias_correction=True,
betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01,
amsgrad=False, adam_w_mode=True,
grad_averaging=True, max_grad_norm=1.0, use_nvlamb=False,
reduced_precision_dtype=None):
if amsgrad:
raise RuntimeError('FusedLAMB does not support the AMSGrad variant.')
# init defaults
defaults = dict(lr=torch.tensor(lr, dtype=torch.float32),
step=torch.tensor([step], dtype=torch.int),
bias_correction=bias_correction,
betas=betas, eps=eps, weight_decay=weight_decay,
grad_averaging=grad_averaging,
max_grad_norm=max_grad_norm)
# init base module
super(FusedMixedPrecisionLamb, self).__init__(params, defaults)
# The learning rate (lr) and optimizer step (step) should be located on device
# in order to facilitate device-sync-free execution
device = self.param_groups[0]['params'][0].device
tensor_state = ['lr', 'step']
for idx,group in enumerate(self.param_groups):
for item in tensor_state:
self.param_groups[idx][item] = group[item].to(device=device)
if multi_tensor_applier.available:
import amp_C
self.multi_tensor_l2norm=amp_C.multi_tensor_l2norm_mp
# Skip buffer
self._dummy_overflow_buf = torch.tensor([0], dtype=torch.int, device=device)
self.multi_tensor_lamb = amp_C.multi_tensor_lamb_mp
else:
raise RuntimeError('apex.optimizers.FusedLAMB requires cuda extensions')
# Mixed Precision support
self.reduced_precision_dtype = reduced_precision_dtype
self.param_groups_full_precision = []
self._step_supports_amp_scaling = True
self.adam_w_mode = 1 if adam_w_mode else 0
self.use_nvlamb = use_nvlamb
# This method is overridden from the parent class because there is no way to override
# the nested function cast() that copies a saved piece of state to the device without
# redundantly doing the copy.
def load_state_dict(self, state_dict):
r"""Loads the optimizer state.
Args:
state_dict (dict): optimizer state. Should be an object returned
from a call to :meth:`state_dict`.
"""
# deepcopy, to be consistent with module API
state_dict = deepcopy(state_dict)
# Validate the state_dict
groups = self.param_groups
saved_groups = state_dict['param_groups']
if len(groups) != len(saved_groups):
raise ValueError("loaded state dict has a different number of "
"parameter groups")
param_lens = (len(g['params']) for g in groups)
saved_lens = (len(g['params']) for g in saved_groups)
if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
raise ValueError("loaded state dict contains a parameter group "
"that doesn't match the size of optimizer's group")
# Update the state
id_map = {old_id: p for old_id, p in
zip(chain.from_iterable((g['params'] for g in saved_groups)),
chain.from_iterable((g['params'] for g in groups)))}
def cast(param, value):
r"""Make a deep copy of value, casting all tensors to device of param."""
if isinstance(value, torch.Tensor):
# The original version cast the saved value to the param's dtype
# This doesn't work for mixed precision Lamb where the momentum and
# velocity are expected to be in full precision while the params are
# in reduced precision
value = value.to(value.device)
return value
elif isinstance(value, dict):
return {k: cast(param, v) for k, v in value.items()}
elif isinstance(value, container_abcs.Iterable):
return type(value)(cast(param, v) for v in value)
else:
return value
# Copy state assigned to params (and cast tensors to appropriate types).
# State that is not assigned to params is copied as is (needed for
# backward compatibility).
state = defaultdict(dict)
for k, v in state_dict['state'].items():
if k in id_map:
param = id_map[k]
state[param] = cast(param, v)
else:
state[k] = v
# Update parameter groups, setting their 'params' value
def update_group(group, new_group):
new_group['params'] = group['params']
return new_group
param_groups = [
update_group(g, ng) for g, ng in zip(groups, saved_groups)]
self.__setstate__({'state': state, 'param_groups': param_groups})
def _setup_full_precision_params(self):
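# Editorial note: this builds a parallel list that holds an fp32 master copy only for
# params stored in reduced_precision_dtype; full-precision params get None and are
# updated directly.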
for i, pg in enumerate(self.param_groups):
param_list = pg['params']
self.param_groups_full_precision.append({
'params': [
p.clone().detach().to(dtype=torch.float32)
if (self.reduced_precision_dtype is not None) and (p.dtype == self.reduced_precision_dtype)
else None
for p in param_list
],
})
# add_param_group() is overridden because default items can be tensors. The
# parent version does not clone a tensor default, so two param groups could
# accidentally share the same default tensor even though their values may need
# to differ once they are in separate groups.
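# Editorial illustration (not in the original source): with a tensor default such as
# lr = torch.tensor(1e-3), the clone below gives each new group its own copy, so an
# in-place lr update on one group cannot silently change another group's lr.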
def add_param_group(self, param_group):
super().add_param_group(param_group)
for name, default in self.defaults.items():
if isinstance(default, torch.Tensor):
self.param_groups[len(self.param_groups) - 1][name] = default.clone()
@torch.no_grad()
def step(self, closure=None, grad_scaler=None):
loss = None
if closure is not None:
loss = closure()
# The full precision params are set up in the first step of the optimizer
# instead of in the constructor because the full precision params will get out
# of sync with the model params if DDP syncs the model params across devices
# after the optimizer is constructed.
if len(self.param_groups_full_precision) == 0:
self._setup_full_precision_params()
# create separate grad lists for params
grad_list = []
for gid,group in enumerate(self.param_groups):
for pid,p in enumerate(group['params']):
assert group['params'][0].dtype == p.dtype, \
"Error: Parameters are not of the identical type: {} != {}".format(
group['params'][0].dtype, p.dtype)
if p.grad is None:
continue
grad_list.append(p.grad)
# Overflow check of gradients
device = self.param_groups[0]["params"][0].device
found_inf = (
grad_scaler._check_inf_per_device(self)[device]
if grad_scaler is not None else torch.zeros((1,), device=device)
)
self._dummy_overflow_buf.copy_(found_inf)
# Get unscale scale factor
scale, inv_scale = None, None
if grad_scaler:
scale = grad_scaler._get_scale_async()
inv_scale = scale.double().reciprocal().float()
else:
scale = torch.ones((1,), device=device)
inv_scale = torch.ones((1,), device=device)
# grad_norm is of scaled gradients.
# So, multiply `max_grad_norm` by scale.
max_grad_norm = self.defaults['max_grad_norm'] * scale
grad_norm = multi_tensor_applier(
self.multi_tensor_l2norm,
self._dummy_overflow_buf,
[grad_list],
False,
)[0]
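# Editorial note: as with amp_C.multi_tensor_l2norm, the first returned value is taken
# as the global L2 norm over all listed gradients; the trailing False disables the
# optional per-tensor norm output.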
# Run LAMB optimization math
for gid, (group, group_full) in enumerate(zip(self.param_groups, self.param_groups_full_precision)):
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
grad_averaging = 1 if group['grad_averaging'] else 0
# assume the same step across the group for now to simplify things
# a per-parameter step could easily be supported by making it a tensor, or by passing a list into the kernel
group['step'] += (self._dummy_overflow_buf != 1).to(torch.int)
state_lists = [ [], # (0) grads
[], # (1) params
[], # (2) momentum state
[], # (3) velocity state
]
if self.reduced_precision_dtype is not None:
state_lists.append([]) # (4) params reduced_dtype
for p, p_full in zip(group['params'], group_full['params']):
if p.grad is None:
continue
assert not p.grad.is_sparse
state = self.state[p]
# State initialization
if len(state) == 0:
dtype = p.dtype
if self.reduced_precision_dtype is not None and p.dtype == self.reduced_precision_dtype:
dtype = torch.float32
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data, dtype=dtype)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=dtype)
if self.reduced_precision_dtype is not None:
state_lists[0].append(p.grad.data)
state_lists[1].append(p_full.data)
state_lists[2].append(state['exp_avg'])
state_lists[3].append(state['exp_avg_sq'])
state_lists[4].append(p.data)
else:
state_lists[0].append(p.grad.data)
state_lists[1].append(p.data)
state_lists[2].append(state['exp_avg'])
state_lists[3].append(state['exp_avg_sq'])
multi_tensor_applier(
self.multi_tensor_lamb,
self._dummy_overflow_buf,
state_lists,
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
bias_correction,
group['weight_decay'],
grad_averaging,
self.adam_w_mode,
grad_norm,
max_grad_norm,
self.use_nvlamb,
found_inf,
inv_scale)
return loss
|
apex-master
|
apex/optimizers/fused_mixed_precision_lamb.py
|
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedLAMB(torch.optim.Optimizer):
"""Implements LAMB algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This version of fused LAMB implements 2 fusions.
* Fusion of the LAMB update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.optimizers.FusedLAMB`'s usage is identical to any ordinary Pytorch optimizer::
opt = apex.optimizers.FusedLAMB(model.parameters(), lr = ....)
...
opt.step()
:class:`apex.optimizers.FusedLAMB` may be used with or without Amp. If you wish to use :class:`FusedLAMB` with Amp,
you may choose any ``opt_level``::
opt = apex.optimizers.FusedLAMB(model.parameters(), lr = ....)
model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
...
opt.step()
In general, ``opt_level="O1"`` is recommended.
LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its norm. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
NOT SUPPORTED now! (default: False)
adam_w_mode (boolean, optional): Apply L2 regularization or weight decay
True for decoupled weight decay (also known as AdamW) (default: True)
grad_averaging (bool, optional): whether to apply (1-beta2) to grad when
calculating running averages of gradient. (default: True)
set_grad_none (bool, optional): whether to set grad to None when zero_grad()
method is called. (default: True)
max_grad_norm (float, optional): value used to clip global grad norm
(default: 1.0)
use_nvlamb (boolean, optional): Apply the adaptive learning rate even to
parameters with 0.0 weight decay (default: False)
.. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, bias_correction=True,
betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01,
amsgrad=False, adam_w_mode=True,
grad_averaging=True, set_grad_none=True,
max_grad_norm=1.0, use_nvlamb=False):
if amsgrad:
raise RuntimeError('FusedLAMB does not support the AMSGrad variant.')
defaults = dict(lr=lr, bias_correction=bias_correction,
betas=betas, eps=eps, weight_decay=weight_decay,
grad_averaging=grad_averaging,
max_grad_norm=max_grad_norm)
super(FusedLAMB, self).__init__(params, defaults)
if multi_tensor_applier.available:
import amp_C
self.multi_tensor_l2norm=amp_C.multi_tensor_l2norm
# Skip buffer
self._dummy_overflow_buf = torch.tensor([0], dtype=torch.int, device=self.param_groups[0]["params"][0].device)
self.multi_tensor_lamb = amp_C.multi_tensor_lamb
else:
raise RuntimeError('apex.optimizers.FusedLAMB requires cuda extensions')
self.adam_w_mode = 1 if adam_w_mode else 0
self.set_grad_none = set_grad_none
self.use_nvlamb = use_nvlamb
def zero_grad(self):
if self.set_grad_none:
for group in self.param_groups:
for p in group['params']:
p.grad = None
else:
super(FusedLAMB, self).zero_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
# create separate grad lists for fp32 and fp16 params
g_all_32, g_all_16 = [], []
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
if p.dtype == torch.float32:
g_all_32.append(p.grad.data)
elif p.dtype == torch.float16:
g_all_16.append(p.grad.data)
else:
raise RuntimeError('FusedLAMB only supports fp16 and fp32.')
device = self.param_groups[0]["params"][0].device
g_norm_32, g_norm_16 = torch.zeros(1, device=device), torch.zeros(1, device=device)
# compute grad norm for two lists
if len(g_all_32) > 0:
g_norm_32 = multi_tensor_applier(self.multi_tensor_l2norm,
self._dummy_overflow_buf,
[g_all_32], False)[0]
if len(g_all_16) > 0:
g_norm_16 = multi_tensor_applier(self.multi_tensor_l2norm,
self._dummy_overflow_buf,
[g_all_16], False)[0]
# blend two grad norms to get global grad norm
global_grad_norm = multi_tensor_applier(self.multi_tensor_l2norm,
self._dummy_overflow_buf,
[[g_norm_32, g_norm_16]],
False)[0]
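# Editorial note: feeding the two partial norms back through the L2-norm kernel
# combines them as global_grad_norm = sqrt(g_norm_32**2 + g_norm_16**2).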
max_grad_norm = self.defaults['max_grad_norm']
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
grad_averaging = 1 if group['grad_averaging'] else 0
# assume the same step across the group for now to simplify things
# a per-parameter step could easily be supported by making it a tensor, or by passing a list into the kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
# create lists for multi-tensor apply
g_16, p_16, m_16, v_16 = [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError('FusedLAMB does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if p.dtype == torch.float16:
g_16.append(p.grad.data)
p_16.append(p.data)
m_16.append(state['exp_avg'])
v_16.append(state['exp_avg_sq'])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state['exp_avg'])
v_32.append(state['exp_avg_sq'])
else:
raise RuntimeError('FusedLAMB only supports fp16 and fp32.')
if(len(g_16) > 0):
multi_tensor_applier(self.multi_tensor_lamb,
self._dummy_overflow_buf,
[g_16, p_16, m_16, v_16],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
bias_correction,
group['weight_decay'],
grad_averaging,
self.adam_w_mode,
global_grad_norm,
max_grad_norm,
self.use_nvlamb)
if(len(g_32) > 0):
multi_tensor_applier(self.multi_tensor_lamb,
self._dummy_overflow_buf,
[g_32, p_32, m_32, v_32],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
bias_correction,
group['weight_decay'],
grad_averaging,
self.adam_w_mode,
global_grad_norm,
max_grad_norm,
self.use_nvlamb)
return loss
|
apex-master
|
apex/optimizers/fused_lamb.py
|