id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
10,154 | import argparse
import dataclasses
from attr import define, field
from attr.setters import frozen
import functools
import gc
import math
import os
from typing import Tuple, Union, Optional, Any, Sequence, List
import numpy as np
import torch
def piecewise_linear_func_ret_func(xs, ys, x):
    """Evaluate the piecewise-linear interpolant through (xs, ys) at x.

    ``xs`` must be sorted ascending and ``x`` must lie inside
    ``[xs[0], xs[-1]]``.
    """
    assert xs[0] <= x <= xs[-1]
    return np.interp(x, xs, ys)
The provided code snippet includes necessary dependencies for implementing the `piecewise_linear_func` function. Write a Python function `def piecewise_linear_func(xs, ys)` to solve the following problem:
Return a function created by linear interpolation.
Here is the function:
def piecewise_linear_func(xs, ys):
    """Return a function created by linear interpolation.

    The returned callable maps x -> y by interpolating through the
    (xs, ys) control points, which may be given in any order. The two
    outermost segments are extended far beyond the original range
    (by a factor ``k``), so queries slightly outside it extrapolate
    linearly instead of failing the helper's range assert.

    Args:
        xs: x-coordinates of the control points (at least two).
        ys: y-coordinates, parallel to ``xs``.

    Returns:
        A picklable callable ``f(x)``.
    """
    # Bug fix: with fewer than two points the padding arithmetic below
    # raised an opaque IndexError; fail loudly instead.
    assert len(xs) >= 2 and len(xs) == len(ys), "need at least two (x, y) points"

    indices = np.argsort(xs)
    xs = [xs[i] for i in indices]
    ys = [ys[i] for i in indices]

    # Pad left and right by extending the edge segments a long way out.
    k = 1e5
    delta_x_left = xs[0] - xs[1]
    delta_y_left = ys[0] - ys[1]
    delta_x_right = xs[-1] - xs[-2]
    delta_y_right = ys[-1] - ys[-2]
    xs = [xs[0] + delta_x_left * k] + xs + [xs[-1] + delta_x_right * k]
    ys = [ys[0] + delta_y_left * k] + ys + [ys[-1] + delta_y_right * k]

    # functools.partial over a module-level function keeps the result picklable.
    return functools.partial(piecewise_linear_func_ret_func, xs, ys)
10,155 | import argparse
import dataclasses
from attr import define, field
from attr.setters import frozen
import functools
import gc
import math
import os
from typing import Tuple, Union, Optional, Any, Sequence, List
import numpy as np
import torch
def sample_from_range(n, k):
    """Sample integers from the range [1, n].

    If ``k == -1``, return the geometric schedule 1, 2, 4, ... with each
    value's doubling strictly below ``n``. Otherwise return an evenly
    spaced arithmetic schedule starting at 1 with roughly ``k`` points.

    Args:
        n: Inclusive upper bound of the range; must be >= 1.
        k: Number of samples, or -1 for the power-of-two schedule.

    Returns:
        A sorted list of integers in [1, n].
    """
    assert n >= 1
    if k == -1:
        # Geometric schedule: keep doubling while the next value stays < n.
        ret = [1]
        while ret[-1] * 2 < n:
            ret.append(ret[-1] * 2)
        return ret
    if k == 1:
        return [1]
    # Bug fix: when k > n the integer division yields step == 0, which
    # makes range() raise ValueError. Clamp the step to at least 1.
    step = max((n - 1) // (k - 1), 1)
    return list(range(1, n + 1, step))
10,156 | import argparse
import dataclasses
from attr import define, field
from attr.setters import frozen
import functools
import gc
import math
import os
from typing import Tuple, Union, Optional, Any, Sequence, List
import numpy as np
import torch
def cpu_mem_stats():
    """Return the total bytes held by all live CPU torch tensors.

    Walks the garbage collector's object list and sums ``numel * itemsize``
    for every CPU-resident tensor, counting each underlying storage only
    once (views share a data pointer with their base tensor).
    """
    seen_ptrs = set()
    total_bytes = 0
    for obj in gc.get_objects():
        if not (torch.is_tensor(obj) and not obj.is_cuda):
            continue
        storage = obj.storage()
        # a data_ptr indicates a memory block allocated
        ptr = storage.data_ptr()
        if ptr in seen_ptrs:
            continue
        seen_ptrs.add(ptr)
        total_bytes += obj.numel() * storage.element_size()
    return total_bytes
10,157 | import argparse
import dataclasses
from attr import define, field
from attr.setters import frozen
import functools
import gc
import math
import os
from typing import Tuple, Union, Optional, Any, Sequence, List
import numpy as np
import torch
def torch_mem_stats():
    """Return the total bytes held by all live CUDA torch tensors.

    Same accounting as the CPU variant, but restricted to CUDA tensors.
    Prints each unique tensor's shape and pointer as a debugging aid.
    """
    seen_ptrs = set()
    total_bytes = 0
    for obj in gc.get_objects():
        if not (torch.is_tensor(obj) and obj.is_cuda):
            continue
        storage = obj.storage()
        # a data_ptr indicates a memory block allocated
        ptr = storage.data_ptr()
        if ptr in seen_ptrs:
            continue
        seen_ptrs.add(ptr)
        print(obj.shape, obj.data_ptr())
        total_bytes += obj.numel() * storage.element_size()
    return total_bytes
10,158 | import argparse
import dataclasses
from attr import define, field
from attr.setters import frozen
import functools
import gc
import math
import os
from typing import Tuple, Union, Optional, Any, Sequence, List
import numpy as np
import torch
def array_1d(a, cls):
    """Return a length-``a`` list of independently constructed ``cls`` instances."""
    out = []
    for _ in range(a):
        out.append(cls())
    return out
10,159 | import argparse
import dataclasses
from attr import define, field
from attr.setters import frozen
import functools
import gc
import math
import os
from typing import Tuple, Union, Optional, Any, Sequence, List
import numpy as np
import torch
def array_2d(a, b, cls):
    """Return an ``a`` x ``b`` nested list of independently constructed ``cls`` instances."""
    rows = []
    for _ in range(a):
        rows.append([cls() for _ in range(b)])
    return rows
10,160 | import argparse
import dataclasses
from attr import define, field
from attr.setters import frozen
import functools
import gc
import math
import os
from typing import Tuple, Union, Optional, Any, Sequence, List
import numpy as np
import torch
def array_3d(a, b, c, cls):
    """Return an ``a`` x ``b`` x ``c`` nested list of independently constructed ``cls`` instances."""
    out = []
    for _ in range(a):
        plane = []
        for _ in range(b):
            plane.append([cls() for _ in range(c)])
        out.append(plane)
    return out
10,161 | import argparse
import dataclasses
from attr import define, field
from attr.setters import frozen
import functools
import gc
import math
import os
from typing import Tuple, Union, Optional, Any, Sequence, List
import numpy as np
import torch
def array_4d(a, b, c, d, cls):
    """Return an ``a`` x ``b`` x ``c`` x ``d`` nested list of independently constructed ``cls`` instances."""
    out = []
    for _ in range(a):
        cube = []
        for _ in range(b):
            plane = []
            for _ in range(c):
                plane.append([cls() for _ in range(d)])
            cube.append(plane)
        out.append(cube)
    return out
10,162 | import argparse
import dataclasses
from attr import define, field
from attr.setters import frozen
import functools
import gc
import math
import os
from typing import Tuple, Union, Optional, Any, Sequence, List
import numpy as np
import torch
class BenchmarkResult:
    # NOTE(review): no fields/__init__ are visible in this chunk; the
    # constructor call below presumably targets a dataclass with six float
    # fields — confirm against the full source.

    def read_benchmark_log(filename):
        """Parse a benchmark log file into a BenchmarkResult.

        NOTE(review): defined without ``self`` or ``@staticmethod``;
        callers presumably invoke it on the class,
        ``BenchmarkResult.read_benchmark_log(path)`` — confirm.
        """
        with open(filename) as fin:
            lines = fin.readlines()

        def extract(line):
            # Each line is "...: <latency> s\t...: <throughput> to..." —
            # grab the numbers between ':' and the unit markers.
            a, b = line.split("\t")
            latency = a[a.index(":") + 1:a.index(" s")]
            throughput = b[b.index(":") + 1:b.index(" to")]
            return float(latency), float(throughput)

        # Lines 2-4 hold the prefill / decode / total measurements, in order.
        prefill_latency, prefill_throughput = extract(lines[2])
        decode_latency, decode_throughput = extract(lines[3])
        total_latency, total_throughput = extract(lines[4])

        return BenchmarkResult(
            prefill_latency, prefill_throughput,
            decode_latency, decode_throughput,
            total_latency, total_throughput,
        )
import time

import numpy as np
import torch

from flexgen.profile_bandwidth import benchmark_func
def benchmark_func(func, number, repeat, warmup=3):
    """Benchmark ``func`` and return per-call latencies in seconds.

    Runs ``warmup`` untimed calls, then ``repeat`` timed trials of
    ``number`` calls each, recording the average per-call wall time of
    every trial.

    Args:
        func: Zero-argument callable to benchmark.
        number: Calls per timed trial (latency is averaged over these).
        repeat: Number of timed trials.
        warmup: Untimed calls executed first to amortize one-time costs.

    Returns:
        List of ``repeat`` average latencies (seconds per call).
    """
    for _ in range(warmup):
        func()

    # Bug fix: the list previously started as [0], so callers taking
    # np.mean(...) over the result were skewed by a spurious zero entry.
    costs = []
    # Bug fix: guard the CUDA sync — the CPU path of bench_matmul also
    # calls this, and torch.cuda.synchronize() raises on CUDA-less hosts.
    sync = torch.cuda.synchronize if torch.cuda.is_available() else (lambda: None)
    for _ in range(repeat):
        sync()  # drain pending GPU work so the timer starts clean
        tic = time.time()
        for _ in range(number):
            func()
        sync()
        costs.append((time.time() - tic) / number)
    return costs
def bench_matmul():
    """Benchmark square matmuls (fp16 on GPU, fp32 on CPU) and print TFLOPS."""
    for device in ["cuda", "cpu"]:
        # Half precision on GPU, full precision on CPU.
        dtype = torch.float16 if device == "cuda" else torch.float32
        for n in [1024, 2048]:
            lhs = torch.rand(n, n).to(dtype).to(device)
            rhs = torch.rand(n, n).to(dtype).to(device)

            def run():
                return torch.matmul(lhs, rhs)

            cost = np.mean(benchmark_func(run, number=5, repeat=3))
            tflops = 2 * n * n * n / cost / 1e12
            print(f"device: {device}, N: {n}, latency: {cost*1e3:.2f} ms, TFLOPS: {tflops:.3f}")
        print()
10,164 | import dataclasses
import torch
import numpy as np
from flexgen.pytorch_backend import (TorchTensor, TorchDevice,
DeviceType, general_copy, fix_recursive_import)
from flexgen.utils import np_dtype_to_torch_dtype
# Module-level default: cache compression disabled (all-zero config).
# NOTE(review): CompressionConfig is presumably defined/imported elsewhere
# in the full module — it is not visible in this chunk.
default_cache_config = CompressionConfig(
    num_bits=0, group_size=0, group_dim=0, symmetric=False, enabled=False)
def set_cache_compression_config(config):
    """Replace the module-wide default cache compression config."""
    global default_cache_config
    default_cache_config = config
10,165 | import dataclasses
import torch
import numpy as np
from flexgen.pytorch_backend import (TorchTensor, TorchDevice,
DeviceType, general_copy, fix_recursive_import)
from flexgen.utils import np_dtype_to_torch_dtype
# Module-level default: cache compression disabled (all-zero config).
# NOTE(review): CompressionConfig is presumably defined/imported elsewhere
# in the full module — it is not visible in this chunk.
default_cache_config = CompressionConfig(
    num_bits=0, group_size=0, group_dim=0, symmetric=False, enabled=False)
def get_cache_compression_config():
    """Return the module-wide default cache compression config."""
    return default_cache_config
10,166 | import dataclasses
import torch
import numpy as np
from flexgen.pytorch_backend import (TorchTensor, TorchDevice,
DeviceType, general_copy, fix_recursive_import)
from flexgen.utils import np_dtype_to_torch_dtype
def compress(tensor, config):
    """Simulate group-wise quantization.

    Splits ``tensor`` into groups of ``config.group_size`` elements along
    ``config.group_dim`` (zero-padding the tail group) and quantizes each
    group to ``config.num_bits`` bits: symmetric int8 or asymmetric uint8.
    Returns the packed tuple consumed by ``decompress``; when compression
    is disabled, returns the input tensor unchanged.
    """
    if not config.enabled:
        return tensor

    group_size, num_bits, group_dim, symmetric = (
        config.group_size, config.num_bits, config.group_dim, config.symmetric)
    assert num_bits <= 8  # each quantized value must fit in one byte

    original_shape = tensor.shape
    num_groups = (original_shape[group_dim] + group_size - 1) // group_size
    # Replace the grouped dimension with a (num_groups, group_size) pair.
    new_shape = (original_shape[:group_dim] + (num_groups, group_size) +
                 original_shape[group_dim+1:])

    # Pad
    pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
    if pad_len != 0:
        pad_shape = original_shape[:group_dim] + (pad_len,) + original_shape[group_dim+1:]
        tensor = torch.cat([
            tensor,
            torch.zeros(pad_shape, dtype=tensor.dtype, device=tensor.device)],
            dim=group_dim)
    data = tensor.view(new_shape)

    # Quantize
    if symmetric:
        # Symmetric: scale each group so its max |value| maps to B.
        B = 2 ** (num_bits - 1) - 1
        scale = B / torch.max(data.abs(), dim=group_dim + 1, keepdim=True)[0]
        data = data * scale
        data = data.clamp_(-B, B).round_().to(torch.int8)
        return data, scale, original_shape
    else:
        # Asymmetric: shift by the group minimum, scale the range onto [0, B].
        B = 2 ** num_bits - 1
        mn = torch.min(data, dim=group_dim + 1, keepdim=True)[0]
        mx = torch.max(data, dim=group_dim + 1, keepdim=True)[0]
        scale = B / (mx - mn)
        data = data - mn
        data.mul_(scale)
        data = data.clamp_(0, B).round_().to(torch.uint8)
        return data, mn, scale, original_shape
def decompress(packed_data, config):
    """Simulate group-wise dequantization.

    Inverse of ``compress``: rescales the packed integer groups back to
    floats, slices off the zero padding, and restores the original shape.
    When compression is disabled, ``packed_data`` is the raw tensor and is
    returned unchanged.
    """
    if not config.enabled:
        return packed_data

    group_size, num_bits, group_dim, symmetric = (
        config.group_size, config.num_bits, config.group_dim, config.symmetric)

    # Dequantize
    if symmetric:
        data, scale, original_shape = packed_data
        data = data / scale
    else:
        data, mn, scale, original_shape = packed_data
        data = data / scale
        data.add_(mn)

    # Unpad
    pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
    if pad_len:
        padded_original_shape = (
            original_shape[:group_dim] +
            (original_shape[group_dim] + pad_len,) +
            original_shape[group_dim+1:])
        data = data.reshape(padded_original_shape)
        # Slice away the padded tail (a no-op on every dim but group_dim).
        indices = [slice(0, x) for x in original_shape]
        return data[indices].contiguous()
    else:
        return data.view(original_shape)
def compress_and_decompress(tensor, config):
    """Round-trip ``tensor`` through quantization (a lossy identity)."""
    return decompress(compress(tensor, config), config)
10,167 | import dataclasses
import torch
import numpy as np
from flexgen.pytorch_backend import (TorchTensor, TorchDevice,
DeviceType, general_copy, fix_recursive_import)
from flexgen.utils import np_dtype_to_torch_dtype
@dataclasses.dataclass
class CompressionConfig:
    """Group-wise quantization configuration.

    Attributes:
        num_bits: Bit width of each quantized value (must be <= 8).
        group_size: Number of elements per quantization group.
        group_dim: Tensor dimension along which groups are formed.
        symmetric: Symmetric (int8) vs. asymmetric (uint8) quantization.
        enabled: When False, compress/decompress are no-ops.
    """
    # Bug fix: the class carried only bare annotations, but it is
    # instantiated with keyword arguments elsewhere in this file — without
    # the @dataclasses.dataclass decorator that call raises TypeError.
    num_bits: int
    group_size: int
    group_dim: int
    symmetric: bool
    enabled: bool = True
def compress(tensor, config):
    """Simulate group-wise quantization.

    Splits ``tensor`` into groups of ``config.group_size`` elements along
    ``config.group_dim`` (zero-padding the tail group) and quantizes each
    group to ``config.num_bits`` bits: symmetric int8 or asymmetric uint8.
    Returns the packed tuple consumed by ``decompress``; when compression
    is disabled, returns the input tensor unchanged.
    """
    if not config.enabled:
        return tensor

    group_size, num_bits, group_dim, symmetric = (
        config.group_size, config.num_bits, config.group_dim, config.symmetric)
    assert num_bits <= 8  # each quantized value must fit in one byte

    original_shape = tensor.shape
    num_groups = (original_shape[group_dim] + group_size - 1) // group_size
    # Replace the grouped dimension with a (num_groups, group_size) pair.
    new_shape = (original_shape[:group_dim] + (num_groups, group_size) +
                 original_shape[group_dim+1:])

    # Pad
    pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
    if pad_len != 0:
        pad_shape = original_shape[:group_dim] + (pad_len,) + original_shape[group_dim+1:]
        tensor = torch.cat([
            tensor,
            torch.zeros(pad_shape, dtype=tensor.dtype, device=tensor.device)],
            dim=group_dim)
    data = tensor.view(new_shape)

    # Quantize
    if symmetric:
        # Symmetric: scale each group so its max |value| maps to B.
        B = 2 ** (num_bits - 1) - 1
        scale = B / torch.max(data.abs(), dim=group_dim + 1, keepdim=True)[0]
        data = data * scale
        data = data.clamp_(-B, B).round_().to(torch.int8)
        return data, scale, original_shape
    else:
        # Asymmetric: shift by the group minimum, scale the range onto [0, B].
        B = 2 ** num_bits - 1
        mn = torch.min(data, dim=group_dim + 1, keepdim=True)[0]
        mx = torch.max(data, dim=group_dim + 1, keepdim=True)[0]
        scale = B / (mx - mn)
        data = data - mn
        data.mul_(scale)
        data = data.clamp_(0, B).round_().to(torch.uint8)
        return data, mn, scale, original_shape
def decompress(packed_data, config):
    """Simulate group-wise dequantization.

    Inverse of ``compress``: rescales the packed integer groups back to
    floats, slices off the zero padding, and restores the original shape.
    When compression is disabled, ``packed_data`` is the raw tensor and is
    returned unchanged.
    """
    if not config.enabled:
        return packed_data

    group_size, num_bits, group_dim, symmetric = (
        config.group_size, config.num_bits, config.group_dim, config.symmetric)

    # Dequantize
    if symmetric:
        data, scale, original_shape = packed_data
        data = data / scale
    else:
        data, mn, scale, original_shape = packed_data
        data = data / scale
        data.add_(mn)

    # Unpad
    pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
    if pad_len:
        padded_original_shape = (
            original_shape[:group_dim] +
            (original_shape[group_dim] + pad_len,) +
            original_shape[group_dim+1:])
        data = data.reshape(padded_original_shape)
        # Slice away the padded tail (a no-op on every dim but group_dim).
        indices = [slice(0, x) for x in original_shape]
        return data[indices].contiguous()
    else:
        return data.view(original_shape)
def test_simulated_compression():
    """Eyeball-check the quantization round trip on random CUDA data."""
    torch.manual_seed(0)
    original = torch.normal(0, 1, (64, 64, 64), dtype=torch.float16).cuda()
    config = CompressionConfig(
        num_bits=4, group_size=32, group_dim=0, symmetric=False)
    restored = decompress(compress(original, config), config)
    print(original[0])
    print(restored[0])
10,168 | import dataclasses
import torch
import numpy as np
from flexgen.pytorch_backend import (TorchTensor, TorchDevice,
DeviceType, general_copy, fix_recursive_import)
from flexgen.utils import np_dtype_to_torch_dtype
@dataclasses.dataclass
class CompressionConfig:
    """Group-wise quantization configuration.

    Attributes:
        num_bits: Bit width of each quantized value (must be <= 8).
        group_size: Number of elements per quantization group.
        group_dim: Tensor dimension along which groups are formed.
        symmetric: Symmetric (int8) vs. asymmetric (uint8) quantization.
        enabled: When False, compress/decompress are no-ops.
    """
    # Bug fix: the class carried only bare annotations, but it is
    # instantiated with keyword arguments elsewhere in this file — without
    # the @dataclasses.dataclass decorator that call raises TypeError.
    num_bits: int
    group_size: int
    group_dim: int
    symmetric: bool
    enabled: bool = True
def compress(tensor, config):
    """Simulate group-wise quantization.

    Splits ``tensor`` into groups of ``config.group_size`` elements along
    ``config.group_dim`` (zero-padding the tail group) and quantizes each
    group to ``config.num_bits`` bits: symmetric int8 or asymmetric uint8.
    Returns the packed tuple consumed by ``decompress``; when compression
    is disabled, returns the input tensor unchanged.
    """
    if not config.enabled:
        return tensor

    group_size, num_bits, group_dim, symmetric = (
        config.group_size, config.num_bits, config.group_dim, config.symmetric)
    assert num_bits <= 8  # each quantized value must fit in one byte

    original_shape = tensor.shape
    num_groups = (original_shape[group_dim] + group_size - 1) // group_size
    # Replace the grouped dimension with a (num_groups, group_size) pair.
    new_shape = (original_shape[:group_dim] + (num_groups, group_size) +
                 original_shape[group_dim+1:])

    # Pad
    pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
    if pad_len != 0:
        pad_shape = original_shape[:group_dim] + (pad_len,) + original_shape[group_dim+1:]
        tensor = torch.cat([
            tensor,
            torch.zeros(pad_shape, dtype=tensor.dtype, device=tensor.device)],
            dim=group_dim)
    data = tensor.view(new_shape)

    # Quantize
    if symmetric:
        # Symmetric: scale each group so its max |value| maps to B.
        B = 2 ** (num_bits - 1) - 1
        scale = B / torch.max(data.abs(), dim=group_dim + 1, keepdim=True)[0]
        data = data * scale
        data = data.clamp_(-B, B).round_().to(torch.int8)
        return data, scale, original_shape
    else:
        # Asymmetric: shift by the group minimum, scale the range onto [0, B].
        B = 2 ** num_bits - 1
        mn = torch.min(data, dim=group_dim + 1, keepdim=True)[0]
        mx = torch.max(data, dim=group_dim + 1, keepdim=True)[0]
        scale = B / (mx - mn)
        data = data - mn
        data.mul_(scale)
        data = data.clamp_(0, B).round_().to(torch.uint8)
        return data, mn, scale, original_shape
def decompress(packed_data, config):
    """Simulate group-wise dequantization.

    Inverse of ``compress``: rescales the packed integer groups back to
    floats, slices off the zero padding, and restores the original shape.
    When compression is disabled, ``packed_data`` is the raw tensor and is
    returned unchanged.
    """
    if not config.enabled:
        return packed_data

    group_size, num_bits, group_dim, symmetric = (
        config.group_size, config.num_bits, config.group_dim, config.symmetric)

    # Dequantize
    if symmetric:
        data, scale, original_shape = packed_data
        data = data / scale
    else:
        data, mn, scale, original_shape = packed_data
        data = data / scale
        data.add_(mn)

    # Unpad
    pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
    if pad_len:
        padded_original_shape = (
            original_shape[:group_dim] +
            (original_shape[group_dim] + pad_len,) +
            original_shape[group_dim+1:])
        data = data.reshape(padded_original_shape)
        # Slice away the padded tail (a no-op on every dim but group_dim).
        indices = [slice(0, x) for x in original_shape]
        return data[indices].contiguous()
    else:
        return data.view(original_shape)
class TorchDevice:
    """Wrap tensor and computation APIs of a single CPU or GPU."""

    def __init__(self, name, mem_capacity=None, flops=None):
        # name: torch device string, e.g. "cuda:0" or "cpu".
        # mem_capacity / flops: optional hardware capabilities — presumably
        # consumed by an offloading cost model elsewhere; unused here.
        self.name = name
        self.mem_capacity = mem_capacity
        self.flops = flops

        self.dev = torch.device(name)
        self.device_type = DeviceType.convert(self.dev.type)
        # Companion device that stores tensors in group-quantized form.
        self.compressed_device = TorchCompressedDevice(self)

        self.links = {}  # peer TorchDevice -> communication link

        # fp32 scratch (k_cache, v_cache) buffers for CPU attention.
        self.attention_compute_workspace = None
        self.workspace_pt = 0

        if self.device_type == DeviceType.CPU:
            # Remember the (single) CPU device in a module-level global.
            global global_cpu_device
            global_cpu_device = self

    def add_link(self, link):
        # Register the link under the peer device on the other end.
        dst = link.b if link.a == self else link.a
        self.links[dst] = link

    def allocate(self, shape, dtype, pin_memory=None, name=None):
        """Allocate an uninitialized tensor on this device.

        ``dtype`` is a numpy dtype and is translated to the torch dtype.
        CPU allocations default to pinned memory for faster GPU transfers.
        """
        if self.device_type == DeviceType.CPU:
            pin_memory = True if pin_memory is None else pin_memory
        else:
            pin_memory = False
        dtype = np_dtype_to_torch_dtype[dtype]
        data = torch.empty(shape, dtype=dtype, pin_memory=pin_memory, device=self.dev)
        return TorchTensor.create_from_torch(data, self, name=name)

    def delete(self, tensor):
        # No-op: torch reclaims memory via reference counting.
        pass

    def init_attention_compute_workspace(self, config, task, policy):
        """Pre-allocate fp32 (k, v) scratch buffers for CPU attention."""
        if self.device_type != DeviceType.CPU:
            return  # Only CPU requires this fp32 workspace

        if not policy.compress_cache:
            b = policy.gpu_batch_size
            n_head = config.n_head
            head_dim = config.input_dim // n_head
            max_seq_len = task.prompt_len + task.gen_len - 1
            self.attention_compute_workspace = []
            self.workspace_pt = 0

            # We currently separate SelfAttention and MLP as two layers,
            # so we only need one workspace instead of two.
            for i in range(1 if policy.sep_layer else 2):
                shape = (max_seq_len, b * n_head, head_dim)
                k_cache = self.allocate(shape, np.float32, pin_memory=False)
                v_cache = self.allocate(shape, np.float32, pin_memory=False)
                self.attention_compute_workspace.append((k_cache, v_cache))
        else:
            self.compressed_device.init_attention_compute_workspace(
                config, task, policy)

    def next_attention_compute_workspace(self):
        # Round-robin over the pre-allocated workspaces.
        self.workspace_pt = (self.workspace_pt + 1) % len(
            self.attention_compute_workspace)
        return self.attention_compute_workspace[self.workspace_pt]

    def del_attention_compute_workspace(self):
        self.attention_compute_workspace = None

    def gen_attention_mask(self, token_ids, pad_token_id, donate):
        # Mask is True wherever the token is not padding.
        data = token_ids.data.ne(pad_token_id)
        if donate[0]: token_ids.delete()
        return TorchTensor.create_from_torch(data, self)

    def extend_attention_mask(self, attention_mask, donate):
        # Append one column of ones for the newly generated token.
        bs = attention_mask.shape[0]
        data = torch.concat((attention_mask.data,
            torch.ones((bs, 1), dtype=attention_mask.dtype, device=self.dev)), dim=1)
        if donate[0]: attention_mask.delete()
        return TorchTensor.create_from_torch(data, self)

    def opt_input_embed(self, inputs, attention_mask, w_token, w_pos, pad_token_id, donate):
        """OPT input embedding: token embedding + learned position embedding."""
        # decompress weights
        if w_token.device.device_type == DeviceType.COMPRESSED:
            w_token = w_token.device.decompress(w_token)
            w_pos = w_pos.device.decompress(w_pos)

        token_ids = inputs.data
        mask = attention_mask.data
        if donate[0]: inputs.delete()
        if donate[1]: attention_mask.delete()

        # token embedding
        token_embed = F.embedding(token_ids, w_token.data, pad_token_id)

        # pos embedding: position counts only non-pad tokens (OPT offsets by +1,
        # effectively +2 relative to zero because cumsum starts at 1).
        positions = torch.cumsum(mask, dim=1).int() * mask + 1

        # cut positions if `past_key_values_length` is > 0
        past_key_values_length = mask.shape[1] - token_ids.shape[1]
        positions = positions[:, past_key_values_length:]
        pos_embed = F.embedding(positions, w_pos.data)

        data = token_embed + pos_embed
        return TorchTensor.create_from_torch(data, self)

    def opt_output_embed(self, inputs, w_ln, b_ln, w_token, donate,
                         do_sample, temperature):
        """Final layer norm + output projection + token selection."""
        # decompress weights
        if w_token.device.device_type == DeviceType.COMPRESSED:
            w_token = w_token.device.decompress(w_token)

        b, s, h = inputs.shape

        hidden = F.layer_norm(inputs.data, (h,), weight=w_ln.data, bias=b_ln.data)
        if donate[0]: inputs.delete()

        # output embedding
        logits = F.linear(hidden, w_token.data)
        last_token_logits = logits[:,-1,:]

        # Near-zero temperature degenerates to greedy argmax.
        if do_sample and not temperature < 1e-5:
            probs = torch.softmax(last_token_logits / temperature, dim=-1)
            ids = torch.multinomial(probs, num_samples=1)
        else:
            ids = last_token_logits.argmax(dim=1, keepdim=True)
        return TorchTensor.create_from_torch(ids, self)

    def init_cache_one_gpu_batch(self, config, task, policy):
        """Allocate fp16 (k, v) caches for one GPU batch."""
        num_head, hidden_size, prompt_len, gen_len, gpu_batch_size = (
            config.n_head, config.input_dim, task.prompt_len, task.gen_len,
            policy.gpu_batch_size)
        shape = (prompt_len + gen_len - 1, gpu_batch_size * num_head, hidden_size // num_head)
        # NOTE: disable pin_memory due to high memory overhead
        pin_memory = False
        k_cache = self.allocate(shape, np.float16, pin_memory=pin_memory)
        v_cache = self.allocate(shape, np.float16, pin_memory=pin_memory)
        return k_cache, v_cache

    def mha(self, inputs, attention_mask, w_q, b_q, w_k, b_k, w_v, b_v,
            w_out, b_out, w_ln, b_ln, n_head, donate, compress_cache, comp_config):
        """Multi-head attention (prefill phase)."""
        # decompress weights
        if w_q.device.device_type == DeviceType.COMPRESSED:
            w_q = w_q.device.decompress(w_q)
            w_k = w_k.device.decompress(w_k)
            w_v = w_v.device.decompress(w_v)
            w_out = w_out.device.decompress(w_out)

        b, s, h = inputs.shape
        head_dim = h // n_head
        scaling = head_dim ** -0.5

        hidden = F.layer_norm(inputs.data, (h,), weight=w_ln.data, bias=b_ln.data)

        # shape: (b, s, h)
        q = F.linear(hidden, w_q.data, bias=b_q.data) * scaling
        k = F.linear(hidden, w_k.data, bias=b_k.data)
        v = F.linear(hidden, w_v.data, bias=b_v.data)
        # shape: (b, s, n_head, head_dim)
        q = q.view(b, s, n_head, head_dim)
        k = k.view(b, s, n_head, head_dim)
        v = v.view(b, s, n_head, head_dim)

        # shape: (b * n_head, s, head_dim)
        q = q.permute(0, 2, 1, 3).reshape(b * n_head, s, head_dim)
        # shape: (b * n_head, head_dim, s)
        k = k.permute(0, 2, 3, 1).reshape(b * n_head, head_dim, s)
        # shape: (b * n_head, s, head_dim)
        v = v.permute(0, 2, 1, 3).reshape(b * n_head, s, head_dim)

        # shape: (b * n_head, s, s)
        attn_weights = torch.bmm(q, k)

        # shape: (b, 1, s, s) — causal mask ANDed with the padding mask.
        idx = torch.arange(s, device=self.dev)
        causal_mask = (idx <= idx.view(s, 1)).view(1, 1, s, s)
        mask = attention_mask.data.view(b, 1, 1, s) & causal_mask

        # shape: (b, n_head, s, s); -1e4 stands in for -inf in fp16.
        attn_weights = attn_weights.view(b, n_head, s, s)
        attn_weights = torch.where(mask, attn_weights, -1e4)
        attn_weights = attn_weights.view(b * n_head, s, s)
        attn_weights = F.softmax(attn_weights, dim=2)
        # shape: (b, n_head, s, head_dim)
        value = torch.bmm(attn_weights, v).view(b, n_head, s, head_dim)
        # shape: (b, s, h)
        value = value.transpose(1, 2).reshape(b, s, h)
        value = F.linear(value, w_out.data, bias=b_out.data)

        value.add_(inputs.data)  # residual connection

        if donate[0]: inputs.delete()
        if donate[1]: attention_mask.delete()

        # (s, b * n_head, head_dim) — cache layout is sequence-major.
        k = k.permute(2, 0, 1)
        v = v.permute(1, 0, 2)

        if compress_cache:
            k = self.compressed_device.compress(k, comp_config)
            v = self.compressed_device.compress(v, comp_config)
        else:
            k = TorchTensor.create_from_torch(k, self)
            v = TorchTensor.create_from_torch(v, self)

        return TorchTensor.create_from_torch(value, self), k, v

    def mha_gen(self, inputs, attention_mask, w_q, b_q, w_k, b_k, w_v, b_v,
                w_out, b_out, w_ln, b_ln, n_head, k_cache, v_cache, donate,
                attn_sparsity, compress_cache, comp_config):
        """Multi-head attention (decoding phase)."""
        # decompress weights
        if w_q.device.device_type == DeviceType.COMPRESSED:
            w_q = w_q.device.decompress(w_q)
            w_k = w_k.device.decompress(w_k)
            w_v = w_v.device.decompress(w_v)
            w_out = w_out.device.decompress(w_out)

        b, tgt_s, h = inputs.shape
        src_s = attention_mask.shape[1]
        head_dim = h // n_head
        scaling = head_dim ** -0.5

        hidden = F.layer_norm(inputs.data, (h,), weight=w_ln.data, bias=b_ln.data)

        # shape: (b, 1, h)
        q = F.linear(hidden, w_q.data, bias=b_q.data) * scaling
        k = F.linear(hidden, w_k.data, bias=b_k.data)
        v = F.linear(hidden, w_v.data, bias=b_v.data)
        # shape: (b, 1, n_head, head_dim)
        q = q.view(b, tgt_s, n_head, head_dim)
        k = k.view(b, tgt_s, n_head, head_dim)
        v = v.view(b, tgt_s, n_head, head_dim)

        # shape: (b * n_head, 1, head_dim)
        q = q.permute(0, 2, 1, 3).reshape(b * n_head, tgt_s, head_dim)
        # shape: (1, b * n_head, head_dim)
        k_new = k.permute(1, 0, 2, 3).reshape(tgt_s, b * n_head, head_dim)
        # shape: (1, b * n_head, head_dim)
        v_new = v.permute(1, 0, 2, 3).reshape(tgt_s, b * n_head, head_dim)

        if isinstance(k_cache, TorchTensor):
            if attn_sparsity >= 1.0:  # Dense attention
                if compress_cache:
                    # shape: (s, b * n_head, head_dim)
                    k = k_cache.device.decompress(k_cache)[:src_s]
                    v = v_cache.device.decompress(v_cache)[:src_s]
                else:
                    # shape: (s, b * n_head, head_dim)
                    k = k_cache.data[:src_s]
                    v = v_cache.data[:src_s]
                # Write the new token's k/v into the last cache slot.
                k[src_s - 1:src_s] = k_new
                v[src_s - 1:src_s] = v_new

                # shape: (b * n_head, head_dim, s)
                k = k.permute(1, 2, 0).reshape(b * n_head, head_dim, src_s)
                # shape: (b * n_head, s, head_dim)
                v = v.permute(1, 0, 2).reshape(b * n_head, src_s, head_dim)

                if k.is_cuda:
                    value = self._attention_value(q, k, v, attention_mask.data,
                        b, src_s, tgt_s, n_head, head_dim)
                else:
                    # CPU attention runs in fp32 for accuracy; result goes
                    # back to the GPU in fp16.
                    q = q.float().cpu()
                    k, v = k.float(), v.float()
                    value = self._attention_value(q, k, v, attention_mask.data,
                        b, src_s, tgt_s, n_head, head_dim).cuda().half()
            else:  # Sparse attention
                # shape: (s, b * n_head, head_dim)
                k = k_cache.data[:src_s]
                k[src_s - 1:src_s] = k_new
                # shape: (b * n_head, head_dim, s)
                k = k.permute(1, 2, 0).reshape(b * n_head, head_dim, src_s)

                if k.is_cuda:
                    value = self._sparse_attention_value(q, k, v_new, v_cache,
                        attention_mask.data, b, src_s, tgt_s, n_head, head_dim,
                        attn_sparsity)
                else:
                    q = q.float().cpu()
                    value = self._sparse_attention_value(q, k, v_new, v_cache,
                        attention_mask.data, b, src_s, tgt_s, n_head, head_dim,
                        attn_sparsity).cuda().half()
        else:  # Mixed device attention
            assert attn_sparsity >= 1.0
            value = self._mixed_device_attention(q, k_cache, v_cache,
                k_new, v_new, attention_mask.data, b, src_s, tgt_s,
                n_head, head_dim)

        # shape: (b, 1, h)
        value = value.transpose(1, 2).view(b, tgt_s, h)
        value = F.linear(value, w_out.data, bias=b_out.data)

        value.add_(inputs.data)  # residual connection

        if donate[0]: inputs.delete()
        if donate[1]: attention_mask.delete()

        if compress_cache:
            if comp_config.group_dim == 0:
                # Only the tail that doesn't fill a whole compression group
                # needs to be re-compressed this step.
                s_ = src_s // comp_config.group_size * comp_config.group_size
                k_new = k[:, :, s_:].permute(2, 0, 1)
                v_new = v[:, s_:, :].permute(1, 0, 2)
            k_new = self.compressed_device.compress(k_new, comp_config)
            v_new = self.compressed_device.compress(v_new, comp_config)
        else:
            k_new = TorchTensor.create_from_torch(k_new, self)
            v_new = TorchTensor.create_from_torch(v_new, self)

        return TorchTensor.create_from_torch(value, self), k_new, v_new

    def _attention_weights(self, q, k, mask, b, src_s, n_head):
        """Masked softmax attention weights for a single decoding step."""
        # shape: (b * n_head, 1, s)
        attn_weights = torch.bmm(q, k)
        # shape: (b, 1, 1, s)
        mask = mask.view(b, 1, 1, src_s)
        # shape: (b * n_head, 1, s); -1e4 stands in for -inf in fp16.
        attn_weights = attn_weights.view(b, n_head, 1, src_s)
        attn_weights = torch.where(mask, attn_weights, -1e4)
        attn_weights = attn_weights.view(b * n_head, 1, src_s)
        attn_weights = F.softmax(attn_weights, dim=2)
        return attn_weights

    def _attention_value(self, q, k, v, mask, b, src_s, tgt_s, n_head, head_dim):
        """Dense attention output for a single decoding step."""
        # shape: (b * n_head, 1, s)
        attn_weights = self._attention_weights(q, k, mask, b, src_s, n_head)
        # shape: (b, n_head, 1, head_dim)
        return torch.bmm(attn_weights, v).view(b, n_head, tgt_s, head_dim)

    def _sparse_attention_value(self, q, k, v_new, v_cache, mask, b,
                                src_s, tgt_s, n_head, head_dim, attn_sparsity):
        """Top-k sparse attention: fetch only the highest-weight v rows."""
        # shape: (b * n_head, 1, s)
        attn_weights = self._attention_weights(q, k, mask, b, src_s, n_head)
        # Keep the top attn_sparsity fraction of past positions; the most
        # recent position (the new token) is always included.
        topk = int(attn_sparsity * (attn_weights.shape[2] - 1))
        topk_weights, topk_indices = attn_weights[:, :, :-1].topk(
            topk, dim=2, sorted=False)
        topk_indices = topk_indices.view(b * n_head, topk).transpose(0, 1)
        # shape: (b * n_head, 1, topk+1)
        attn_weights = torch.cat([topk_weights,
            attn_weights[:, :, -1].unsqueeze(-1)], dim=-1)

        if k.is_cuda:
            v_home = v_cache
            v_buf = self.allocate((topk+1, b*n_head, head_dim), np.float16)
            topk_indices = topk_indices.cpu()
        else:
            (v_home, v_buf) = v_cache

        # Gather the selected v rows into the staging buffer.
        # shape: (s, b * n_head, head_dim)
        indices_src = topk_indices
        indices_tgt = (slice(0, indices_src.shape[0]), slice(0, v_home.shape[1]))
        general_copy(v_buf, indices_tgt, v_home, indices_src)
        v_home.device.synchronize()

        # shape: (topk+1, b * n_head, head_dim)
        v = v_buf.data[:topk+1]
        v[topk:topk+1] = v_new
        # shape: (b * n_head, topk+1, head_dim)
        v = v.permute(1, 0, 2).reshape(b * n_head, topk+1, head_dim)

        # shape: (b * n_head, 1, head_dim)
        return torch.bmm(attn_weights, v).view(b, n_head, tgt_s, head_dim)

    def _mixed_device_attention(self, q, k_cache, v_cache, k_new, v_new,
                                mask, b, src_s, tgt_s, n_head, head_dim):
        # The caches are stored on both gpu and cpu.
        # Compute attention on gpu for caches stored on gpu.
        # Compute attention on cpu for caches stored on cpu.
        k_gpu, k_cpu = k_cache[0].data, k_cache[1].data
        v_gpu, v_cpu = v_cache[0].data, v_cache[1].data
        seg = k_gpu.shape[1]  # split point along the b * n_head dimension

        # Compute GPU part
        b_gpu = seg // n_head
        q_gpu = q[:seg]
        # shape: (s, b * n_head, head_dim)
        k_gpu = k_gpu[:src_s, :seg, :]
        v_gpu = v_gpu[:src_s, :seg, :]
        k_gpu[src_s-1:src_s, :, :] = k_new[:, :seg, :]
        v_gpu[src_s-1:src_s, :, :] = v_new[:, :seg, :]
        # shape: (b * n_head, head_dim, s)
        k_gpu = k_gpu.permute(1, 2, 0)
        # shape: (b * n_head, s, head_dim)
        v_gpu = v_gpu.permute(1, 0, 2)

        mask_gpu = mask[:b_gpu].cuda()
        value_gpu = self._attention_value(q_gpu, k_gpu, v_gpu, mask_gpu,
            b_gpu, src_s, tgt_s, n_head, head_dim)

        # Compute CPU Part
        b_cpu = b - b_gpu
        q_cpu = q[seg:].float().cpu()
        # shape: (s, b * n_head, head_dim)
        k_cpu = k_cpu[:src_s, seg:, :]
        v_cpu = v_cpu[:src_s, seg:, :]
        k_cpu[src_s-1:src_s, :, :] = k_new[:, seg:, :]
        v_cpu[src_s-1:src_s, :, :] = v_new[:, seg:, :]
        # shape: (b * n_head, head_dim, s)
        k_cpu = k_cpu.permute(1, 2, 0)
        # shape: (b * n_head, s, head_dim)
        v_cpu = v_cpu.permute(1, 0, 2)

        mask_cpu = mask[b_gpu:]
        value_cpu = self._attention_value(q_cpu, k_cpu, v_cpu, mask_cpu,
            b_cpu, src_s, tgt_s, n_head, head_dim)

        # Concatenate the two halves back on the GPU in fp16.
        value = torch.cat([value_gpu, value_cpu.cuda().half()], dim=0)
        return value

    def mlp(self, inputs, wi, bi, wo, bo, w_ln, b_ln, donate):
        """Transformer MLP block: LN -> linear -> ReLU -> linear + residual."""
        # decompress weights
        if wi.device.device_type == DeviceType.COMPRESSED:
            wi = wi.device.decompress(wi)
            wo = wo.device.decompress(wo)

        b, s, h = inputs.shape

        out = F.layer_norm(inputs.data, (h,), weight=w_ln.data, bias=b_ln.data)
        out = F.linear(out, wi.data, bias=bi.data)
        F.relu(out, inplace=True)
        out = F.linear(out, wo.data, bias=bo.data)

        out.add_(inputs.data)  # residual connection
        if donate[0]: inputs.delete()
        return TorchTensor.create_from_torch(out, self)

    def synchronize(self):
        torch.cuda.synchronize()

    def mem_stats(self):
        """Return (current, peak) memory usage in bytes for this device."""
        if self.device_type == DeviceType.CUDA:
            cur_mem = torch.cuda.memory_allocated(self.dev)
            peak_mem = torch.cuda.max_memory_allocated(self.dev)
        elif self.device_type == DeviceType.CPU:
            cur_mem = cpu_mem_stats()
            peak_mem = 0  # peak tracking not available for CPU
        else:
            raise NotImplementedError()

        return cur_mem, peak_mem

    def print_stats(self, output_file=None):
        """Print (or write to ``output_file``) memory usage and return it."""
        torch.cuda.synchronize()
        cur_mem, peak_mem = self.mem_stats()

        if output_file is not None:
            with open(output_file, "w") as f:
                f.write(f"TorchDevice: {self.name}\n")
                f.write(f" cur_mem: {cur_mem/GB:.4f} GB, "
                        f" peak_mem: {peak_mem/GB:.4f} GB\n")
        else:
            print(f"TorchDevice: {self.name}")
            print(f" cur_mem: {cur_mem/GB:.4f} GB, "
                  f" peak_mem: {peak_mem/GB:.4f} GB")

        return cur_mem, peak_mem

    def __str__(self):
        return f"TorchDevice(name={self.name})"
def test_real_compression():
    """Eyeball-check the compressed-device round trip on random CUDA data."""
    torch.manual_seed(0)
    original = torch.normal(0, 1, (32, 1, 1), dtype=torch.float16).cuda()
    config = CompressionConfig(
        num_bits=4, group_size=32, group_dim=0, symmetric=False)
    dev = TorchDevice("cuda:0", 0, 0).compressed_device
    restored = dev.decompress(dev.compress(original, config))
    print(original.flatten())
    print(restored.flatten())
10,169 | from enum import Enum, auto
from functools import partial
from itertools import count
import os
import queue
import shutil
import time
import threading
from typing import Optional, Union, Tuple
import torch
import torch.nn.functional as F
import numpy as np
from flexgen.utils import (GB, T, cpu_mem_stats, vector_gather,
np_dtype_to_torch_dtype, torch_dtype_to_np_dtype,
torch_dtype_to_num_bytes)
# Placeholders resolved lazily by fix_recursive_import() to break the
# circular dependency between this module and flexgen.compression.
general_copy_compressed = TorchCompressedDevice = None
global_cpu_device = None
def fix_recursive_import():
    """Resolve the circular import with flexgen.compression lazily.

    Must be called once at startup, after both modules are importable;
    it fills in the module-level placeholders declared above.
    """
    global general_copy_compressed, TorchCompressedDevice, global_cpu_device
    from flexgen import compression
    general_copy_compressed = compression.general_copy_compressed
    TorchCompressedDevice = compression.TorchCompressedDevice
10,170 | from enum import Enum, auto
from functools import partial
from itertools import count
import os
import queue
import shutil
import time
import threading
from typing import Optional, Union, Tuple
import torch
import torch.nn.functional as F
import numpy as np
from flexgen.utils import (GB, T, cpu_mem_stats, vector_gather,
np_dtype_to_torch_dtype, torch_dtype_to_np_dtype,
torch_dtype_to_num_bytes)
class DeviceType(Enum):
    """Where a tensor lives / how it is represented."""
    CPU = auto()
    CUDA = auto()
    DISK = auto()
    MIXED = auto()
    COMPRESSED = auto()

    # Fix: mark as @staticmethod — call sites use DeviceType.convert(name),
    # and without the decorator an accidental instance-level call
    # (member.convert(...)) would misbind the enum member as `name`.
    @staticmethod
    def convert(name):
        """Map a device-name string (e.g. torch's ``dev.type``) to a DeviceType.

        Raises:
            ValueError: If ``name`` is not a recognized device name.
        """
        if name == "cpu":
            return DeviceType.CPU
        elif name == "cuda":
            return DeviceType.CUDA
        elif name == "disk":
            return DeviceType.DISK
        elif name == "mixed":
            return DeviceType.MIXED
        elif name == "compressed":
            return DeviceType.COMPRESSED
        else:
            raise ValueError(f"Invalid name: {name}")
def map_to_torch_tensor(tensor, indices):
    """Materialize `tensor` as a torch.Tensor and select `indices` from it.

    Disk-backed tensors store a file path in `.data`; they are memory-mapped
    here instead of being fully loaded.
    """
    if tensor.device.device_type == DeviceType.DISK:
        data = torch.from_numpy(np.lib.format.open_memmap(tensor.data))
    else:
        data = tensor.data

    # BC: this is supposed to only handle the sparse v_cache case
    if torch.is_tensor(indices):
        return vector_gather(data, indices)
    # NOTE(review): truthiness test -- a falsy `indices` (None, empty tuple)
    # returns the whole tensor; confirm empty-but-meaningful index objects
    # can never reach this point.
    return data[indices] if indices else data
GB = 1 << 30
The provided code snippet includes necessary dependencies for implementing the `copy_worker_func` function. Write a Python function `def copy_worker_func(queue, cuda_id)` to solve the following problem:
The copy worker thread.
Here is the function:
def copy_worker_func(queue, cuda_id):
    """The copy worker thread.

    Consumes (dst, dst_indices, src, src_indices) items from `queue` and
    performs the copies on a dedicated CUDA stream. A `None` item is the
    shutdown sentinel.
    """
    torch.cuda.set_device(cuda_id)

    # Staging buffer: 2^30 fp16 elements of pinned host memory, so
    # device<->host transfers can run asynchronously.
    cpu_buf = torch.empty((1 * GB,), dtype=torch.float16, pin_memory=True)
    copy_stream = torch.cuda.Stream()

    with torch.cuda.stream(copy_stream):
        while True:
            item = queue.get()
            if item is None:
                queue.task_done()
                return

            dst, dst_indices, src, src_indices = item
            src_data = map_to_torch_tensor(src, src_indices)
            dst_data = map_to_torch_tensor(dst, dst_indices)

            if (src.device.device_type == DeviceType.CUDA or
                dst.device.device_type == DeviceType.CUDA):
                # Use a pinned cpu buffer as a relay
                # NOTE(review): assumes the tensor fits into cpu_buf
                # (<= 2^30 elements) -- larger copies would fail the view.
                size = np.prod(src_data.shape)
                tmp_cpu_buf = cpu_buf[:size].view(src_data.shape)
                tmp_cpu_buf.copy_(src_data)
                dst_data.copy_(tmp_cpu_buf)
            else:
                dst_data.copy_(src_data)

            queue.task_done()
10,171 | import argparse
from dataclasses import asdict, replace
import json
import math
import os
import time
from flexgen.flex_opt import (Policy, OptLM, ExecutionEnv, CompressionConfig,
str2bool)
from helm.benchmark.presentation.run_entry import RunEntry
from helm.benchmark.run import run_entries_to_run_specs
from helm.benchmark.run_specs import (ScenarioSpec, RunSpec, get_summarization_adapter_spec,
get_summarization_metric_specs, get_generative_harms_metric_specs,
ADAPT_MULTIPLE_CHOICE_JOINT, get_multiple_choice_adapter_spec)
from helm.benchmark.runner import (create_scenario, AdapterFactory, with_instance_ids, create_metric,
TokensMetric, Metric, MetricSpec, MetricResult, PerInstanceStats, create_metric, Stat,
ScenarioState, Counter, MetricName, ensure_directory_exists, write, asdict_without_nones,
DataPreprocessor)
from helm.common.request import Request, RequestResult, Sequence, Token
from helm.common.tokenization_request import (TokenizationRequestResult,
TokenizationRequest, TokenizationToken, DecodeRequest, DecodeRequestResult)
from helm.proxy.clients.client import truncate_sequence
import numpy as np
from tqdm import tqdm
from transformers import AutoTokenizer, AutoConfig
class OptTokenizer:
    """Minimal HELM tokenizer service backed by a HuggingFace tokenizer."""
    # Adapted from helm/proxy/clients/huggingface_client.py

    def __init__(self, name):
        self.tokenizer = AutoTokenizer.from_pretrained(name, padding_side="left")
        self.tokenizer.add_bos_token = False
        if 'galactica' in name:
            # Galactica tokenizers do not carry pad/eos ids; pull them
            # from the model config instead.
            config = AutoConfig.from_pretrained(name)
            self.tokenizer.pad_token = config.pad_token_id
            self.tokenizer.eos_token = config.eos_token_id

    def tokenize(self, request: TokenizationRequest) -> TokenizationRequestResult:
        """Tokenize (or encode) `request.text`, never adding special tokens."""
        if not request.encode:
            raw_tokens = self.tokenizer.tokenize(request.text)
        elif request.truncation:
            raw_tokens = self.tokenizer.encode(
                request.text,
                truncation=request.truncation,
                max_length=request.max_length,
                add_special_tokens=False,
            )
        else:
            raw_tokens = self.tokenizer.encode(request.text, add_special_tokens=False)
        return TokenizationRequestResult(
            success=True,
            cached=False,
            text=request.text,
            tokens=[TokenizationToken(value) for value in raw_tokens],
            request_time=0,
        )

    def decode(self, request: DecodeRequest) -> DecodeRequestResult:
        """Decode token ids back into text."""
        decoded = self.tokenizer.decode(
            request.tokens,
            clean_up_tokenization_spaces=request.clean_up_tokenization_spaces,
        )
        return DecodeRequestResult(
            success=True, cached=False, text=decoded, request_time=0,
        )
def execute(scenario_state, tokenizer, effective_bs, pad_to_seq_len):
    """Run every request in `scenario_state` through a FlexGen OptLM model.

    Returns a new ScenarioState in which each request state carries a
    RequestResult built from the generated tokens.

    NOTE(review): reads the module-level `args` namespace for all FlexGen
    settings instead of taking them as parameters -- confirm `args` is
    assigned before this function is called.
    """
    # Generation settings come from the first request; HELM uses the same
    # adapter spec for every request in a run.
    generation_args = get_hf_generation_args(
        scenario_state.request_states[0].request, tokenizer)
    # Pad/batch all prompts to a fixed shape (project helper).
    batches = get_batches(scenario_state, tokenizer,
        effective_bs, pad_to_seq_len=pad_to_seq_len)

    # Initialize environment
    env = ExecutionEnv.create(args.offload_dir)

    # Offloading policy
    policy = Policy(args.gpu_batch_size, args.num_gpu_batches,
                    args.percent[0], args.percent[1],
                    args.percent[2], args.percent[3],
                    args.percent[4], args.percent[5],
                    overlap=True, sep_layer=True, pin_weight=args.pin_weight,
                    cpu_cache_compute=args.cpu_cache_compute, attn_sparsity=1.0,
                    compress_weight=args.compress_weight,
                    comp_weight_config=CompressionConfig(
                        num_bits=4, group_size=64,
                        group_dim=0, symmetric=False),
                    compress_cache=args.compress_cache,
                    comp_cache_config=CompressionConfig(
                        num_bits=4, group_size=64,
                        group_dim=2, symmetric=False))

    print(f"Init weights begin.")
    tic = time.time()
    model = OptLM(args.model, env, args.path, policy)
    print(f"Init weights end. Elapsed: {time.time() - tic:.2f} s", flush=True)

    # Generate
    print(f"Generate begin. #sequences: {len(batches) * effective_bs}")
    tic = time.time()
    input_ids_batches = []
    output_ids_batches = []
    for batch in tqdm(batches):
        input_ids_tmp = batch["input_ids"]
        output_ids_tmp = model.generate(
            input_ids_tmp,
            do_sample=generation_args["do_sample"],
            temperature=generation_args["temperature"],
            max_new_tokens=generation_args["max_new_tokens"],
            stop=generation_args.get("eos_token_id", None))
        input_ids_batches.append(input_ids_tmp)
        output_ids_batches.append(output_ids_tmp)
    print(f"Generate end. Elapsed: {time.time() - tic:.2f} s", flush=True)

    input_ids = np.concatenate(input_ids_batches)
    output_ids = np.concatenate(output_ids_batches)
    # NOTE(review): `outputs` is only used by the commented-out debug prints.
    outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    #print("Outputs:\n" + 70 * '-')
    ##for i in range(len(outputs)):
    #for i in [0, len(outputs) - 1]:
    #    print(f"{i}:\n{outputs[i]}")
    #    print("-" * 70)

    # Shutdown
    print("Shutdown...")
    env.close_copy_threads()

    # Wrap the raw token ids into HELM RequestResult objects.
    request_states = []
    for i, request_state in enumerate(scenario_state.request_states):
        request = request_state.request
        encoded_input = input_ids[i]
        sequences = [output_ids[i]]
        if not request.echo_prompt:
            # Strip the prompt tokens from the generated sequence.
            sequences = [sequence[len(encoded_input) :] for sequence in sequences]

        all_tokens = [tokenizer.convert_ids_to_tokens(sequence) for sequence in sequences]
        all_decoded_text = tokenizer.batch_decode(sequences)
        # FlexGen does not expose per-token logprobs; report zeros / empties.
        all_logprobs_of_chosen_tokens = [[0] * len(x) for x in all_tokens]
        all_top_logprobs_dicts = [[{}] * len(x) for x in all_tokens]

        completions = []
        for (decoded_text, tokens, logprobs_of_chosen_tokens, top_logprobs_dicts) in zip(
            all_decoded_text, all_tokens, all_logprobs_of_chosen_tokens, all_top_logprobs_dicts
        ):
            completions.append(
                {
                    "text": decoded_text,
                    "tokens": tokens,
                    "logprobs": logprobs_of_chosen_tokens,
                    "top_logprobs_dicts": top_logprobs_dicts,
                }
            )
        response = {
            "completions": completions, "input_length": len(encoded_input)}

        # Convert the raw completion dicts into HELM Sequence objects.
        completions = []
        for raw_completion in response["completions"]:
            sequence_logprob: float = 0
            # NOTE(review): `List` is not imported in this module; local
            # variable annotations are never evaluated so this cannot crash,
            # but a type checker will flag it.
            tokens: List[Token] = []
            if request.echo_prompt:
                # Add prompt to list of generated tokens.
                generated_tokens = raw_completion["tokens"][response["input_length"] :]
                for token_text in raw_completion["tokens"][: response["input_length"]]:
                    tokens.append(Token(text=token_text, logprob=0.0, top_logprobs={}))
            else:
                generated_tokens = raw_completion["tokens"]

            # Compute logprob for the entire sequence.
            for token_text, logprob, top_logprobs_dict in zip(
                generated_tokens, raw_completion["logprobs"], raw_completion["top_logprobs_dicts"]
            ):
                tokens.append(Token(text=token_text, logprob=logprob, top_logprobs=top_logprobs_dict))
                sequence_logprob += logprob

            completion = Sequence(text=raw_completion["text"], logprob=sequence_logprob, tokens=tokens)
            completion = truncate_sequence(completion, request)
            completions.append(completion)

        result = RequestResult(
            success=True,
            cached=False,
            request_time=0,
            request_datetime=0,
            completions=completions,
            embedding=[],
        )
        request_states.append(replace(request_state, result=result))

    return ScenarioState(scenario_state.adapter_spec, request_states)
def run_entry(description, pad_to_seq_len, args):
    """Run one HELM run entry end-to-end with FlexGen.

    Builds the run spec, adapts the scenario instances into prompts,
    executes them with `execute`, evaluates metrics, and writes the result
    files under `args.run_path`.
    """
    effective_bs = args.gpu_batch_size * args.num_gpu_batches
    parallelism = 4

    ##### RunSpec #####
    run_entries = [RunEntry(description, priority=1, groups=None)]
    run_specs = run_entries_to_run_specs(
        run_entries=run_entries,
        max_eval_instances=args.max_eval_instances,
        num_train_trials=3,
    )
    run_spec = run_specs[0]

    run_path: str = os.path.join(args.run_path, run_spec.name)
    ensure_directory_exists(run_path)

    eval_cache_path: str = os.path.join(run_path, "eval_cache")
    ensure_directory_exists(eval_cache_path)

    ##### Adapter #####
    #tokenizer_service = OptTokenizer("facebook/opt-30b")
    tokenizer_service = OptTokenizer(args.model)
    tokenizer = tokenizer_service.tokenizer
    adapter = AdapterFactory.get_adapter(run_spec.adapter_spec, tokenizer_service)

    ##### Scenario #####
    print(run_spec)
    scenario = create_scenario(run_spec.scenario_spec)
    scenario.output_path = f"data/{run_spec.name}"
    os.makedirs(scenario.output_path, exist_ok=True)
    instances = scenario.get_instances()

    # Give each instance a unique ID
    instances = with_instance_ids(instances)

    # Get the instances necessary for this run.
    instances = adapter.get_run_instances(instances)

    # Data preprocessing
    instances = DataPreprocessor(run_spec.data_augmenter_spec).preprocess(
        instances, parallelism=parallelism
    )
    scenario_state = adapter.adapt(instances, parallelism=parallelism)

    ##### Execute #####
    if pad_to_seq_len is None:
        # Leave room for exactly max_tokens generated tokens.
        pad_to_seq_len = adapter.window_service.max_sequence_length - run_spec.adapter_spec.max_tokens + 1
    scenario_state = execute(scenario_state, tokenizer, effective_bs, pad_to_seq_len)

    ##### Metrics #####
    metrics = (
        [create_metric(metric_spec) for metric_spec in run_spec.metric_specs]) + [TokensMetric()]
    # NOTE(review): only the first metric is kept, discarding the rest
    # (including the TokensMetric just appended) -- confirm intentional.
    metrics = [metrics[0]]

    stats: List[Stat] = []
    per_instance_stats: List[PerInstanceStats] = []
    for metric in metrics:
        metric_result: MetricResult = metric.evaluate(
            scenario_state,
            tokenizer_service,
            eval_cache_path,
            parallelism=parallelism,
        )
        stats.extend(metric_result.aggregated_stats)
        per_instance_stats.extend(metric_result.per_instance_stats)

    # Check that there aren't duplicate `Stat`s
    # Note: doesn't catch near misses.
    # NOTE(review): `typing` is not imported in this module; local variable
    # annotations are never evaluated, so no crash, but a checker will flag it.
    metric_counts: typing.Counter[MetricName] = Counter([stat.name for stat in stats])
    for metric_name, count in metric_counts.items():
        if count > 1:
            print(f"WARNING: duplicate metric name {metric_name}")

    # Print out the number of stats
    print(f"Generated {len(stats)} stats.")

    # Output benchmarking information and results to files
    write(os.path.join(run_path, "run_spec.json"), json.dumps(asdict_without_nones(run_spec), indent=2))

    # Write out scenario
    write(os.path.join(run_path, "scenario.json"), json.dumps(asdict_without_nones(scenario), indent=2))

    # Write scenario state
    write(os.path.join(run_path, "scenario_state.json"), json.dumps(asdict_without_nones(scenario_state), indent=2))

    write(
        os.path.join(run_path, "stats.json"), json.dumps([asdict_without_nones(stat) for stat in stats], indent=2)
    )
    write(
        os.path.join(run_path, "per_instance_stats.json"),
        json.dumps(list(map(asdict_without_nones, per_instance_stats)), indent=2),
    )
10,172 | import argparse
from tqdm import tqdm
import json
import math
import logging
from pathlib import Path
import time
import numpy as np
from transformers import AutoTokenizer, AutoConfig
import flexgen.apps.data_wrangle.utils.data_utils as data_utils
import flexgen.apps.data_wrangle.utils.prompt_utils as prompt_utils
from flexgen.apps.data_wrangle.utils import constants
from flexgen.apps.data_wrangle.utils.utils import compute_metrics, setup_logger
from flexgen.flex_opt import (Policy, OptLM, ExecutionEnv, CompressionConfig, str2bool)
def add_flexgen_args(parser):
    """Register FlexGen offloading/runtime options on an argparse parser."""
    parser.add_argument("--pad-to-seq-len", type=int)
    parser.add_argument("--model", type=str, default="facebook/opt-1.3b",
        help="The model name.")
    parser.add_argument("--path", type=str, default="~/opt_weights",
        help="The path to the model weights. If there are no cached weights, "
             "FlexGen will automatically download them from HuggingFace.")
    parser.add_argument("--run-path", type=str, default="runs")
    parser.add_argument("--offload-dir", type=str, default="~/flexgen_offload_dir",
        help="The directory to offload tensors. ")
    parser.add_argument("--gpu-batch-size", type=int, default=16)
    parser.add_argument("--num-gpu-batches", type=int, default=1)
    # Placement percentages; GPU/CPU pairs, the disk share is the remainder.
    parser.add_argument("--percent", nargs="+", type=int,
        default=[100, 0, 100, 0, 100, 0],
        help="Six numbers. They are "
             "the percentage of weight on GPU, "
             "the percentage of weight on CPU, "
             "the percentage of attention cache on GPU, "
             "the percentage of attention cache on CPU, "
             "the percentage of activations on GPU, "
             "the percentage of activations on CPU")
    parser.add_argument("--pin-weight", type=str2bool, nargs="?",
        const=True, default=True)
    parser.add_argument("--cpu-cache-compute", action="store_true")
    parser.add_argument("--compress-weight", action="store_true",
        help="Whether to compress weight.")
    parser.add_argument("--compress-cache", action="store_true",
        help="Whether to compress cache.")
The provided code snippet includes necessary dependencies for implementing the `parse_args` function. Write a Python function `def parse_args() -> argparse.Namespace` to solve the following problem:
Generate args.
Here is the function:
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the data-wrangling benchmark.

    Returns the populated argparse Namespace, including the FlexGen options
    registered by add_flexgen_args.
    """
    # Fix: description was a copy-paste leftover ("Simple calculator").
    parser = argparse.ArgumentParser(
        description="Run data wrangling tasks with FlexGen.")
    parser.add_argument(
        "--data_dir",
        type=str,
        help="Which data directory to run.",
        required=True,
    )
    parser.add_argument(
        "--output_dir", type=str, help="Output directory.", default="outputs"
    )
    # Manifest cache/client configuration.
    parser.add_argument(
        "--cache_name",
        type=str,
        help="Manifest cache type.",
        default="sqlite",
        choices=["redis", "sqlite", "noop"],
    )
    parser.add_argument(
        "--cache_connection",
        type=str,
        help="Manifest cache connection string.",
        default="fm_data_tasks.sqlite",
    )
    parser.add_argument(
        "--client_name",
        type=str,
        help="Manifest client type.",
        default="openai",
        choices=["openai", "opt", "huggingface"],
    )
    parser.add_argument(
        "--client_connection",
        type=str,
        help="Manifest client connection string.",
        default=None,
    )
    parser.add_argument(
        "--run_tag",
        type=str,
        help="Tag for run saving.",
        default="default",
    )
    parser.add_argument(
        "--overwrite_cache",
        action="store_true",
        help="Overwrite sqlite cache of input/output results.",
    )
    # Prompt construction options.
    parser.add_argument("--k", type=int, help="Number examples in prompt", default=1)
    parser.add_argument(
        "--sample_method",
        type=str,
        help="Example generation method",
        default="random",
        choices=["random", "manual", "validation_clusters"],
    )
    parser.add_argument("--seed", type=int, default=1234)
    parser.add_argument(
        "--class_balanced",
        help="Class balance training data. Good for classification tasks \
              with random prompts.",
        action="store_true",
    )
    parser.add_argument(
        "--sep_tok",
        type=str,
        help="Separate for attr: val pairs in row. Default is '.'.",
        default=".",
    )
    parser.add_argument(
        "--nan_tok",
        type=str,
        help="Token to represent nan entries. Default is 'nan'.",
        default="nan",
    )
    parser.add_argument(
        "--num_run",
        type=int,
        help="Number examples to run through model.",
        default=-1,
    )
    parser.add_argument(
        "--num_trials",
        type=int,
        help="Number trials to run. Results will be averaged with variance reported.",
        default=1,
    )
    parser.add_argument(
        "--num_print",
        type=int,
        help="Number example prompts to print.",
        default=10,
    )
    parser.add_argument(
        "--add_task_instruction",
        help="Add task instruction to the prompt before examples.",
        action="store_true",
    )
    parser.add_argument("--task_instruction_idx", type=int, default=0)
    parser.add_argument("--do_test", help="Run on test file.", action="store_true")
    parser.add_argument(
        "--dry_run", help="Dry run. Do not actually ping model.", action="store_true"
    )
    parser.add_argument(
        "--stop_token", help="Token to stop on for a given generated response", default="\n"
    )

    # Model args
    parser.add_argument("--temperature", type=float, help="Temperature.", default=0.0)
    parser.add_argument(
        "--max_tokens", type=int, help="Max tokens to generate.", default=3
    )
    parser.add_argument(
        "--batch_run", help="Use FlexGen batch inference.", action="store_true"
    )
    # FlexGen offloading/runtime options.
    add_flexgen_args(parser)
    args = parser.parse_args()
    return args
10,173 | import argparse
import dataclasses
import json
import logging
import math
import time
from pathlib import Path

import numpy as np
from tqdm import tqdm
from transformers import AutoTokenizer, AutoConfig

import flexgen.apps.data_wrangle.utils.data_utils as data_utils
import flexgen.apps.data_wrangle.utils.prompt_utils as prompt_utils
from flexgen.apps.data_wrangle.utils import constants
from flexgen.apps.data_wrangle.utils.utils import compute_metrics, setup_logger
from flexgen.flex_opt import (Policy, OptLM, ExecutionEnv, CompressionConfig, str2bool)
logger = logging.getLogger(__name__)
def get_tokenizer(name):
    """Load a left-padding tokenizer for `name` without BOS insertion."""
    # opt-175b ships no tokenizer of its own; opt-30b's is used in its place.
    source = 'facebook/opt-30b' if name == 'facebook/opt-175b' else name
    tokenizer = AutoTokenizer.from_pretrained(source, padding_side="left")
    tokenizer.add_bos_token = False
    if 'galactica' in name:
        # Galactica tokenizers lack pad/eos ids; take them from the config.
        config = AutoConfig.from_pretrained(name)
        tokenizer.pad_token = config.pad_token_id
        tokenizer.eos_token = config.eos_token_id
    return tokenizer
def compute_metrics(preds: list, golds: list, task: str):
    """Compute metrics for a data-wrangling task.

    Args:
        preds: model predictions (strings).
        golds: gold labels (strings).
        task: task name; selects how a prediction is judged correct.

    Returns:
        Tuple (precision, recall, accuracy, f1). Precision/recall/F1 are
        only meaningful for binary yes/no tasks; they are 0 otherwise.

    Raises:
        ValueError: for an unknown task name.

    NOTE(review): this shadows the compute_metrics imported from
    flexgen.apps.data_wrangle.utils.utils above -- confirm intentional.
    """
    mets = {"tp": 0, "tn": 0, "fp": 0, "fn": 0, "crc": 0, "total": 0}
    for pred, label in zip(preds, golds):
        label = label.strip().lower()
        pred = pred.strip().lower()
        mets["total"] += 1
        # Decide whether the prediction counts as correct for this task.
        if task in {
            "data_imputation",
            "entity_matching",
        }:
            crc = pred == label
        elif task in {"entity_matching", "schema_matching", "error_detection_spelling"}:
            # NOTE: "entity_matching" is unreachable here; the exact-match
            # branch above catches it first.
            crc = pred.startswith(label)
        elif task in {"error_detection"}:
            # Judge only the text after the last blank line.
            # Fix: removed a leftover breakpoint() that froze execution here.
            pred = pred.split("\n\n")[-1]
            crc = pred.endswith(label)
        else:
            raise ValueError(f"Unknown task: {task}")
        # Measure equal accuracy for generation
        if crc:
            mets["crc"] += 1
        # Confusion-matrix counts for binary yes/no tasks.
        if label == "yes":
            if crc:
                mets["tp"] += 1
            else:
                mets["fn"] += 1
        elif label == "no":
            if crc:
                mets["tn"] += 1
            else:
                mets["fp"] += 1
    # max(1, count) guards the integer denominators: when the count is 0 the
    # numerator tp is also 0, so the result is a correct 0.0.
    prec = mets["tp"] / max(1, (mets["tp"] + mets["fp"]))
    rec = mets["tp"] / max(1, (mets["tp"] + mets["fn"]))
    # Fix: guard empty input instead of raising ZeroDivisionError.
    acc = mets["crc"] / mets["total"] if mets["total"] else 0.0
    # Fix: the old max(1, prec + rec) denominator silently deflated F1
    # whenever 0 < prec + rec < 1; use an explicit zero guard instead.
    f1 = 2 * prec * rec / (prec + rec) if (prec + rec) > 0 else 0.0
    return prec, rec, acc, f1
@dataclasses.dataclass
class Policy:
    """FlexGen offloading policy: how weights, KV cache, and activations are
    split across GPU/CPU/disk, plus execution options.

    Fix: restored the @dataclasses.dataclass and @property decorators.
    Without them, positional construction `Policy(gpu_batch_size, ...)` (as
    done by the sibling `execute` code) has no matching __init__, and
    comparisons like `policy.act_disk_percent == 100` (as done by OptLM)
    would compare a bound method to an int and always be False.
    """
    gpu_batch_size: int
    num_gpu_batches: int

    # percent = a means a%
    w_gpu_percent: float
    w_cpu_percent: float
    cache_gpu_percent: float
    cache_cpu_percent: float
    act_gpu_percent: float
    act_cpu_percent: float

    # Whether to overlap the I/O and compute
    overlap: bool

    # Whether to separate attention and mlp as two layers
    sep_layer: bool

    # Whether to use pinned memory for weights on CPU
    pin_weight: bool

    # Whether to compute attention on CPU
    cpu_cache_compute: bool

    # Sparsity of attention weights
    attn_sparsity: float

    # Compress weights with group-wise quantization
    compress_weight: bool
    comp_weight_config: CompressionConfig

    # Compress KV cache with group-wise quantization
    compress_cache: bool
    comp_cache_config: CompressionConfig

    @property
    def w_disk_percent(self):
        # Whatever share of the weights is not on GPU/CPU lives on disk.
        return 100 - self.w_gpu_percent - self.w_cpu_percent

    @property
    def cache_disk_percent(self):
        return 100 - self.cache_gpu_percent - self.cache_cpu_percent

    @property
    def act_disk_percent(self):
        return 100 - self.act_gpu_percent - self.act_cpu_percent
class OptLM:
def __init__(self,
config: Union[str, OptConfig],
env: ExecutionEnv,
path: str,
policy: Policy):
if isinstance(config, str):
config = get_opt_config(config)
self.config = config
self.env = env
self.path = path
self.policy = policy
self.num_gpu_batches = policy.num_gpu_batches
layers = []
layers.append(InputEmbed(self.config, self.env, self.policy))
for i in range(self.config.num_hidden_layers):
if policy.sep_layer:
layers.append(SelfAttention(self.config, self.env, self.policy, i))
layers.append(MLP(self.config, self.env, self.policy, i))
else:
layers.append(TransformerLayer(self.config, self.env, self.policy, i))
layers.append(OutputEmbed(self.config, self.env, self.policy))
self.layers = layers
self.num_layers = len(layers)
if self.policy.act_gpu_percent == 100:
self.act_home = self.env.gpu
elif self.policy.act_cpu_percent == 100:
self.act_home = self.env.cpu
elif self.policy.act_disk_percent == 100:
self.act_home = self.env.disk
else:
raise NotImplementedError()
# CUDA streams
self.load_weight_stream = torch.cuda.Stream()
self.load_cache_stream = torch.cuda.Stream()
self.store_cache_stream = torch.cuda.Stream()
# Intermediate tensors
# The following buffers store values used
# for the i-th token, j-th layer, k-th gpu batch.
num_layers, num_gpu_batches = self.num_layers, self.policy.num_gpu_batches
# cache[j][k]
self.cache_home = array_2d(num_layers, num_gpu_batches, ValueHolder)
self.cache_read_buf = array_2d(num_layers, num_gpu_batches, ValueHolder)
self.cache_write_buf = array_2d(num_layers, num_gpu_batches, ValueHolder)
# weight[j]
self.weight_read_buf = array_1d(num_layers, ValueHolder)
# attention_mask[k]
self.attention_mask = array_1d(num_gpu_batches, ValueHolder)
self.task = None
self.init_all_weights()
def set_task(self, task):
self.task = task
for l in self.layers:
l.set_task(task)
def init_weight(self, j):
expanded_path = os.path.abspath(os.path.expanduser(
os.path.join(self.path, f"{self.config.name}-np")))
check_path = os.path.join(expanded_path, "decoder.embed_positions.weight")
if not os.path.exists(check_path) and DUMMY_WEIGHT not in check_path:
download_opt_weights(self.config.name, self.path)
self.layers[j].init_weight(self.weight_home[j], expanded_path)
def load_weight(self, i, j, k, overlap=True):
# Handle corner cases
if j == self.num_layers:
j = 0
i += 1
if i == self.execute_gen_len:
return
# Load from weight_home to weight_read_buf
if overlap:
with torch.cuda.stream(self.load_weight_stream):
self.layers[j].load_weight(self.weight_home[j], self.weight_read_buf[j], k)
else:
self.layers[j].load_weight(self.weight_home[j], self.weight_read_buf[j], k)
def delete_weight(self, j, k):
if k == 0:
for x in self.weight_home[j].pop():
if isinstance(x, ValueHolder):
for y in x.pop():
y.delete()
else:
x.delete()
def init_cache(self, j, k):
self.layers[j].init_cache_one_gpu_batch(self.cache_home[j][k])
def load_cache(self, i, j, k, overlap=True):
# Handle corner cases
if i == 0: # prefill, no cache
return
if k == self.num_gpu_batches:
k = 0
j += 1
if j == self.num_layers:
j = 0
i += 1
if i == self.execute_gen_len:
return
# Load from cache_home to cache_read_buf
if overlap:
with torch.cuda.stream(self.load_cache_stream):
self.layers[j].load_cache(self.cache_home[j][k], self.cache_read_buf[j][k], i)
else:
self.layers[j].load_cache(self.cache_home[j][k], self.cache_read_buf[j][k], i)
def store_cache(self, i, j, k, overlap=True):
# Handle corner cases
if k == -1:
k = self.num_gpu_batches - 1
j -= 1
if j == -1:
j = self.num_layers - 1
i -= 1
if i == -1:
return
if i == self.task.gen_len - 1: # last token, no need to store cache
self.cache_write_buf[j][k].pop()
return
# Store cache_write_buf to cache_home
# Delete cache_write_buf
if overlap:
with torch.cuda.stream(self.store_cache_stream):
self.layers[j].store_cache(self.cache_home[j][k], self.cache_write_buf[j][k], i)
else:
self.layers[j].store_cache(self.cache_home[j][k], self.cache_write_buf[j][k], i)
def delete_cache(self, j, k):
v = self.cache_home[j][k].pop()
if v:
for x in v:
x.delete()
def load_hidden(self, i, j, k):
# Handle corner cases
if k == self.num_gpu_batches:
k = 0
j += 1
if j == self.num_layers:
j = 0
i += 1
if i == self.execute_gen_len:
return
# Load to hidden states buffers
dst = self.layers[j].compute
if j == 0:
gpu_batch_size = self.policy.gpu_batch_size
left, right = k * gpu_batch_size, (k + 1) * gpu_batch_size
if i == 0: # load from the input ids
val = dst.allocate((gpu_batch_size, self.task.prompt_len), np.int32)
val.load_from_np(self.output_ids[left:right, :self.task.prompt_len])
else: # load from the last generated token
pos = self.task.prompt_len + i
val = dst.allocate((gpu_batch_size, 1), np.int32)
val.load_from_np(self.output_ids[left:right, pos-1:pos])
else: # load from the last layer
val = self.hidden[i][j-1][k].pop().move(dst)
self.hidden[i][j][k].store(val)
def store_hidden(self, i, j, k):
# Handle corner cases
if k == -1:
k = self.num_gpu_batches - 1
j -= 1
if j == -1:
j = self.num_layers - 1
i -= 1
if i == -1:
return
# Store to hidden states buffers
if j == self.num_layers - 1: # store to output
gpu_batch_size = self.policy.gpu_batch_size
left, right = k * gpu_batch_size, (k + 1) * gpu_batch_size
ids = self.hidden[i][j][k].pop().data.detach().cpu().numpy()
pos = self.task.prompt_len + i
if self.task.stop:
stopped = self.stopped[left:right]
self.output_ids[left:right, pos:pos+1] = np.where(
stopped, self.config.pad_token_id, ids)
stopped[:] = np.logical_or(stopped, ids == self.task.stop)
else:
self.output_ids[left:right, pos:pos+1] = ids
else: # move to home
x = self.hidden[i][j][k]
if x.val: # x may already be moved due to overlapping
x.val = x.val.move(self.act_home)
def compute_layer(self, i, j, k):
# Update the hidden in place
# Clear the weight_read_buf if it is the last gpu batch
# Clear the cache_read_buf
# Run layer computation
self.layers[j].forward(self.hidden[i][j][k], self.cache_read_buf[j][k],
self.weight_read_buf[j], self.attention_mask[k],
self.cache_write_buf[j][k], i, k)
def sync(self):
self.env.disk.synchronize()
torch.cuda.synchronize()
def init_all_weights(self):
self.weight_home = array_1d(self.num_layers, ValueHolder)
for j in range(self.num_layers):
self.init_weight(j)
def delete_all_weights(self):
for j in range(self.num_layers):
self.delete_weight(j, 0)
def update_attention_mask(self, i, k):
if i > 0:
mask = self.attention_mask[k]
assert mask.val is not None
mask.val = mask.val.device.extend_attention_mask(mask.val, [True])
return
gpu_batch_size = self.policy.gpu_batch_size
left = k * gpu_batch_size
right = left + gpu_batch_size
input_ids = self.output_ids[left:right, :self.task.prompt_len]
attention_compute = (self.env.cpu if self.policy.cpu_cache_compute
else self.env.gpu)
val = attention_compute.allocate(
(self.policy.gpu_batch_size, self.task.prompt_len), bool)
val.load_from_np((input_ids != self.config.pad_token_id))
self.attention_mask[k].store(val)
def generate(self,
inputs: Union[np.array, List[List[int]]],
max_new_tokens: int = 32,
do_sample: bool = False,
temperature: float = 1.0,
stop: Optional[int] = None,
debug_mode: Optional[str] = None,
cut_gen_len: Optional[int] = None,
verbose: int = 0):
task = Task(
inputs=inputs,
prompt_len=len(inputs[0]),
gen_len=max_new_tokens,
cut_gen_len=cut_gen_len,
do_sample=do_sample,
temperature=temperature,
stop=stop,
)
num_layers = self.num_layers
num_gpu_batches = self.num_gpu_batches
gpu_batch_size = self.policy.gpu_batch_size
overlap = self.policy.overlap
prompt_len, gen_len = task.prompt_len, task.gen_len
self.execute_gen_len = task.cut_gen_len if task.cut_gen_len else task.gen_len
# Output token ids
self.output_ids = np.full((len(task.inputs), prompt_len + gen_len),
self.config.pad_token_id, dtype=np.int32)
self.stopped = np.zeros((len(task.inputs), 1), dtype=bool)
self.output_ids[:, :prompt_len] = np.asarray(task.inputs)
assert gpu_batch_size * num_gpu_batches == len(task.inputs)
# Intermediate tensors
# The following buffers store values used
# for the i-th token, j-th layer, k-th gpu batch.
num_layers, num_gpu_batches = self.num_layers, self.policy.num_gpu_batches
for j in range(num_layers):
for k in range(num_gpu_batches):
self.cache_home[j][k].clear()
self.cache_read_buf[j][k].clear()
self.cache_write_buf[j][k].clear()
for j in range(num_layers):
self.weight_read_buf[j].clear()
for k in range(num_gpu_batches):
self.attention_mask[k].clear()
self.hidden = array_3d(gen_len, num_layers, num_gpu_batches, ValueHolder)
# Init cache
self.set_task(task)
for j in range(num_layers):
for k in range(num_gpu_batches):
self.init_cache(j, k)
if self.policy.cpu_cache_compute:
self.env.cpu.init_attention_compute_workspace(self.config, self.task, self.policy)
# Generate
if debug_mode is None:
if not overlap:
# No overlap, easy to understand, suitable for debugging
self.generation_loop_normal()
else:
# Overlap I/O and compute
if num_gpu_batches == 1:
self.generation_loop_overlap_single_batch()
else:
self.generation_loop_overlap_multi_batch()
elif debug_mode == "fewer_batch":
# Run fewer layeres and batches for debugging
if num_gpu_batches == 1:
self.generation_loop_debug_single_batch()
else:
self.generation_loop_debug_multi_batch()
elif debug_mode == "breakdown":
# No overlap, fewer batches, execution time breakdown
self.generation_loop_debug_normal()
else:
raise ValueError("Invalid debug mode: {debug_mode}")
# Delete cache
for j in range(num_layers):
for k in range(num_gpu_batches):
self.delete_cache(j, k)
if self.policy.cpu_cache_compute:
self.env.cpu.del_attention_compute_workspace()
return self.output_ids
def generation_loop_normal(self):
for i in range(self.execute_gen_len):
timers("generate").start()
for k in range(self.num_gpu_batches):
self.update_attention_mask(i, k)
for j in range(self.num_layers):
for k in range(self.num_gpu_batches):
self.load_weight(i, j, k, overlap=False)
for k in range(self.num_gpu_batches):
self.load_cache(i, j, k, overlap=False)
self.load_hidden(i, j, k)
self.compute_layer(i, j, k)
self.store_hidden(i, j, k)
self.store_cache(i, j, k, overlap=False)
timers("generate").stop()
def generation_loop_debug_normal(self):
execute_num_batches = 20
batch_ct = 0
pbar = tqdm(total=execute_num_batches)
timers("prefill_total").reset()
timers("decoding_gpu_batch").reset()
timers("load_weight").reset()
timers("load_cache_prefill").reset()
timers("load_cache_decoding").reset()
timers("store_cache_prefill").reset()
timers("store_cache_decoding").reset()
timers("compute_layer_prefill").reset()
timers("compute_layer_decoding").reset()
load_weight_timer = timers("load_weight")
for i in range(self.execute_gen_len):
if i == 0:
timers("prefill_total").start()
load_cache_timer = timers("load_cache_prefill")
store_cache_timer = timers("store_cache_prefill")
compute_layer_timer = timers("compute_layer_prefill")
else:
load_cache_timer = timers("load_cache_decoding")
store_cache_timer = timers("store_cache_decoding")
compute_layer_timer = timers("compute_layer_decoding")
for k in range(self.num_gpu_batches):
self.update_attention_mask(i, k)
for j in range(self.num_layers):
if i > 0: timers("decoding_gpu_batch").start()
load_weight_timer.start(self.sync)
for k in range(self.num_gpu_batches):
self.load_weight(i, j, k)
load_weight_timer.stop(self.sync)
for k in range(self.num_gpu_batches):
load_cache_timer.start(self.sync)
self.load_cache(i, j, k)
load_cache_timer.stop(self.sync)
self.load_hidden(i, j, k)
compute_layer_timer.start(self.sync)
self.compute_layer(i, j, k)
compute_layer_timer.stop(self.sync)
self.store_hidden(i, j, k)
store_cache_timer.start(self.sync)
self.store_cache(i, j, k)
store_cache_timer.stop(self.sync)
if i > 0:
timers("decoding_gpu_batch").stop()
pbar.update(1)
batch_ct += 1
if batch_ct >= execute_num_batches: break
if batch_ct >= execute_num_batches: break
if i == 0: timers("prefill_total").stop(self.sync)
# Convert "decoding_gpu_batch" timer to "generate" timer
batch_cost = np.mean(timers("decoding_gpu_batch").costs[10:])
for i in range(self.execute_gen_len):
if i == 0:
timers("generate").costs.append(timers("prefill_total").costs[0])
else:
timers("generate").costs.append(self.num_layers * batch_cost)
# Debug the costs of individual functions
print(f"#layers: {self.num_layers}")
print(f"#batches prefill: "
f"{self.num_layers * self.num_gpu_batches}")
print(f"#batches decoding: "
f"{(self.task.gen_len - 1) * self.num_layers * self.num_gpu_batches}")
print(f"load_weight (per-layer)"
f": {np.mean(timers('load_weight').costs):.6f} s")
for stage in ["prefill", "decoding"]:
for func in ["load_cache", "store_cache", "compute_layer"]:
name = func + "_" + stage
costs = timers(name).costs
print(f"{name:22s} (per-batch): {np.mean(costs):.6f} s")
def generation_loop_overlap_single_batch(self):
# Prologue
for k in range(self.num_gpu_batches):
self.load_weight(0, 0, k)
self.sync()
# Generate
for i in range(self.execute_gen_len):
timers("generate").start()
self.update_attention_mask(i, 0)
for j in range(self.num_layers):
self.load_weight(i, j+1, 0)
self.load_cache(i, j+1, 0)
self.load_hidden(i, j, 0)
self.compute_layer(i, j, 0)
self.store_cache(i, j-1, 0)
self.store_hidden(i, j, 0)
self.sync()
timers("generate").stop()
if self.task.stop and np.all(self.stopped):
break
def generation_loop_overlap_multi_batch(self):
# Prologue
for k in range(self.num_gpu_batches):
self.load_weight(0, 0, k)
self.load_hidden(0, 0, 0)
self.sync()
# Generate
for i in range(self.execute_gen_len):
timers("generate").start()
for k in range(self.num_gpu_batches):
self.update_attention_mask(i, k)
for j in range(self.num_layers):
for k in range(self.num_gpu_batches):
self.load_weight(i, j+1, k)
self.load_cache(i, j, k+1)
self.store_hidden(i, j, k-1)
self.load_hidden(i, j, k+1)
self.compute_layer(i, j, k)
self.store_cache(i, j, k-1)
self.sync()
timers("generate").stop()
# Epilogue
self.store_hidden(
self.execute_gen_len-1, self.num_layers-1, self.num_gpu_batches-1)
def generation_loop_debug_single_batch(self):
execute_num_batches = 20
batch_ct = 0
pbar = tqdm(total=execute_num_batches)
timers("prefill").reset()
timers("decoding_gpu_batch").reset()
# Prologue
for k in range(self.num_gpu_batches):
self.load_weight(0, 0, k)
self.sync()
# Generate
for i in range(self.execute_gen_len):
if i == 0: timers("prefill").start()
self.update_attention_mask(i, 0)
for j in range(self.num_layers):
if i > 0: timers("decoding_gpu_batch").start()
self.load_weight(i, j+1, 0)
self.load_cache(i, j+1, 0)
self.load_hidden(i, j, 0)
self.compute_layer(i, j, 0)
self.store_cache(i, j-1, 0)
self.store_hidden(i, j, 0)
self.sync()
if i > 0:
timers("decoding_gpu_batch").stop()
pbar.update(1)
batch_ct += 1
if batch_ct >= execute_num_batches: break
if batch_ct >= execute_num_batches: break
if i == 0: timers("prefill").stop()
# Convert "decoding_gpu_batch" timer to "generate" timer
batch_cost = np.mean(timers("decoding_gpu_batch").costs[10:])
for i in range(self.execute_gen_len):
if i == 0:
timers("generate").costs.append(timers("prefill").costs[0])
else:
timers("generate").costs.append(self.num_layers * batch_cost)
def generation_loop_debug_multi_batch(self):
execute_num_batches = 20
batch_ct = 0
pbar = tqdm(total=execute_num_batches)
timers("prefill").reset()
timers("decoding_gpu_batch").reset()
# Prologue
for k in range(self.num_gpu_batches):
self.load_weight(0, 0, k)
self.load_hidden(0, 0, 0)
self.sync()
# Generate
for i in range(self.execute_gen_len):
if i == 0: timers("prefill").start()
for k in range(self.num_gpu_batches):
self.update_attention_mask(i, k)
for j in range(self.num_layers):
if i > 0: timers("decoding_gpu_batch").start()
for k in range(self.num_gpu_batches):
self.load_weight(i, j+1, k)
self.load_cache(i, j, k+1)
self.store_hidden(i, j, k-1)
self.load_hidden(i, j, k+1)
self.compute_layer(i, j, k)
self.store_cache(i, j, k-1)
self.sync()
if i > 0:
timers("decoding_gpu_batch").stop()
pbar.update(1)
batch_ct += 1
if batch_ct >= execute_num_batches: break
if batch_ct >= execute_num_batches: break
if i == 0: timers("prefill").stop()
# Convert "decoding_gpu_batch" timer to "generate" timer
batch_cost = np.mean(timers("decoding_gpu_batch").costs[10:])
for i in range(self.execute_gen_len):
if i == 0:
timers("generate").costs.append(timers("prefill").costs[0])
else:
timers("generate").costs.append(self.num_layers * batch_cost)
def __del__(self):
self.delete_all_weights()
def single_query_test(args, task_instruction, test_data, task, pd_data_files, test_file):
# Initialize environment
tokenizer = get_tokenizer(args.model)
env = ExecutionEnv.create(args.offload_dir)
# Offloading policy
policy = Policy(1, 1,
args.percent[0], args.percent[1],
args.percent[2], args.percent[3],
args.percent[4], args.percent[5],
overlap=True, sep_layer=True, pin_weight=args.pin_weight,
cpu_cache_compute=args.cpu_cache_compute, attn_sparsity=1.0,
compress_weight=args.compress_weight,
comp_weight_config=CompressionConfig(
num_bits=4, group_size=64,
group_dim=0, symmetric=False),
compress_cache=args.compress_cache,
comp_cache_config=CompressionConfig(
num_bits=4, group_size=64,
group_dim=2, symmetric=False))
logger.info(f"Init weights begin.")
tic = time.time()
model = OptLM(args.model, env, args.path, policy)
logger.info(f"Init weights end. Elapsed: {time.time() - tic:.2f} s")
if args.add_task_instruction:
prompt = lambda x: f"{task_instruction} {x}"
else:
prompt = lambda x: f"{x}"
trial_metrics = {"prec": [], "rec": [], "f1": [], "acc": []}
saved_prefix = None
tic = time.time()
for trial_num in range(args.num_trials):
np.random.seed(args.seed + trial_num)
queries = []
for _, row in test_data.iterrows():
serialized_r = row["text"]
if args.sample_method == "manual":
prefix_exs = prompt_utils.get_manual_prompt(args.data_dir, row)
elif args.sample_method == "validation_clusters":
if saved_prefix is None:
logger.info("Generating validation cluster prompt.")
saved_prefix = prompt_utils.get_validation_prompt(
args.validation_path,
num_examples=args.k,
task=task,
)
prefix_exs = saved_prefix
else:
if saved_prefix is None:
saved_prefix = prompt_utils.get_random_prompt(
pd_data_files["train"], num_examples=args.k
)
prefix_exs = saved_prefix
queries.append((prefix_exs + "\n" + serialized_r).strip())
gt = test_data["label_str"]
preds = []
idx = 0
for _ in range(args.num_print):
logger.info(prompt(queries[idx]))
tic = time.time()
input_ids_tmp = tokenizer(prompt(queries[idx]), padding="max_length",
return_tensors="np",
max_length=args.pad_to_seq_len).input_ids
logger.info(input_ids_tmp.shape)
output_ids_tmp = model.generate(input_ids_tmp,
do_sample=True,
temperature=args.temperature,
max_new_tokens=args.max_tokens,
stop=args.stop_token)
input_strs = tokenizer.batch_decode(input_ids_tmp, skip_special_tokens=True)
output_strs = tokenizer.batch_decode(output_ids_tmp, skip_special_tokens=True)
anwsers = [ output_strs[i][len(input_strs[i]):] for i in range(len(input_strs))]
logger.info(f"====> {anwsers[0]} <====")
preds.extend(anwsers)
idx += 1
logger.info(f"Current Inference query elapsed: {time.time() - tic:.2f} s")
# Save trial predictions
save_data = test_data.iloc[:args.num_print].copy(deep=True).reset_index()
gt = gt[:args.num_print]
save_data["preds"] = preds
save_data["queries"] = queries[:args.num_print]
prec, rec, acc, f1 = compute_metrics(preds, gt, task)
logger.info(
f"Metrics Trial {trial_num}\n"
f"Prec: {prec:.3f} Recall: {rec:.3f} Acc: {acc:.3f} F1: {f1:.3f}"
)
trial_metrics["rec"].append(rec)
trial_metrics["prec"].append(prec)
trial_metrics["acc"].append(acc)
trial_metrics["f1"].append(f1)
output_file = (
Path(args.output_dir)
/ f"{Path(args.data_dir).stem}"
/ f"{test_file}"
/ f"{args.run_tag}"
/ f"{args.k}k"
f"_{int(args.add_task_instruction)}inst"
f"_{int(args.class_balanced)}cb"
f"_{args.sample_method}"
f"_{args.model}"
f"_{args.num_print}run"
f"_{int(args.dry_run)}dry" / f"trial_{trial_num}.feather"
)
output_file.parent.mkdir(parents=True, exist_ok=True)
logger.info(f"Saved to {output_file}")
save_data.to_feather(output_file)
for k, values in list(trial_metrics.items()):
trial_metrics[f"{k}_avg"] = np.average(values)
trial_metrics[f"{k}_std"] = np.std(values)
output_metrics = output_file.parent / "metrics.json"
json.dump(trial_metrics, open(output_metrics, "w"))
logger.info(f"Final Metrics {json.dumps(trial_metrics, indent=4)}")
logger.info(f"Metrics dumped to {output_metrics}")
# Shutdown
logger.info("Shutdown FlexGen...")
env.close_copy_threads() | null |
10,174 | import argparse
from tqdm import tqdm
import json
import math
import logging
from pathlib import Path
import time
import numpy as np
from transformers import AutoTokenizer, AutoConfig
import flexgen.apps.data_wrangle.utils.data_utils as data_utils
import flexgen.apps.data_wrangle.utils.prompt_utils as prompt_utils
from flexgen.apps.data_wrangle.utils import constants
from flexgen.apps.data_wrangle.utils.utils import compute_metrics, setup_logger
from flexgen.flex_opt import (Policy, OptLM, ExecutionEnv, CompressionConfig, str2bool)
logger = logging.getLogger(__name__)
def get_tokenizer(name):
if name == 'facebook/opt-175b':
tokenizer = AutoTokenizer.from_pretrained('facebook/opt-30b', padding_side="left")
else:
tokenizer = AutoTokenizer.from_pretrained(name, padding_side="left")
tokenizer.add_bos_token = False
if 'galactica' in name:
config = AutoConfig.from_pretrained(name)
tokenizer.pad_token = config.pad_token_id
tokenizer.eos_token = config.eos_token_id
return tokenizer
def compute_metrics(preds: List, golds: List, task: str):
"""Compute metrics."""
mets = {"tp": 0, "tn": 0, "fp": 0, "fn": 0, "crc": 0, "total": 0}
for pred, label in zip(preds, golds):
label = label.strip().lower()
pred = pred.strip().lower()
mets["total"] += 1
if task in {
"data_imputation",
"entity_matching",
}:
crc = pred == label
elif task in {"entity_matching", "schema_matching", "error_detection_spelling"}:
crc = pred.startswith(label)
elif task in {"error_detection"}:
pred = pred.split("\n\n")[-1]
breakpoint()
crc = pred.endswith(label)
else:
raise ValueError(f"Unknown task: {task}")
# Measure equal accuracy for generation
if crc:
mets["crc"] += 1
if label == "yes":
if crc:
mets["tp"] += 1
else:
mets["fn"] += 1
elif label == "no":
if crc:
mets["tn"] += 1
else:
mets["fp"] += 1
prec = mets["tp"] / max(1, (mets["tp"] + mets["fp"]))
rec = mets["tp"] / max(1, (mets["tp"] + mets["fn"]))
acc = mets["crc"] / mets["total"]
f1 = 2 * prec * rec / max(1, (prec + rec))
return prec, rec, acc, f1
class Policy:
gpu_batch_size: int
num_gpu_batches: int
# percent = a means a%
w_gpu_percent: float
w_cpu_percent: float
cache_gpu_percent: float
cache_cpu_percent: float
act_gpu_percent: float
act_cpu_percent: float
# Whether to overlap the I/O and compute
overlap: bool
# Whether to separate attention and mlp as two layers
sep_layer: bool
# Whether to use pinned memory for weights on CPU
pin_weight: bool
# Whether to compute attention on CPU
cpu_cache_compute: bool
# Sparsity of attention weights
attn_sparsity: float
# Compress weights with group-wise quantization
compress_weight: bool
comp_weight_config: CompressionConfig
# Compress KV cache with group-wise quantization
compress_cache: bool
comp_cache_config: CompressionConfig
def w_disk_percent(self):
return 100 - self.w_gpu_percent - self.w_cpu_percent
def cache_disk_percent(self):
return 100 - self.cache_gpu_percent - self.cache_cpu_percent
def act_disk_percent(self):
return 100 - self.act_gpu_percent - self.act_cpu_percent
class OptLM:
def __init__(self,
config: Union[str, OptConfig],
env: ExecutionEnv,
path: str,
policy: Policy):
if isinstance(config, str):
config = get_opt_config(config)
self.config = config
self.env = env
self.path = path
self.policy = policy
self.num_gpu_batches = policy.num_gpu_batches
layers = []
layers.append(InputEmbed(self.config, self.env, self.policy))
for i in range(self.config.num_hidden_layers):
if policy.sep_layer:
layers.append(SelfAttention(self.config, self.env, self.policy, i))
layers.append(MLP(self.config, self.env, self.policy, i))
else:
layers.append(TransformerLayer(self.config, self.env, self.policy, i))
layers.append(OutputEmbed(self.config, self.env, self.policy))
self.layers = layers
self.num_layers = len(layers)
if self.policy.act_gpu_percent == 100:
self.act_home = self.env.gpu
elif self.policy.act_cpu_percent == 100:
self.act_home = self.env.cpu
elif self.policy.act_disk_percent == 100:
self.act_home = self.env.disk
else:
raise NotImplementedError()
# CUDA streams
self.load_weight_stream = torch.cuda.Stream()
self.load_cache_stream = torch.cuda.Stream()
self.store_cache_stream = torch.cuda.Stream()
# Intermediate tensors
# The following buffers store values used
# for the i-th token, j-th layer, k-th gpu batch.
num_layers, num_gpu_batches = self.num_layers, self.policy.num_gpu_batches
# cache[j][k]
self.cache_home = array_2d(num_layers, num_gpu_batches, ValueHolder)
self.cache_read_buf = array_2d(num_layers, num_gpu_batches, ValueHolder)
self.cache_write_buf = array_2d(num_layers, num_gpu_batches, ValueHolder)
# weight[j]
self.weight_read_buf = array_1d(num_layers, ValueHolder)
# attention_mask[k]
self.attention_mask = array_1d(num_gpu_batches, ValueHolder)
self.task = None
self.init_all_weights()
def set_task(self, task):
self.task = task
for l in self.layers:
l.set_task(task)
def init_weight(self, j):
expanded_path = os.path.abspath(os.path.expanduser(
os.path.join(self.path, f"{self.config.name}-np")))
check_path = os.path.join(expanded_path, "decoder.embed_positions.weight")
if not os.path.exists(check_path) and DUMMY_WEIGHT not in check_path:
download_opt_weights(self.config.name, self.path)
self.layers[j].init_weight(self.weight_home[j], expanded_path)
def load_weight(self, i, j, k, overlap=True):
# Handle corner cases
if j == self.num_layers:
j = 0
i += 1
if i == self.execute_gen_len:
return
# Load from weight_home to weight_read_buf
if overlap:
with torch.cuda.stream(self.load_weight_stream):
self.layers[j].load_weight(self.weight_home[j], self.weight_read_buf[j], k)
else:
self.layers[j].load_weight(self.weight_home[j], self.weight_read_buf[j], k)
def delete_weight(self, j, k):
if k == 0:
for x in self.weight_home[j].pop():
if isinstance(x, ValueHolder):
for y in x.pop():
y.delete()
else:
x.delete()
def init_cache(self, j, k):
self.layers[j].init_cache_one_gpu_batch(self.cache_home[j][k])
def load_cache(self, i, j, k, overlap=True):
# Handle corner cases
if i == 0: # prefill, no cache
return
if k == self.num_gpu_batches:
k = 0
j += 1
if j == self.num_layers:
j = 0
i += 1
if i == self.execute_gen_len:
return
# Load from cache_home to cache_read_buf
if overlap:
with torch.cuda.stream(self.load_cache_stream):
self.layers[j].load_cache(self.cache_home[j][k], self.cache_read_buf[j][k], i)
else:
self.layers[j].load_cache(self.cache_home[j][k], self.cache_read_buf[j][k], i)
def store_cache(self, i, j, k, overlap=True):
# Handle corner cases
if k == -1:
k = self.num_gpu_batches - 1
j -= 1
if j == -1:
j = self.num_layers - 1
i -= 1
if i == -1:
return
if i == self.task.gen_len - 1: # last token, no need to store cache
self.cache_write_buf[j][k].pop()
return
# Store cache_write_buf to cache_home
# Delete cache_write_buf
if overlap:
with torch.cuda.stream(self.store_cache_stream):
self.layers[j].store_cache(self.cache_home[j][k], self.cache_write_buf[j][k], i)
else:
self.layers[j].store_cache(self.cache_home[j][k], self.cache_write_buf[j][k], i)
def delete_cache(self, j, k):
v = self.cache_home[j][k].pop()
if v:
for x in v:
x.delete()
def load_hidden(self, i, j, k):
# Handle corner cases
if k == self.num_gpu_batches:
k = 0
j += 1
if j == self.num_layers:
j = 0
i += 1
if i == self.execute_gen_len:
return
# Load to hidden states buffers
dst = self.layers[j].compute
if j == 0:
gpu_batch_size = self.policy.gpu_batch_size
left, right = k * gpu_batch_size, (k + 1) * gpu_batch_size
if i == 0: # load from the input ids
val = dst.allocate((gpu_batch_size, self.task.prompt_len), np.int32)
val.load_from_np(self.output_ids[left:right, :self.task.prompt_len])
else: # load from the last generated token
pos = self.task.prompt_len + i
val = dst.allocate((gpu_batch_size, 1), np.int32)
val.load_from_np(self.output_ids[left:right, pos-1:pos])
else: # load from the last layer
val = self.hidden[i][j-1][k].pop().move(dst)
self.hidden[i][j][k].store(val)
def store_hidden(self, i, j, k):
# Handle corner cases
if k == -1:
k = self.num_gpu_batches - 1
j -= 1
if j == -1:
j = self.num_layers - 1
i -= 1
if i == -1:
return
# Store to hidden states buffers
if j == self.num_layers - 1: # store to output
gpu_batch_size = self.policy.gpu_batch_size
left, right = k * gpu_batch_size, (k + 1) * gpu_batch_size
ids = self.hidden[i][j][k].pop().data.detach().cpu().numpy()
pos = self.task.prompt_len + i
if self.task.stop:
stopped = self.stopped[left:right]
self.output_ids[left:right, pos:pos+1] = np.where(
stopped, self.config.pad_token_id, ids)
stopped[:] = np.logical_or(stopped, ids == self.task.stop)
else:
self.output_ids[left:right, pos:pos+1] = ids
else: # move to home
x = self.hidden[i][j][k]
if x.val: # x may already be moved due to overlapping
x.val = x.val.move(self.act_home)
def compute_layer(self, i, j, k):
# Update the hidden in place
# Clear the weight_read_buf if it is the last gpu batch
# Clear the cache_read_buf
# Run layer computation
self.layers[j].forward(self.hidden[i][j][k], self.cache_read_buf[j][k],
self.weight_read_buf[j], self.attention_mask[k],
self.cache_write_buf[j][k], i, k)
def sync(self):
self.env.disk.synchronize()
torch.cuda.synchronize()
def init_all_weights(self):
self.weight_home = array_1d(self.num_layers, ValueHolder)
for j in range(self.num_layers):
self.init_weight(j)
def delete_all_weights(self):
for j in range(self.num_layers):
self.delete_weight(j, 0)
def update_attention_mask(self, i, k):
if i > 0:
mask = self.attention_mask[k]
assert mask.val is not None
mask.val = mask.val.device.extend_attention_mask(mask.val, [True])
return
gpu_batch_size = self.policy.gpu_batch_size
left = k * gpu_batch_size
right = left + gpu_batch_size
input_ids = self.output_ids[left:right, :self.task.prompt_len]
attention_compute = (self.env.cpu if self.policy.cpu_cache_compute
else self.env.gpu)
val = attention_compute.allocate(
(self.policy.gpu_batch_size, self.task.prompt_len), bool)
val.load_from_np((input_ids != self.config.pad_token_id))
self.attention_mask[k].store(val)
def generate(self,
inputs: Union[np.array, List[List[int]]],
max_new_tokens: int = 32,
do_sample: bool = False,
temperature: float = 1.0,
stop: Optional[int] = None,
debug_mode: Optional[str] = None,
cut_gen_len: Optional[int] = None,
verbose: int = 0):
task = Task(
inputs=inputs,
prompt_len=len(inputs[0]),
gen_len=max_new_tokens,
cut_gen_len=cut_gen_len,
do_sample=do_sample,
temperature=temperature,
stop=stop,
)
num_layers = self.num_layers
num_gpu_batches = self.num_gpu_batches
gpu_batch_size = self.policy.gpu_batch_size
overlap = self.policy.overlap
prompt_len, gen_len = task.prompt_len, task.gen_len
self.execute_gen_len = task.cut_gen_len if task.cut_gen_len else task.gen_len
# Output token ids
self.output_ids = np.full((len(task.inputs), prompt_len + gen_len),
self.config.pad_token_id, dtype=np.int32)
self.stopped = np.zeros((len(task.inputs), 1), dtype=bool)
self.output_ids[:, :prompt_len] = np.asarray(task.inputs)
assert gpu_batch_size * num_gpu_batches == len(task.inputs)
# Intermediate tensors
# The following buffers store values used
# for the i-th token, j-th layer, k-th gpu batch.
num_layers, num_gpu_batches = self.num_layers, self.policy.num_gpu_batches
for j in range(num_layers):
for k in range(num_gpu_batches):
self.cache_home[j][k].clear()
self.cache_read_buf[j][k].clear()
self.cache_write_buf[j][k].clear()
for j in range(num_layers):
self.weight_read_buf[j].clear()
for k in range(num_gpu_batches):
self.attention_mask[k].clear()
self.hidden = array_3d(gen_len, num_layers, num_gpu_batches, ValueHolder)
# Init cache
self.set_task(task)
for j in range(num_layers):
for k in range(num_gpu_batches):
self.init_cache(j, k)
if self.policy.cpu_cache_compute:
self.env.cpu.init_attention_compute_workspace(self.config, self.task, self.policy)
# Generate
if debug_mode is None:
if not overlap:
# No overlap, easy to understand, suitable for debugging
self.generation_loop_normal()
else:
# Overlap I/O and compute
if num_gpu_batches == 1:
self.generation_loop_overlap_single_batch()
else:
self.generation_loop_overlap_multi_batch()
elif debug_mode == "fewer_batch":
# Run fewer layeres and batches for debugging
if num_gpu_batches == 1:
self.generation_loop_debug_single_batch()
else:
self.generation_loop_debug_multi_batch()
elif debug_mode == "breakdown":
# No overlap, fewer batches, execution time breakdown
self.generation_loop_debug_normal()
else:
raise ValueError("Invalid debug mode: {debug_mode}")
# Delete cache
for j in range(num_layers):
for k in range(num_gpu_batches):
self.delete_cache(j, k)
if self.policy.cpu_cache_compute:
self.env.cpu.del_attention_compute_workspace()
return self.output_ids
def generation_loop_normal(self):
for i in range(self.execute_gen_len):
timers("generate").start()
for k in range(self.num_gpu_batches):
self.update_attention_mask(i, k)
for j in range(self.num_layers):
for k in range(self.num_gpu_batches):
self.load_weight(i, j, k, overlap=False)
for k in range(self.num_gpu_batches):
self.load_cache(i, j, k, overlap=False)
self.load_hidden(i, j, k)
self.compute_layer(i, j, k)
self.store_hidden(i, j, k)
self.store_cache(i, j, k, overlap=False)
timers("generate").stop()
def generation_loop_debug_normal(self):
execute_num_batches = 20
batch_ct = 0
pbar = tqdm(total=execute_num_batches)
timers("prefill_total").reset()
timers("decoding_gpu_batch").reset()
timers("load_weight").reset()
timers("load_cache_prefill").reset()
timers("load_cache_decoding").reset()
timers("store_cache_prefill").reset()
timers("store_cache_decoding").reset()
timers("compute_layer_prefill").reset()
timers("compute_layer_decoding").reset()
load_weight_timer = timers("load_weight")
for i in range(self.execute_gen_len):
if i == 0:
timers("prefill_total").start()
load_cache_timer = timers("load_cache_prefill")
store_cache_timer = timers("store_cache_prefill")
compute_layer_timer = timers("compute_layer_prefill")
else:
load_cache_timer = timers("load_cache_decoding")
store_cache_timer = timers("store_cache_decoding")
compute_layer_timer = timers("compute_layer_decoding")
for k in range(self.num_gpu_batches):
self.update_attention_mask(i, k)
for j in range(self.num_layers):
if i > 0: timers("decoding_gpu_batch").start()
load_weight_timer.start(self.sync)
for k in range(self.num_gpu_batches):
self.load_weight(i, j, k)
load_weight_timer.stop(self.sync)
for k in range(self.num_gpu_batches):
load_cache_timer.start(self.sync)
self.load_cache(i, j, k)
load_cache_timer.stop(self.sync)
self.load_hidden(i, j, k)
compute_layer_timer.start(self.sync)
self.compute_layer(i, j, k)
compute_layer_timer.stop(self.sync)
self.store_hidden(i, j, k)
store_cache_timer.start(self.sync)
self.store_cache(i, j, k)
store_cache_timer.stop(self.sync)
if i > 0:
timers("decoding_gpu_batch").stop()
pbar.update(1)
batch_ct += 1
if batch_ct >= execute_num_batches: break
if batch_ct >= execute_num_batches: break
if i == 0: timers("prefill_total").stop(self.sync)
# Convert "decoding_gpu_batch" timer to "generate" timer
batch_cost = np.mean(timers("decoding_gpu_batch").costs[10:])
for i in range(self.execute_gen_len):
if i == 0:
timers("generate").costs.append(timers("prefill_total").costs[0])
else:
timers("generate").costs.append(self.num_layers * batch_cost)
# Debug the costs of individual functions
print(f"#layers: {self.num_layers}")
print(f"#batches prefill: "
f"{self.num_layers * self.num_gpu_batches}")
print(f"#batches decoding: "
f"{(self.task.gen_len - 1) * self.num_layers * self.num_gpu_batches}")
print(f"load_weight (per-layer)"
f": {np.mean(timers('load_weight').costs):.6f} s")
for stage in ["prefill", "decoding"]:
for func in ["load_cache", "store_cache", "compute_layer"]:
name = func + "_" + stage
costs = timers(name).costs
print(f"{name:22s} (per-batch): {np.mean(costs):.6f} s")
def generation_loop_overlap_single_batch(self):
# Prologue
for k in range(self.num_gpu_batches):
self.load_weight(0, 0, k)
self.sync()
# Generate
for i in range(self.execute_gen_len):
timers("generate").start()
self.update_attention_mask(i, 0)
for j in range(self.num_layers):
self.load_weight(i, j+1, 0)
self.load_cache(i, j+1, 0)
self.load_hidden(i, j, 0)
self.compute_layer(i, j, 0)
self.store_cache(i, j-1, 0)
self.store_hidden(i, j, 0)
self.sync()
timers("generate").stop()
if self.task.stop and np.all(self.stopped):
break
def generation_loop_overlap_multi_batch(self):
# Prologue
for k in range(self.num_gpu_batches):
self.load_weight(0, 0, k)
self.load_hidden(0, 0, 0)
self.sync()
# Generate
for i in range(self.execute_gen_len):
timers("generate").start()
for k in range(self.num_gpu_batches):
self.update_attention_mask(i, k)
for j in range(self.num_layers):
for k in range(self.num_gpu_batches):
self.load_weight(i, j+1, k)
self.load_cache(i, j, k+1)
self.store_hidden(i, j, k-1)
self.load_hidden(i, j, k+1)
self.compute_layer(i, j, k)
self.store_cache(i, j, k-1)
self.sync()
timers("generate").stop()
# Epilogue
self.store_hidden(
self.execute_gen_len-1, self.num_layers-1, self.num_gpu_batches-1)
def generation_loop_debug_single_batch(self):
execute_num_batches = 20
batch_ct = 0
pbar = tqdm(total=execute_num_batches)
timers("prefill").reset()
timers("decoding_gpu_batch").reset()
# Prologue
for k in range(self.num_gpu_batches):
self.load_weight(0, 0, k)
self.sync()
# Generate
for i in range(self.execute_gen_len):
if i == 0: timers("prefill").start()
self.update_attention_mask(i, 0)
for j in range(self.num_layers):
if i > 0: timers("decoding_gpu_batch").start()
self.load_weight(i, j+1, 0)
self.load_cache(i, j+1, 0)
self.load_hidden(i, j, 0)
self.compute_layer(i, j, 0)
self.store_cache(i, j-1, 0)
self.store_hidden(i, j, 0)
self.sync()
if i > 0:
timers("decoding_gpu_batch").stop()
pbar.update(1)
batch_ct += 1
if batch_ct >= execute_num_batches: break
if batch_ct >= execute_num_batches: break
if i == 0: timers("prefill").stop()
# Convert "decoding_gpu_batch" timer to "generate" timer
batch_cost = np.mean(timers("decoding_gpu_batch").costs[10:])
for i in range(self.execute_gen_len):
if i == 0:
timers("generate").costs.append(timers("prefill").costs[0])
else:
timers("generate").costs.append(self.num_layers * batch_cost)
def generation_loop_debug_multi_batch(self):
execute_num_batches = 20
batch_ct = 0
pbar = tqdm(total=execute_num_batches)
timers("prefill").reset()
timers("decoding_gpu_batch").reset()
# Prologue
for k in range(self.num_gpu_batches):
self.load_weight(0, 0, k)
self.load_hidden(0, 0, 0)
self.sync()
# Generate
for i in range(self.execute_gen_len):
if i == 0: timers("prefill").start()
for k in range(self.num_gpu_batches):
self.update_attention_mask(i, k)
for j in range(self.num_layers):
if i > 0: timers("decoding_gpu_batch").start()
for k in range(self.num_gpu_batches):
self.load_weight(i, j+1, k)
self.load_cache(i, j, k+1)
self.store_hidden(i, j, k-1)
self.load_hidden(i, j, k+1)
self.compute_layer(i, j, k)
self.store_cache(i, j, k-1)
self.sync()
if i > 0:
timers("decoding_gpu_batch").stop()
pbar.update(1)
batch_ct += 1
if batch_ct >= execute_num_batches: break
if batch_ct >= execute_num_batches: break
if i == 0: timers("prefill").stop()
# Convert "decoding_gpu_batch" timer to "generate" timer
batch_cost = np.mean(timers("decoding_gpu_batch").costs[10:])
for i in range(self.execute_gen_len):
if i == 0:
timers("generate").costs.append(timers("prefill").costs[0])
else:
timers("generate").costs.append(self.num_layers * batch_cost)
def __del__(self):
self.delete_all_weights()
def batch_query_test(args, task_instruction, test_data, task, pd_data_files, test_file):
# Initialize environment
tokenizer = get_tokenizer(args.model)
env = ExecutionEnv.create(args.offload_dir)
# Offloading policy
policy = Policy(args.gpu_batch_size, args.num_gpu_batches,
args.percent[0], args.percent[1],
args.percent[2], args.percent[3],
args.percent[4], args.percent[5],
overlap=True, sep_layer=True, pin_weight=args.pin_weight,
cpu_cache_compute=args.cpu_cache_compute, attn_sparsity=1.0,
compress_weight=args.compress_weight,
comp_weight_config=CompressionConfig(
num_bits=4, group_size=64,
group_dim=0, symmetric=False),
compress_cache=args.compress_cache,
comp_cache_config=CompressionConfig(
num_bits=4, group_size=64,
group_dim=2, symmetric=False))
logger.info(f"Init weights begin.")
tic = time.time()
model = OptLM(args.model, env, args.path, policy)
logger.info(f"Init weights end. Elapsed: {time.time() - tic:.2f} s.")
if args.add_task_instruction:
prompt = lambda x: f"{task_instruction} {x}"
else:
prompt = lambda x: f"{x}"
trial_metrics = {"prec": [], "rec": [], "f1": [], "acc": [], "total_time": [],
"output_throughput": [], "total_throughput": []}
saved_prefix = None
for trial_num in range(args.num_trials):
np.random.seed(args.seed + trial_num)
queries = []
for _, row in test_data.iterrows():
serialized_r = row["text"]
if args.sample_method == "manual":
prefix_exs = prompt_utils.get_manual_prompt(args.data_dir, row)
elif args.sample_method == "validation_clusters":
if saved_prefix is None:
logger.info("Generating validation cluster prompt.")
saved_prefix = prompt_utils.get_validation_prompt(
args.validation_path,
num_examples=args.k,
task=task,
)
prefix_exs = saved_prefix
else:
if saved_prefix is None:
saved_prefix = prompt_utils.get_random_prompt(
pd_data_files["train"], num_examples=args.k
)
prefix_exs = saved_prefix
queries.append((prefix_exs + "\n" + serialized_r).strip())
gt = test_data["label_str"]
preds = []
idx = 0
max_prompt_seq_length = 0
prompt_strs = []
for _ in range(args.num_run):
# if idx == 0:
# logger.info(f"This is a sample prompt: {prompt(queries[idx])}")
prompt_strs.append(prompt(queries[idx]))
current_prompt_tmp = tokenizer(prompt(queries[idx]), padding="max_length",
return_tensors="np", max_length=args.pad_to_seq_len).input_ids
# logger.info(f"Current prompt <{idx}> length: {current_prompt_tmp.shape[1]}")
max_prompt_seq_length = max(max_prompt_seq_length, current_prompt_tmp.shape[1])
idx += 1
logger.info(f"max_prompt_seq_length: {max_prompt_seq_length}")
tic = time.time()
input_ids = tokenizer(prompt_strs, padding="max_length",
return_tensors="np",
max_length=max_prompt_seq_length).input_ids
output_ids = []
flexgen_batch_size = args.gpu_batch_size*args.num_gpu_batches
num_batched_run = math.floor(args.num_run/flexgen_batch_size)
args.num_run = num_batched_run * flexgen_batch_size
input_ids = input_ids[0:args.num_run]
for i in tqdm(range(num_batched_run)):
input_ids_tmp = input_ids[i*flexgen_batch_size: (i+1)*flexgen_batch_size]
output_ids_tmp = model.generate(input_ids_tmp,
do_sample=True,
temperature=args.temperature,
max_new_tokens=args.max_tokens,
stop=args.stop_token)
output_ids.extend(output_ids_tmp)
toc = time.time()
input_strs = tokenizer.batch_decode(input_ids, skip_special_tokens=True)
output_strs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
preds = [output_strs[i][len(input_strs[i]):] for i in range(len(input_strs))]
total_time = time.time() - tic
total_prompt_tokens = args.num_run * max_prompt_seq_length
total_generate_tokens = args.num_run * args.max_tokens
output_throughput = total_generate_tokens/total_time
total_throughput = (total_prompt_tokens+total_generate_tokens)/total_time
logger.info(f"Batch inference run end. Elapsed: {total_time:.2f} s;")
logger.info(f"Output throughput: {output_throughput:.2f} token/s;")
logger.info(f"Total throughput: {total_throughput:.2f} token/s;")
# Save trial predictions
save_data = test_data.iloc[:args.num_run].copy(deep=True).reset_index()
gt = gt[:args.num_run]
save_data["preds"] = preds
save_data["queries"] = queries[:args.num_run]
prec, rec, acc, f1 = compute_metrics(preds, gt, task)
logger.info(
f"Metrics Trial {trial_num}\n"
f"Prec: {prec:.3f} Recall: {rec:.3f} Acc: {acc:.3f} F1: {f1:.3f} \n"
f"<FlexGen> time: {total_time:.3f} \n"
f"<FlexGen> output throughput: {output_throughput:.3f} \n"
f"<FlexGen> total throughput: {total_throughput:.3f}"
)
trial_metrics["rec"].append(rec)
trial_metrics["prec"].append(prec)
trial_metrics["acc"].append(acc)
trial_metrics["f1"].append(f1)
trial_metrics["total_time"].append(total_time)
trial_metrics["output_throughput"].append(output_throughput)
trial_metrics["total_throughput"].append(total_throughput)
output_file = (
Path(args.output_dir)
/ f"{Path(args.data_dir).stem}"
/ f"{test_file}"
/ f"{args.run_tag}"
/ f"{args.k}k"
f"_{int(args.add_task_instruction)}inst"
f"_{int(args.class_balanced)}cb"
f"_{args.sample_method}"
f"_{args.model}"
f"_{args.num_run}run"
f"_{int(args.dry_run)}dry" / f"trial_{trial_num}.feather"
)
output_file.parent.mkdir(parents=True, exist_ok=True)
logger.info(f"Saved to {output_file}")
save_data.to_feather(output_file)
for k, values in list(trial_metrics.items()):
trial_metrics[f"{k}_avg"] = np.average(values)
trial_metrics[f"{k}_std"] = np.std(values)
output_metrics = output_file.parent / "metrics.json"
json.dump(trial_metrics, open(output_metrics, "w"))
logger.info(f"Final Metrics {json.dumps(trial_metrics, indent=4)}")
logger.info(f"Metrics dumped to {output_metrics}")
# Shutdown
logger.info("Shutdown FlexGen...")
env.close_copy_threads() | null |
10,175 | import logging
from pathlib import Path
from typing import List
from rich.logging import RichHandler
import logging
The provided code snippet includes necessary dependencies for implementing the `setup_logger` function. Write a Python function `def setup_logger(log_dir: str)` to solve the following problem:
Create log directory and logger.
Here is the function:
def setup_logger(log_dir: str):
    """Configure root logging with a file handler and a rich console handler.

    Creates *log_dir* (and any missing parents) and appends log records to
    ``log.txt`` inside it, in addition to pretty console output.
    """
    base = Path(log_dir)
    base.mkdir(exist_ok=True, parents=True)
    file_handler = logging.FileHandler(str(base / "log.txt"))
    console_handler = RichHandler(rich_tracebacks=True)
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(module)s] [%(levelname)s] %(message)s",
        handlers=[file_handler, console_handler],
    )
10,176 | import logging
from functools import partial
from pathlib import Path
from typing import Dict, List
import pandas as pd
from flexgen.apps.data_wrangle.utils import constants
logger = logging.getLogger(__name__)
def read_raw_data(
    data_dir: str,
    add_instruction: bool = False,
    task_instruction_idx: int = 0,
    sep_tok: str = ".",
    nan_tok: str = "nan",
):
    """Read in data where each directory is unique for a task.

    Looks up the task type and serialization settings for ``data_dir`` in the
    ``constants`` tables, binds a task-specific CSV reader via
    ``functools.partial``, and applies it to the train/valid/test splits.

    Args:
        data_dir: Dataset directory; must be a key of ``constants.DATA2TASK``.
        add_instruction: Whether to prepend the task instruction to examples.
        task_instruction_idx: Instruction variant index.
            NOTE(review): unused in this body — the instruction is taken from
            ``constants.DATA2INSTRUCT`` only; confirm against callers.
        sep_tok: Token separating serialized attributes.
        nan_tok: Token used to render missing values.

    Returns:
        Tuple of (dict with "train"/"validation"/"test" entries, label column name).

    Raises:
        ValueError: If ``data_dir`` is unknown or the task type is unrecognized.
    """
    data_files_sep = {"test": {}, "train": {}, "validation": {}}
    logger.info(f"Processing {data_dir}")
    if data_dir not in constants.DATA2TASK:
        raise ValueError(
            f"{data_dir} not one of {constants.DATA2TASK.keys()}. Make sure to set DATASET_PATH."
        )
    # Per-dataset configuration tables.
    task = constants.DATA2TASK[data_dir]
    instruction = constants.DATA2INSTRUCT[data_dir]
    suffix = constants.DATA2SUFFIX[data_dir]
    cols_to_drop = constants.DATA2DROPCOLS[data_dir]
    col_renaming = constants.DATA2COLREMAP[data_dir]
    data_dir_p = Path(data_dir)
    if task == "entity_matching":
        # Pairs of rows from two tables; the reader joins the blocked pairs
        # against both tables.
        train_file = data_dir_p / "train.csv"
        valid_file = data_dir_p / "valid.csv"
        test_file = data_dir_p / "test.csv"
        tableA_file = data_dir_p / "tableA.csv"
        tableB_file = data_dir_p / "tableB.csv"
        tableA = pd.read_csv(tableA_file)
        tableB = pd.read_csv(tableB_file)
        label_col = "label"
        read_data_func = partial(
            read_blocked_pairs,
            tableA=tableA,
            tableB=tableB,
            cols_to_drop=cols_to_drop,
            col_renaming=col_renaming,
            add_instruction=add_instruction,
            instruction=instruction,
            suffix=suffix,
            prod_name=constants.MATCH_PROD_NAME[data_dir],
            sep_tok=sep_tok,
            nan_tok=nan_tok,
        )
    elif task == "data_imputation":
        # Open-ended prediction of a single missing column.
        train_file = data_dir_p / "train.csv"
        valid_file = data_dir_p / "valid.csv"
        test_file = data_dir_p / "test.csv"
        label_col = constants.IMPUTE_COLS[data_dir]
        read_data_func = partial(
            read_imputation_single,
            impute_col=label_col,
            cols_to_drop=cols_to_drop,
            col_renaming=col_renaming,
            add_instruction=add_instruction,
            instruction=instruction,
            suffix=suffix,
            sep_tok=sep_tok,
            nan_tok=nan_tok,
        )
    elif task == "error_detection_spelling":
        # Error detection restricted to spelling errors (spelling=True).
        train_file = data_dir_p / "train.csv"
        valid_file = data_dir_p / "valid.csv"
        test_file = data_dir_p / "test.csv"
        table_file = data_dir_p / "table.csv"
        table = pd.read_csv(table_file)
        label_col = "is_clean"
        read_data_func = partial(
            read_error_detection_single,
            table=table,
            cols_to_drop=cols_to_drop,
            col_renaming=col_renaming,
            add_instruction=add_instruction,
            instruction=instruction,
            suffix=suffix,
            sep_tok=sep_tok,
            nan_tok=nan_tok,
            spelling=True,
        )
    elif task == "error_detection":
        # General error detection (same reader, spelling=False).
        train_file = data_dir_p / "train.csv"
        valid_file = data_dir_p / "valid.csv"
        test_file = data_dir_p / "test.csv"
        table_file = data_dir_p / "table.csv"
        table = pd.read_csv(table_file)
        label_col = "is_clean"
        read_data_func = partial(
            read_error_detection_single,
            table=table,
            cols_to_drop=cols_to_drop,
            col_renaming=col_renaming,
            add_instruction=add_instruction,
            instruction=instruction,
            suffix=suffix,
            sep_tok=sep_tok,
            nan_tok=nan_tok,
            spelling=False,
        )
    elif task == "schema_matching":
        train_file = data_dir_p / "train.csv"
        valid_file = data_dir_p / "valid.csv"
        test_file = data_dir_p / "test.csv"
        table_file = data_dir_p / "table.csv"
        label_col = "label"
        table = pd.read_csv(table_file)
        read_data_func = partial(
            read_schema_match_single,
            table=table,
            cols_to_drop=cols_to_drop,
            col_renaming=col_renaming,
            add_instruction=add_instruction,
            instruction=instruction,
            suffix=suffix,
            sep_tok=sep_tok,
            nan_tok=nan_tok,
        )
    else:
        raise ValueError(f"Task {task} not recognized.")
    # Train split is assumed to exist; valid/test are optional.
    data_files_sep["train"] = read_data_func(train_file)
    # Read validation
    if valid_file.exists():
        data_files_sep["validation"] = read_data_func(valid_file)
    # Read test
    if test_file.exists():
        data_files_sep["test"] = read_data_func(test_file)
    return data_files_sep, label_col
The provided code snippet includes necessary dependencies for implementing the `read_data` function. Write a Python function `def read_data( data_dir: str, class_balanced: bool = False, add_instruction: bool = False, task_instruction_idx: int = 0, max_train_samples: int = -1, max_train_percent: float = -1, sep_tok: str = ".", nan_tok: str = "nan", )` to solve the following problem:
Read in data where each directory is unique for a task.
Here is the function:
def read_data(
    data_dir: str,
    class_balanced: bool = False,
    add_instruction: bool = False,
    task_instruction_idx: int = 0,
    max_train_samples: int = -1,
    max_train_percent: float = -1,
    sep_tok: str = ".",
    nan_tok: str = "nan",
):
    """Read in data where each directory is unique for a task.

    Args:
        data_dir: Dataset directory; must be a key of ``constants.DATA2TASK``.
        class_balanced: Downsample the train split so all labels are equally
            frequent (skipped for the open-ended data_imputation task).
        add_instruction: Prepend the task instruction to each example.
        task_instruction_idx: Instruction variant index (forwarded).
        max_train_samples: If > 0, keep only this *fraction* of the train split
            (must be <= 1.0). Despite the name, historical semantics treat this
            as a fraction; preserved for backward compatibility.
        max_train_percent: If > 0, keep only this fraction of the train split.
            Consulted only when ``max_train_samples`` is not set. Bug fix: this
            parameter was previously accepted but silently ignored.
        sep_tok: Attribute separator token.
        nan_tok: Token used for missing values.

    Returns:
        Dict with "train"/"validation"/"test" DataFrames.
    """
    data_files_sep, label_col = read_raw_data(
        data_dir=data_dir,
        add_instruction=add_instruction,
        task_instruction_idx=task_instruction_idx,
        sep_tok=sep_tok,
        nan_tok=nan_tok,
    )
    task = constants.DATA2TASK[data_dir]
    # Don't class balance on open ended classification tasks
    if class_balanced and task != "data_imputation":
        # Class balance sample the train data: cap every class at the size of
        # the rarest class, then sample deterministically.
        label_cnts = data_files_sep["train"].groupby(label_col).count()
        sample_per_class = label_cnts.min()["text"]
        logger.info(f"Class balanced: train sample per class: {sample_per_class}")
        data_files_sep["train"] = (
            data_files_sep["train"]
            .groupby(label_col, group_keys=False)
            .apply(lambda x: x.sample(sample_per_class, random_state=42))
        )
    # Shuffle train data (fixed seed for reproducibility)
    data_files_sep["train"] = (
        data_files_sep["train"].sample(frac=1, random_state=42).reset_index(drop=True)
    )
    # Optionally truncate the train split. ``max_train_samples`` keeps its
    # historical fraction semantics; ``max_train_percent`` is honored as an
    # alternative when ``max_train_samples`` is not given.
    fraction = None
    if max_train_samples > 0:
        if max_train_samples > 1.0:
            raise ValueError("max_train_samples must be between 0 and 1")
        fraction = max_train_samples
    elif max_train_percent > 0:
        if max_train_percent > 1.0:
            raise ValueError("max_train_percent must be between 0 and 1")
        fraction = max_train_percent
    if fraction is not None:
        orig_train_len = len(data_files_sep["train"])
        max_examples = int(fraction * orig_train_len)
        data_files_sep["train"] = data_files_sep["train"].iloc[:max_examples]
        logger.info(
            f"Length of {data_dir} train is "
            f"{data_files_sep['train'].shape[0]} from {orig_train_len}"
        )
    return data_files_sep
10,177 | import torch
import torch.distributed as dist
_COMM_DEVICE = None
_PIPELINE_PARALLEL_PRED_GROUP = None
_PIPELINE_PARALLEL_SUCC_GROUP = None
def suppress_output(rank):
    """Suppress printing on the current device. Force printing with `force=True`."""
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # NOTE(review): `force` defaults to True, so every rank prints with a
        # "rank #N:" prefix and the rank-0-only branch below is unreachable
        # unless a caller passes force=False explicitly. This contradicts the
        # docstring ("suppress printing"); confirm whether the default should
        # be False.
        force = kwargs.pop('force', True)
        if force:
            builtin_print("rank #%d:" % rank, *args, **kwargs, flush=True)
        elif rank == 0:
            builtin_print(*args, **kwargs)

    # Replace the builtin print process-wide so all modules are affected.
    __builtin__.print = print
def initialize_distributed(head_ip, port, world_size, rank, local_rank,
                           comm_device):
    """Initialize torch.distributed and the pipeline-parallel process groups.

    Args:
        head_ip: IP of the rendezvous host.
        port: TCP port for the rendezvous.
        world_size: Total number of processes.
        rank: Global rank of this process.
        local_rank: GPU index on this node (passed to torch.cuda.set_device).
        comm_device: 'cpu' (gloo backend) or 'gpu' (nccl backend).

    Side effects: sets the module globals _COMM_DEVICE,
    _PIPELINE_PARALLEL_PRED_GROUP and _PIPELINE_PARALLEL_SUCC_GROUP, and
    replaces the builtin print via suppress_output.
    """
    print(f'Initializing distributed environment at {head_ip}:{port}, '
          f'world_size={world_size}, rank={rank}, local_rank={local_rank}.')

    # Initialize distributed environment
    torch.cuda.set_device(local_rank)
    distributed_init_method = f'tcp://{head_ip}:{port}'
    global _COMM_DEVICE
    _COMM_DEVICE = comm_device
    # Backend follows the communication device: gloo for CPU, nccl for GPU.
    if comm_device == 'cpu':
        backend = 'gloo'
    elif comm_device == 'gpu':
        backend = 'nccl'
    else:
        raise ValueError(f'Unknown comm_device: {comm_device}')
    dist.init_process_group(backend=backend,
                            init_method=distributed_init_method,
                            world_size=world_size,
                            rank=rank)

    # Create groups for pipeline parallelism.
    # Every rank iterates over every (pred, succ) pair because
    # dist.new_group must be called by all processes, even those not in
    # the group being created.
    global _PIPELINE_PARALLEL_PRED_GROUP, _PIPELINE_PARALLEL_SUCC_GROUP
    if world_size > 1:
        for pred in range(world_size):
            succ = (pred + 1) % world_size
            group = dist.new_group([pred, succ])
            if pred == rank:
                _PIPELINE_PARALLEL_PRED_GROUP = group
            if succ == rank:
                _PIPELINE_PARALLEL_SUCC_GROUP = group

    suppress_output(rank)
    print("Finished initializing distributed environment")
10,178 | import torch
import torch.distributed as dist
_PIPELINE_PARALLEL_PRED_GROUP = None
def get_pipeline_parallel_pred_group():
    """Return the process group shared with the pipeline predecessor
    (None until initialize_distributed has run)."""
    return _PIPELINE_PARALLEL_PRED_GROUP
10,179 | import torch
import torch.distributed as dist
_PIPELINE_PARALLEL_SUCC_GROUP = None
def get_pipeline_parallel_succ_group():
    """Return the process group shared with the pipeline successor
    (None until initialize_distributed has run)."""
    return _PIPELINE_PARALLEL_SUCC_GROUP
10,180 | import torch
import torch.distributed as dist
_COMM_DEVICE = None
def get_comm_device():
    """Return the configured communication device ('cpu' or 'gpu';
    None until initialize_distributed has run)."""
    return _COMM_DEVICE
10,181 | import argparse
import dataclasses
import os
import pickle
import time
from typing import Union, List, Optional
import numpy as np
from tqdm import tqdm
import torch
from transformers import AutoTokenizer
from flexgen.compression import CompressionConfig
from flexgen.opt_config import OptConfig, get_opt_config, download_opt_weights
from flexgen.pytorch_backend import (TorchDevice, TorchDisk, TorchLink,
TorchMixedDevice, DeviceType, general_copy, fix_recursive_import)
from flexgen.timer import timers
from flexgen.utils import (Task, ExecutionEnv, GB, T, ValueHolder,
array_1d, array_2d, array_3d, str2bool, project_decode_latency,
torch_mem_stats, torch_dtype_to_np_dtype, write_benchmark_log,
read_benchmark_log)
DUMMY_WEIGHT = "_DUMMY_"
def get_choice(cur_percent, percents, choices):
torch_dtype_to_np_dtype = {
torch.float16: np.float16, torch.float32: np.float32,
torch.uint8: np.uint8, torch.int8: np.int8, torch.int32: np.int32,
torch.int64: np.int64, torch.bool: bool,
}
def init_weight_list(weight_specs, policy, env):
    """Allocate and initialize a list of weight tensors across devices.

    Weights are laid out by cumulative size; each weight is placed on the
    device (disk/CPU/GPU) whose placement-percentage bucket contains the
    midpoint of that weight's size range.

    Args:
        weight_specs: Sequence of (shape, dtype, filename) tuples.
        policy: Placement policy (percentages, pinning, compression settings).
        env: Execution environment providing ``disk``, ``cpu`` and ``gpu``.

    Returns:
        List of allocated (possibly compressed) weight tensors, one per spec.
    """
    dev_percents = [policy.w_disk_percent, policy.w_cpu_percent, policy.w_gpu_percent]
    dev_choices = [env.disk, env.cpu, env.gpu]

    sizes = [np.prod(spec[0]) for spec in weight_specs]
    sizes_cumsum = np.cumsum(sizes)
    ret = []
    for i, (shape, dtype, filename) in enumerate(weight_specs):
        # Midpoint of this weight's slice of the total size, as a percentage.
        mid_percent = (sizes_cumsum[i] - sizes[i] / 2) / sizes_cumsum[-1]
        home = get_choice(mid_percent * 100, dev_percents, dev_choices)

        # 1D tensors (biases, norms) are always pinned and never compressed;
        # only 2D+ tensors follow the policy's pinning/compression flags.
        if len(shape) < 2:
            pin_memory = True
            compress = False
        else:
            pin_memory = policy.pin_weight
            compress = policy.compress_weight

        if not compress:
            weight = home.allocate(shape, dtype, pin_memory=pin_memory)

            if DUMMY_WEIGHT not in filename:
                weight.load_from_np_file(filename)
            else:
                weight.load_from_np(np.ones(shape, dtype))
        else:
            weight = home.compressed_device.allocate(
                shape, dtype, policy.comp_weight_config, pin_memory=pin_memory)

            if DUMMY_WEIGHT not in filename:
                weight.load_from_np_file(filename)
            else:
                # Fix: the original reused loop variable `i` here, shadowing
                # the outer weight index. Use a distinct name for the two
                # compressed data parts.
                for part in range(2):
                    x = weight.data[part]
                    x.load_from_np(np.ones(x.shape, torch_dtype_to_np_dtype[x.dtype]))

        ret.append(weight)
    return ret
10,182 | import argparse
import dataclasses
import os
import pickle
import time
from typing import Union, List, Optional
import numpy as np
from tqdm import tqdm
import torch
from transformers import AutoTokenizer
from flexgen.compression import CompressionConfig
from flexgen.opt_config import OptConfig, get_opt_config, download_opt_weights
from flexgen.pytorch_backend import (TorchDevice, TorchDisk, TorchLink,
TorchMixedDevice, DeviceType, general_copy, fix_recursive_import)
from flexgen.timer import timers
from flexgen.utils import (Task, ExecutionEnv, GB, T, ValueHolder,
array_1d, array_2d, array_3d, str2bool, project_decode_latency,
torch_mem_stats, torch_dtype_to_np_dtype, write_benchmark_log,
read_benchmark_log)
DUMMY_WEIGHT = "_DUMMY_"
@dataclasses.dataclass
class Policy:
    """Offloading placement policy for weights, KV cache, and activations.

    Percentages are numbers in [0, 100]; the disk share of each tensor class
    is the remainder after the GPU and CPU shares (see the ``*_disk_percent``
    properties).

    Fix: the extracted source lacked the ``@dataclasses.dataclass`` and
    ``@property`` decorators, yet sibling code constructs Policy with keyword
    fields and reads the disk percentages as attributes (e.g.
    ``policy.w_disk_percent`` in init_weight_list and
    ``policy.act_disk_percent == 100`` in OptLM.__init__) — restored here.
    """
    gpu_batch_size: int
    num_gpu_batches: int

    # percent = a means a%
    w_gpu_percent: float
    w_cpu_percent: float
    cache_gpu_percent: float
    cache_cpu_percent: float
    act_gpu_percent: float
    act_cpu_percent: float

    # Whether to overlap the I/O and compute
    overlap: bool

    # Whether to separate attention and mlp as two layers
    sep_layer: bool

    # Whether to use pinned memory for weights on CPU
    pin_weight: bool

    # Whether to compute attention on CPU
    cpu_cache_compute: bool

    # Sparsity of attention weights
    attn_sparsity: float

    # Compress weights with group-wise quantization
    compress_weight: bool
    comp_weight_config: CompressionConfig

    # Compress KV cache with group-wise quantization
    compress_cache: bool
    comp_cache_config: CompressionConfig

    @property
    def w_disk_percent(self):
        return 100 - self.w_gpu_percent - self.w_cpu_percent

    @property
    def cache_disk_percent(self):
        return 100 - self.cache_gpu_percent - self.cache_cpu_percent

    @property
    def act_disk_percent(self):
        return 100 - self.act_gpu_percent - self.act_cpu_percent
class OptLM:
    """OPT language model executed with offloading across GPU/CPU/disk.

    Computation is organized as a triple loop over (token i, layer j, GPU
    batch k). Weights, KV cache and hidden states are placed and moved
    between devices according to ``policy``, optionally overlapping I/O with
    compute via dedicated CUDA streams.

    Fix applied: the "Invalid debug mode" ValueError in ``generate`` was
    missing its f-string prefix, so the literal text "{debug_mode}" was
    raised instead of the actual value.
    """

    def __init__(self,
                 config: Union[str, OptConfig],
                 env: ExecutionEnv,
                 path: str,
                 policy: Policy):
        if isinstance(config, str):
            config = get_opt_config(config)
        self.config = config
        self.env = env
        self.path = path
        self.policy = policy
        self.num_gpu_batches = policy.num_gpu_batches

        layers = []
        layers.append(InputEmbed(self.config, self.env, self.policy))
        for i in range(self.config.num_hidden_layers):
            if policy.sep_layer:
                layers.append(SelfAttention(self.config, self.env, self.policy, i))
                layers.append(MLP(self.config, self.env, self.policy, i))
            else:
                layers.append(TransformerLayer(self.config, self.env, self.policy, i))
        layers.append(OutputEmbed(self.config, self.env, self.policy))
        self.layers = layers
        self.num_layers = len(layers)

        # Activations must live entirely on one device.
        if self.policy.act_gpu_percent == 100:
            self.act_home = self.env.gpu
        elif self.policy.act_cpu_percent == 100:
            self.act_home = self.env.cpu
        elif self.policy.act_disk_percent == 100:
            self.act_home = self.env.disk
        else:
            raise NotImplementedError()

        # CUDA streams
        self.load_weight_stream = torch.cuda.Stream()
        self.load_cache_stream = torch.cuda.Stream()
        self.store_cache_stream = torch.cuda.Stream()

        # Intermediate tensors
        # The following buffers store values used
        # for the i-th token, j-th layer, k-th gpu batch.
        num_layers, num_gpu_batches = self.num_layers, self.policy.num_gpu_batches

        # cache[j][k]
        self.cache_home = array_2d(num_layers, num_gpu_batches, ValueHolder)
        self.cache_read_buf = array_2d(num_layers, num_gpu_batches, ValueHolder)
        self.cache_write_buf = array_2d(num_layers, num_gpu_batches, ValueHolder)
        # weight[j]
        self.weight_read_buf = array_1d(num_layers, ValueHolder)
        # attention_mask[k]
        self.attention_mask = array_1d(num_gpu_batches, ValueHolder)

        self.task = None
        self.init_all_weights()

    def set_task(self, task):
        """Attach the generation task to this model and all layers."""
        self.task = task
        for l in self.layers:
            l.set_task(task)

    def init_weight(self, j):
        """Load (downloading if needed) the weights of layer j into weight_home."""
        expanded_path = os.path.abspath(os.path.expanduser(
            os.path.join(self.path, f"{self.config.name}-np")))
        check_path = os.path.join(expanded_path, "decoder.embed_positions.weight")
        if not os.path.exists(check_path) and DUMMY_WEIGHT not in check_path:
            download_opt_weights(self.config.name, self.path)

        self.layers[j].init_weight(self.weight_home[j], expanded_path)

    def load_weight(self, i, j, k, overlap=True):
        # Handle corner cases
        if j == self.num_layers:
            j = 0
            i += 1
            if i == self.execute_gen_len:
                return

        # Load from weight_home to weight_read_buf
        if overlap:
            with torch.cuda.stream(self.load_weight_stream):
                self.layers[j].load_weight(self.weight_home[j], self.weight_read_buf[j], k)
        else:
            self.layers[j].load_weight(self.weight_home[j], self.weight_read_buf[j], k)

    def delete_weight(self, j, k):
        if k == 0:
            for x in self.weight_home[j].pop():
                if isinstance(x, ValueHolder):
                    for y in x.pop():
                        y.delete()
                else:
                    x.delete()

    def init_cache(self, j, k):
        self.layers[j].init_cache_one_gpu_batch(self.cache_home[j][k])

    def load_cache(self, i, j, k, overlap=True):
        # Handle corner cases
        if i == 0:  # prefill, no cache
            return
        if k == self.num_gpu_batches:
            k = 0
            j += 1
        if j == self.num_layers:
            j = 0
            i += 1
            if i == self.execute_gen_len:
                return

        # Load from cache_home to cache_read_buf
        if overlap:
            with torch.cuda.stream(self.load_cache_stream):
                self.layers[j].load_cache(self.cache_home[j][k], self.cache_read_buf[j][k], i)
        else:
            self.layers[j].load_cache(self.cache_home[j][k], self.cache_read_buf[j][k], i)

    def store_cache(self, i, j, k, overlap=True):
        # Handle corner cases
        if k == -1:
            k = self.num_gpu_batches - 1
            j -= 1
        if j == -1:
            j = self.num_layers - 1
            i -= 1
            if i == -1:
                return
        if i == self.task.gen_len - 1:  # last token, no need to store cache
            self.cache_write_buf[j][k].pop()
            return

        # Store cache_write_buf to cache_home
        # Delete cache_write_buf
        if overlap:
            with torch.cuda.stream(self.store_cache_stream):
                self.layers[j].store_cache(self.cache_home[j][k], self.cache_write_buf[j][k], i)
        else:
            self.layers[j].store_cache(self.cache_home[j][k], self.cache_write_buf[j][k], i)

    def delete_cache(self, j, k):
        v = self.cache_home[j][k].pop()
        if v:
            for x in v:
                x.delete()

    def load_hidden(self, i, j, k):
        # Handle corner cases
        if k == self.num_gpu_batches:
            k = 0
            j += 1
        if j == self.num_layers:
            j = 0
            i += 1
            if i == self.execute_gen_len:
                return

        # Load to hidden states buffers
        dst = self.layers[j].compute
        if j == 0:
            gpu_batch_size = self.policy.gpu_batch_size
            left, right = k * gpu_batch_size, (k + 1) * gpu_batch_size
            if i == 0:  # load from the input ids
                val = dst.allocate((gpu_batch_size, self.task.prompt_len), np.int32)
                val.load_from_np(self.output_ids[left:right, :self.task.prompt_len])
            else:  # load from the last generated token
                pos = self.task.prompt_len + i
                val = dst.allocate((gpu_batch_size, 1), np.int32)
                val.load_from_np(self.output_ids[left:right, pos-1:pos])
        else:  # load from the last layer
            val = self.hidden[i][j-1][k].pop().move(dst)
        self.hidden[i][j][k].store(val)

    def store_hidden(self, i, j, k):
        # Handle corner cases
        if k == -1:
            k = self.num_gpu_batches - 1
            j -= 1
        if j == -1:
            j = self.num_layers - 1
            i -= 1
            if i == -1:
                return

        # Store to hidden states buffers
        if j == self.num_layers - 1:  # store to output
            gpu_batch_size = self.policy.gpu_batch_size
            left, right = k * gpu_batch_size, (k + 1) * gpu_batch_size
            ids = self.hidden[i][j][k].pop().data.detach().cpu().numpy()
            pos = self.task.prompt_len + i
            if self.task.stop:
                # Once a sequence has emitted the stop token, pad the rest.
                stopped = self.stopped[left:right]
                self.output_ids[left:right, pos:pos+1] = np.where(
                    stopped, self.config.pad_token_id, ids)
                stopped[:] = np.logical_or(stopped, ids == self.task.stop)
            else:
                self.output_ids[left:right, pos:pos+1] = ids
        else:  # move to home
            x = self.hidden[i][j][k]
            if x.val:  # x may already be moved due to overlapping
                x.val = x.val.move(self.act_home)

    def compute_layer(self, i, j, k):
        # Update the hidden in place
        # Clear the weight_read_buf if it is the last gpu batch
        # Clear the cache_read_buf
        # Run layer computation
        self.layers[j].forward(self.hidden[i][j][k], self.cache_read_buf[j][k],
                               self.weight_read_buf[j], self.attention_mask[k],
                               self.cache_write_buf[j][k], i, k)

    def sync(self):
        """Block until all pending disk and GPU work has finished."""
        self.env.disk.synchronize()
        torch.cuda.synchronize()

    def init_all_weights(self):
        self.weight_home = array_1d(self.num_layers, ValueHolder)
        for j in range(self.num_layers):
            self.init_weight(j)

    def delete_all_weights(self):
        for j in range(self.num_layers):
            self.delete_weight(j, 0)

    def update_attention_mask(self, i, k):
        if i > 0:
            # Decoding step: extend the existing mask by one position.
            mask = self.attention_mask[k]
            assert mask.val is not None
            mask.val = mask.val.device.extend_attention_mask(mask.val, [True])
            return

        # Prefill step: build the mask from the (padded) prompt tokens.
        gpu_batch_size = self.policy.gpu_batch_size
        left = k * gpu_batch_size
        right = left + gpu_batch_size
        input_ids = self.output_ids[left:right, :self.task.prompt_len]

        attention_compute = (self.env.cpu if self.policy.cpu_cache_compute
                             else self.env.gpu)
        val = attention_compute.allocate(
            (self.policy.gpu_batch_size, self.task.prompt_len), bool)
        val.load_from_np((input_ids != self.config.pad_token_id))
        self.attention_mask[k].store(val)

    def generate(self,
                 inputs: Union[np.array, List[List[int]]],
                 max_new_tokens: int = 32,
                 do_sample: bool = False,
                 temperature: float = 1.0,
                 stop: Optional[int] = None,
                 debug_mode: Optional[str] = None,
                 cut_gen_len: Optional[int] = None,
                 verbose: int = 0):
        """Generate ``max_new_tokens`` tokens for a batch of equal-length prompts.

        Returns the full output_ids array of shape
        (num_prompts, prompt_len + max_new_tokens), padded with pad_token_id.
        """
        task = Task(
            inputs=inputs,
            prompt_len=len(inputs[0]),
            gen_len=max_new_tokens,
            cut_gen_len=cut_gen_len,
            do_sample=do_sample,
            temperature=temperature,
            stop=stop,
        )
        num_layers = self.num_layers
        num_gpu_batches = self.num_gpu_batches
        gpu_batch_size = self.policy.gpu_batch_size
        overlap = self.policy.overlap
        prompt_len, gen_len = task.prompt_len, task.gen_len
        self.execute_gen_len = task.cut_gen_len if task.cut_gen_len else task.gen_len

        # Output token ids
        self.output_ids = np.full((len(task.inputs), prompt_len + gen_len),
                                  self.config.pad_token_id, dtype=np.int32)
        self.stopped = np.zeros((len(task.inputs), 1), dtype=bool)
        self.output_ids[:, :prompt_len] = np.asarray(task.inputs)
        assert gpu_batch_size * num_gpu_batches == len(task.inputs)

        # Intermediate tensors
        # The following buffers store values used
        # for the i-th token, j-th layer, k-th gpu batch.
        num_layers, num_gpu_batches = self.num_layers, self.policy.num_gpu_batches
        for j in range(num_layers):
            for k in range(num_gpu_batches):
                self.cache_home[j][k].clear()
                self.cache_read_buf[j][k].clear()
                self.cache_write_buf[j][k].clear()
        for j in range(num_layers):
            self.weight_read_buf[j].clear()
        for k in range(num_gpu_batches):
            self.attention_mask[k].clear()
        self.hidden = array_3d(gen_len, num_layers, num_gpu_batches, ValueHolder)

        # Init cache
        self.set_task(task)
        for j in range(num_layers):
            for k in range(num_gpu_batches):
                self.init_cache(j, k)
        if self.policy.cpu_cache_compute:
            self.env.cpu.init_attention_compute_workspace(self.config, self.task, self.policy)

        # Generate
        if debug_mode is None:
            if not overlap:
                # No overlap, easy to understand, suitable for debugging
                self.generation_loop_normal()
            else:
                # Overlap I/O and compute
                if num_gpu_batches == 1:
                    self.generation_loop_overlap_single_batch()
                else:
                    self.generation_loop_overlap_multi_batch()
        elif debug_mode == "fewer_batch":
            # Run fewer layeres and batches for debugging
            if num_gpu_batches == 1:
                self.generation_loop_debug_single_batch()
            else:
                self.generation_loop_debug_multi_batch()
        elif debug_mode == "breakdown":
            # No overlap, fewer batches, execution time breakdown
            self.generation_loop_debug_normal()
        else:
            # Fix: f-prefix was missing, producing a literal "{debug_mode}".
            raise ValueError(f"Invalid debug mode: {debug_mode}")

        # Delete cache
        for j in range(num_layers):
            for k in range(num_gpu_batches):
                self.delete_cache(j, k)
        if self.policy.cpu_cache_compute:
            self.env.cpu.del_attention_compute_workspace()

        return self.output_ids

    def generation_loop_normal(self):
        for i in range(self.execute_gen_len):
            timers("generate").start()
            for k in range(self.num_gpu_batches):
                self.update_attention_mask(i, k)
            for j in range(self.num_layers):
                for k in range(self.num_gpu_batches):
                    self.load_weight(i, j, k, overlap=False)

                for k in range(self.num_gpu_batches):
                    self.load_cache(i, j, k, overlap=False)
                    self.load_hidden(i, j, k)
                    self.compute_layer(i, j, k)
                    self.store_hidden(i, j, k)
                    self.store_cache(i, j, k, overlap=False)
            timers("generate").stop()

    def generation_loop_debug_normal(self):
        execute_num_batches = 20
        batch_ct = 0
        pbar = tqdm(total=execute_num_batches)
        timers("prefill_total").reset()
        timers("decoding_gpu_batch").reset()

        timers("load_weight").reset()
        timers("load_cache_prefill").reset()
        timers("load_cache_decoding").reset()
        timers("store_cache_prefill").reset()
        timers("store_cache_decoding").reset()
        timers("compute_layer_prefill").reset()
        timers("compute_layer_decoding").reset()
        load_weight_timer = timers("load_weight")

        for i in range(self.execute_gen_len):
            if i == 0:
                timers("prefill_total").start()
                load_cache_timer = timers("load_cache_prefill")
                store_cache_timer = timers("store_cache_prefill")
                compute_layer_timer = timers("compute_layer_prefill")
            else:
                load_cache_timer = timers("load_cache_decoding")
                store_cache_timer = timers("store_cache_decoding")
                compute_layer_timer = timers("compute_layer_decoding")

            for k in range(self.num_gpu_batches):
                self.update_attention_mask(i, k)

            for j in range(self.num_layers):
                if i > 0: timers("decoding_gpu_batch").start()

                load_weight_timer.start(self.sync)
                for k in range(self.num_gpu_batches):
                    self.load_weight(i, j, k)
                load_weight_timer.stop(self.sync)

                for k in range(self.num_gpu_batches):
                    load_cache_timer.start(self.sync)
                    self.load_cache(i, j, k)
                    load_cache_timer.stop(self.sync)
                    self.load_hidden(i, j, k)
                    compute_layer_timer.start(self.sync)
                    self.compute_layer(i, j, k)
                    compute_layer_timer.stop(self.sync)
                    self.store_hidden(i, j, k)
                    store_cache_timer.start(self.sync)
                    self.store_cache(i, j, k)
                    store_cache_timer.stop(self.sync)

                if i > 0:
                    timers("decoding_gpu_batch").stop()
                    pbar.update(1)
                    batch_ct += 1
                if batch_ct >= execute_num_batches: break
            if batch_ct >= execute_num_batches: break
            if i == 0: timers("prefill_total").stop(self.sync)

        # Convert "decoding_gpu_batch" timer to "generate" timer
        batch_cost = np.mean(timers("decoding_gpu_batch").costs[10:])
        for i in range(self.execute_gen_len):
            if i == 0:
                timers("generate").costs.append(timers("prefill_total").costs[0])
            else:
                timers("generate").costs.append(self.num_layers * batch_cost)

        # Debug the costs of individual functions
        print(f"#layers: {self.num_layers}")
        print(f"#batches prefill:  "
              f"{self.num_layers * self.num_gpu_batches}")
        print(f"#batches decoding: "
              f"{(self.task.gen_len - 1) * self.num_layers * self.num_gpu_batches}")
        print(f"load_weight            (per-layer)"
              f": {np.mean(timers('load_weight').costs):.6f} s")
        for stage in ["prefill", "decoding"]:
            for func in ["load_cache", "store_cache", "compute_layer"]:
                name = func + "_" + stage
                costs = timers(name).costs
                print(f"{name:22s} (per-batch): {np.mean(costs):.6f} s")

    def generation_loop_overlap_single_batch(self):
        # Prologue
        for k in range(self.num_gpu_batches):
            self.load_weight(0, 0, k)
        self.sync()

        # Generate
        for i in range(self.execute_gen_len):
            timers("generate").start()
            self.update_attention_mask(i, 0)
            for j in range(self.num_layers):
                self.load_weight(i, j+1, 0)
                self.load_cache(i, j+1, 0)
                self.load_hidden(i, j, 0)
                self.compute_layer(i, j, 0)
                self.store_cache(i, j-1, 0)
                self.store_hidden(i, j, 0)
                self.sync()
            timers("generate").stop()

            if self.task.stop and np.all(self.stopped):
                break

    def generation_loop_overlap_multi_batch(self):
        # Prologue
        for k in range(self.num_gpu_batches):
            self.load_weight(0, 0, k)
        self.load_hidden(0, 0, 0)
        self.sync()

        # Generate
        for i in range(self.execute_gen_len):
            timers("generate").start()
            for k in range(self.num_gpu_batches):
                self.update_attention_mask(i, k)
            for j in range(self.num_layers):
                for k in range(self.num_gpu_batches):
                    self.load_weight(i, j+1, k)
                    self.load_cache(i, j, k+1)
                    self.store_hidden(i, j, k-1)
                    self.load_hidden(i, j, k+1)
                    self.compute_layer(i, j, k)
                    self.store_cache(i, j, k-1)
                    self.sync()
            timers("generate").stop()

        # Epilogue
        self.store_hidden(
            self.execute_gen_len-1, self.num_layers-1, self.num_gpu_batches-1)

    def generation_loop_debug_single_batch(self):
        execute_num_batches = 20
        batch_ct = 0
        pbar = tqdm(total=execute_num_batches)
        timers("prefill").reset()
        timers("decoding_gpu_batch").reset()

        # Prologue
        for k in range(self.num_gpu_batches):
            self.load_weight(0, 0, k)
        self.sync()

        # Generate
        for i in range(self.execute_gen_len):
            if i == 0: timers("prefill").start()
            self.update_attention_mask(i, 0)
            for j in range(self.num_layers):
                if i > 0: timers("decoding_gpu_batch").start()
                self.load_weight(i, j+1, 0)
                self.load_cache(i, j+1, 0)
                self.load_hidden(i, j, 0)
                self.compute_layer(i, j, 0)
                self.store_cache(i, j-1, 0)
                self.store_hidden(i, j, 0)
                self.sync()

                if i > 0:
                    timers("decoding_gpu_batch").stop()
                    pbar.update(1)
                    batch_ct += 1
                if batch_ct >= execute_num_batches: break
            if batch_ct >= execute_num_batches: break
            if i == 0: timers("prefill").stop()

        # Convert "decoding_gpu_batch" timer to "generate" timer
        batch_cost = np.mean(timers("decoding_gpu_batch").costs[10:])
        for i in range(self.execute_gen_len):
            if i == 0:
                timers("generate").costs.append(timers("prefill").costs[0])
            else:
                timers("generate").costs.append(self.num_layers * batch_cost)

    def generation_loop_debug_multi_batch(self):
        execute_num_batches = 20
        batch_ct = 0
        pbar = tqdm(total=execute_num_batches)
        timers("prefill").reset()
        timers("decoding_gpu_batch").reset()

        # Prologue
        for k in range(self.num_gpu_batches):
            self.load_weight(0, 0, k)
        self.load_hidden(0, 0, 0)
        self.sync()

        # Generate
        for i in range(self.execute_gen_len):
            if i == 0: timers("prefill").start()
            for k in range(self.num_gpu_batches):
                self.update_attention_mask(i, k)
            for j in range(self.num_layers):
                if i > 0: timers("decoding_gpu_batch").start()
                for k in range(self.num_gpu_batches):
                    self.load_weight(i, j+1, k)
                    self.load_cache(i, j, k+1)
                    self.store_hidden(i, j, k-1)
                    self.load_hidden(i, j, k+1)
                    self.compute_layer(i, j, k)
                    self.store_cache(i, j, k-1)
                    self.sync()

                if i > 0:
                    timers("decoding_gpu_batch").stop()
                    pbar.update(1)
                    batch_ct += 1
                if batch_ct >= execute_num_batches: break
            if batch_ct >= execute_num_batches: break
            if i == 0: timers("prefill").stop()

        # Convert "decoding_gpu_batch" timer to "generate" timer
        batch_cost = np.mean(timers("decoding_gpu_batch").costs[10:])
        for i in range(self.execute_gen_len):
            if i == 0:
                timers("generate").costs.append(timers("prefill").costs[0])
            else:
                timers("generate").costs.append(self.num_layers * batch_cost)

    def __del__(self):
        self.delete_all_weights()
def get_filename(args):
    """Build the benchmark-log filename encoding the run configuration."""
    model_size = args.model.split("-")[-1]
    percent = "".join(f"{p}-" for p in args.percent)
    pieces = [
        f"fo-{model_size}-gbs{args.gpu_batch_size}-",
        f"ngbs{args.num_gpu_batches}-",
        f"prompt{args.prompt_len}-",
        f"gen{args.gen_len}-percent-{percent}",
        "cpu-cache" if args.cpu_cache_compute else "gpu-cache",
    ]
    if args.compress_weight:
        pieces.append("-compw")
    if args.compress_cache:
        pieces.append("-compc")
    return "".join(pieces)
def get_test_inputs(prompt_len, num_prompts, tokenizer):
    """Return ``num_prompts`` copies of one test prompt, tokenized and padded
    to ``prompt_len`` token ids each."""
    prompt = "Paris is the capital city of"
    encoded = tokenizer([prompt], padding="max_length", max_length=prompt_len)
    first_ids = encoded.input_ids[0]
    return (first_ids,) * num_prompts
class CompressionConfig:
    """Group-wise quantization."""
    # NOTE(review): this looks like a dataclass whose decorator was lost in
    # extraction — confirm against the original source.
    num_bits: int        # bit width of the quantized values
    group_size: int      # number of elements quantized together
    group_dim: int       # tensor dimension along which groups are formed
    symmetric: bool      # symmetric (no zero point) vs asymmetric quantization
    enabled: bool = True # allows disabling compression without removing the config
def get_opt_config(name, **kwargs):
    """Return the OptConfig for a model name, applying **kwargs as overrides.

    Accepts HuggingFace-style names ("facebook/opt-30b") and the -iml /
    -iml-max instruction-tuned variants, which share the base architecture.

    Raises:
        ValueError: If the model name is unknown.
        NotImplementedError: For opt-350m (different architecture).
    """
    if "/" in name:
        name = name.split("/")[1]
    name = name.lower()

    # Handle opt-iml-30b and opt-iml-max-30b
    if "-iml-max" in name:
        arch_name = name.replace("-iml-max", "")
    elif "-iml" in name:
        arch_name = name.replace("-iml", "")
    else:
        arch_name = name

    if arch_name == "opt-125m":
        config = OptConfig(name=name,
            max_seq_len=2048, num_hidden_layers=12, n_head=12,
            hidden_size=768, input_dim=768, ffn_embed_dim=768 * 4,
        )
    elif arch_name == "opt-350m":
        config = OptConfig(name=name,
            max_seq_len=2048, num_hidden_layers=24, n_head=16,
            hidden_size=1024, input_dim=1024, ffn_embed_dim=1024 * 4,
        )
        # Unconditional: opt-350m is deliberately unsupported.
        raise NotImplementedError("Not implemented because this model "
                                  "has a different architecture")
    elif arch_name == "opt-1.3b":
        config = OptConfig(name=name,
            max_seq_len=2048, num_hidden_layers=24, n_head=32,
            hidden_size=2048, input_dim=2048, ffn_embed_dim=2048 * 4,
        )
    elif arch_name == "opt-2.7b":
        config = OptConfig(name=name,
            max_seq_len=2048, num_hidden_layers=32, n_head=32,
            hidden_size=2560, input_dim=2560, ffn_embed_dim=2560 * 4,
        )
    elif arch_name == "opt-6.7b":
        config = OptConfig(name=name,
            max_seq_len=2048, num_hidden_layers=32, n_head=32,
            hidden_size=4096, input_dim=4096, ffn_embed_dim=4096 * 4,
        )
    elif arch_name == "opt-13b":
        config = OptConfig(name=name,
            max_seq_len=2048, num_hidden_layers=40, n_head=40,
            hidden_size=5120, input_dim=5120, ffn_embed_dim=5120 * 4,
        )
    elif arch_name == "opt-30b":
        config = OptConfig(name=name,
            max_seq_len=2048, num_hidden_layers=48, n_head=56,
            hidden_size=7168, input_dim=7168, ffn_embed_dim=7168 * 4,
        )
    elif arch_name == "galactica-30b":
        # Same architecture as opt-30b with a different vocabulary size.
        config = OptConfig(name=name,
            max_seq_len=2048, num_hidden_layers=48, n_head=56,
            hidden_size=7168, input_dim=7168, ffn_embed_dim=7168 * 4, vocab_size=50000,
        )
    elif arch_name == "opt-66b":
        config = OptConfig(name=name,
            max_seq_len=2048, num_hidden_layers=64, n_head=72,
            hidden_size=9216, input_dim=9216, ffn_embed_dim=9216 * 4,
        )
    elif arch_name == "opt-175b":
        config = OptConfig(name=name,
            max_seq_len=2048, num_hidden_layers=96, n_head=96,
            hidden_size=12288, input_dim=12288, ffn_embed_dim=12288 * 4,
        )
    elif arch_name == "opt-175b-stage":
        # Truncated 175b variant (fewer layers) used for staging/testing.
        config = OptConfig(name=name,
            max_seq_len=2048, num_hidden_layers=24, n_head=96,
            hidden_size=12288, input_dim=12288, ffn_embed_dim=12288 * 4,
        )
    else:
        raise ValueError(f"Invalid model name: {name}")

    return dataclasses.replace(config, **kwargs)
class TorchDevice:
    """Wrap tensor and computation APIs of a single CPU or GPU."""

    def __init__(self, name, mem_capacity=None, flops=None):
        # name: a torch device string such as "cuda:0" or "cpu".
        # mem_capacity / flops: optional hardware specs used by cost models.
        self.name = name
        self.mem_capacity = mem_capacity
        self.flops = flops

        self.dev = torch.device(name)
        self.device_type = DeviceType.convert(self.dev.type)
        self.compressed_device = TorchCompressedDevice(self)

        # Communication links to peer devices, keyed by the peer device.
        self.links = {}

        # fp32 scratch buffers for attention computed on CPU; see
        # init_attention_compute_workspace().
        self.attention_compute_workspace = None
        self.workspace_pt = 0

        if self.device_type == DeviceType.CPU:
            # Register this instance as the module-wide CPU device singleton.
            global global_cpu_device
            global_cpu_device = self

    def add_link(self, link):
        # Index the link by whichever endpoint is not this device.
        dst = link.b if link.a == self else link.a
        self.links[dst] = link

    def allocate(self, shape, dtype, pin_memory=None, name=None):
        """Allocate an uninitialized TorchTensor of `shape` / numpy `dtype` here.

        Pinned memory defaults to True on CPU (for fast H2D copies) and is
        forced off on other device types.
        """
        if self.device_type == DeviceType.CPU:
            pin_memory = True if pin_memory is None else pin_memory
        else:
            pin_memory = False
        dtype = np_dtype_to_torch_dtype[dtype]
        data = torch.empty(shape, dtype=dtype, pin_memory=pin_memory, device=self.dev)
        return TorchTensor.create_from_torch(data, self, name=name)

    def delete(self, tensor):
        # No-op: memory is reclaimed by PyTorch's allocator / Python GC.
        pass

    def init_attention_compute_workspace(self, config, task, policy):
        """Pre-allocate fp32 K/V scratch buffers for CPU attention compute."""
        if self.device_type != DeviceType.CPU:
            return  # Only CPU requires this fp32 workspace

        if not policy.compress_cache:
            b = policy.gpu_batch_size
            n_head = config.n_head
            head_dim = config.input_dim // n_head
            max_seq_len = task.prompt_len + task.gen_len - 1
            self.attention_compute_workspace = []
            self.workspace_pt = 0

            # We currently separate SelfAttention and MLP as two layers,
            # so we only need one workspace instead of two.
            for i in range(1 if policy.sep_layer else 2):
                shape = (max_seq_len, b * n_head, head_dim)
                k_cache = self.allocate(shape, np.float32, pin_memory=False)
                v_cache = self.allocate(shape, np.float32, pin_memory=False)
                self.attention_compute_workspace.append((k_cache, v_cache))
        else:
            # Compressed cache keeps its workspace inside the compressed device.
            self.compressed_device.init_attention_compute_workspace(
                config, task, policy)

    def next_attention_compute_workspace(self):
        # Round-robin over the pre-allocated (k_cache, v_cache) pairs.
        self.workspace_pt = (self.workspace_pt + 1) % len(
            self.attention_compute_workspace)
        return self.attention_compute_workspace[self.workspace_pt]

    def del_attention_compute_workspace(self):
        self.attention_compute_workspace = None

    def gen_attention_mask(self, token_ids, pad_token_id, donate):
        """Return a boolean mask that is False exactly on padding positions."""
        data = token_ids.data.ne(pad_token_id)
        if donate[0]: token_ids.delete()
        return TorchTensor.create_from_torch(data, self)

    def extend_attention_mask(self, attention_mask, donate):
        """Append a ones-column to the mask for one newly generated token."""
        bs = attention_mask.shape[0]
        data = torch.concat((attention_mask.data,
            torch.ones((bs, 1), dtype=attention_mask.dtype, device=self.dev)), dim=1)
        if donate[0]: attention_mask.delete()
        return TorchTensor.create_from_torch(data, self)

    def opt_input_embed(self, inputs, attention_mask, w_token, w_pos, pad_token_id, donate):
        """OPT input side: token embedding + (mask-derived) position embedding."""
        # decompress weights
        if w_token.device.device_type == DeviceType.COMPRESSED:
            w_token = w_token.device.decompress(w_token)
            w_pos = w_pos.device.decompress(w_pos)

        token_ids = inputs.data
        mask = attention_mask.data
        if donate[0]: inputs.delete()
        if donate[1]: attention_mask.delete()

        # token embedding
        token_embed = F.embedding(token_ids, w_token.data, pad_token_id)

        # pos embedding
        # Positions count only non-pad tokens; the +1 offset matches OPT's
        # learned positional embedding convention.
        positions = torch.cumsum(mask, dim=1).int() * mask + 1

        # cut positions if `past_key_values_length` is > 0
        past_key_values_length = mask.shape[1] - token_ids.shape[1]
        positions = positions[:, past_key_values_length:]

        pos_embed = F.embedding(positions, w_pos.data)

        data = token_embed + pos_embed
        return TorchTensor.create_from_torch(data, self)

    def opt_output_embed(self, inputs, w_ln, b_ln, w_token, donate,
                         do_sample, temperature):
        """Final layer norm + tied LM head; returns next-token ids (b, 1)."""
        # decompress weights
        if w_token.device.device_type == DeviceType.COMPRESSED:
            w_token = w_token.device.decompress(w_token)

        b, s, h = inputs.shape

        hidden = F.layer_norm(inputs.data, (h,), weight=w_ln.data, bias=b_ln.data)
        if donate[0]: inputs.delete()

        # output embedding
        logits = F.linear(hidden, w_token.data)
        last_token_logits = logits[:,-1,:]

        if do_sample and not temperature < 1e-5:
            # Temperature sampling; near-zero temperature falls back to greedy.
            probs = torch.softmax(last_token_logits / temperature, dim=-1)
            ids = torch.multinomial(probs, num_samples=1)
        else:
            ids = last_token_logits.argmax(dim=1, keepdim=True)
        return TorchTensor.create_from_torch(ids, self)

    def init_cache_one_gpu_batch(self, config, task, policy):
        """Allocate fp16 K/V caches for one GPU batch on this device."""
        num_head, hidden_size, prompt_len, gen_len, gpu_batch_size = (
            config.n_head, config.input_dim, task.prompt_len, task.gen_len,
            policy.gpu_batch_size)
        shape = (prompt_len + gen_len - 1, gpu_batch_size * num_head, hidden_size // num_head)
        # NOTE: disable pin_memory due to high memory overhead
        pin_memory = False
        k_cache = self.allocate(shape, np.float16, pin_memory=pin_memory)
        v_cache = self.allocate(shape, np.float16, pin_memory=pin_memory)
        return k_cache, v_cache

    def mha(self, inputs, attention_mask, w_q, b_q, w_k, b_k, w_v, b_v,
            w_out, b_out, w_ln, b_ln, n_head, donate, compress_cache, comp_config):
        """Multi-head attention (prefill phase)."""
        # decompress weights
        if w_q.device.device_type == DeviceType.COMPRESSED:
            w_q = w_q.device.decompress(w_q)
            w_k = w_k.device.decompress(w_k)
            w_v = w_v.device.decompress(w_v)
            w_out = w_out.device.decompress(w_out)

        b, s, h = inputs.shape
        head_dim = h // n_head
        scaling = head_dim ** -0.5

        hidden = F.layer_norm(inputs.data, (h,), weight=w_ln.data, bias=b_ln.data)

        # shape: (b, s, h)
        q = F.linear(hidden, w_q.data, bias=b_q.data) * scaling
        k = F.linear(hidden, w_k.data, bias=b_k.data)
        v = F.linear(hidden, w_v.data, bias=b_v.data)
        # shape: (b, s, n_head, head_dim)
        q = q.view(b, s, n_head, head_dim)
        k = k.view(b, s, n_head, head_dim)
        v = v.view(b, s, n_head, head_dim)

        # shape: (b * n_head, s, head_dim)
        q = q.permute(0, 2, 1, 3).reshape(b * n_head, s, head_dim)
        # shape: (b * n_head, head_dim, s)
        k = k.permute(0, 2, 3, 1).reshape(b * n_head, head_dim, s)
        # shape: (b * n_head, s, head_dim)
        v = v.permute(0, 2, 1, 3).reshape(b * n_head, s, head_dim)

        # shape: (b * n_head, s, s)
        attn_weights = torch.bmm(q, k)

        # shape: (b, 1, s, s)
        # Causal mask combined with the padding mask.
        idx = torch.arange(s, device=self.dev)
        causal_mask = (idx <= idx.view(s, 1)).view(1, 1, s, s)
        mask = attention_mask.data.view(b, 1, 1, s) & causal_mask

        # shape: (b, n_head, s, s)
        attn_weights = attn_weights.view(b, n_head, s, s)
        # -1e4 acts as -inf in fp16 without overflowing.
        attn_weights = torch.where(mask, attn_weights, -1e4)
        attn_weights = attn_weights.view(b * n_head, s, s)
        attn_weights = F.softmax(attn_weights, dim=2)
        # shape: (b, n_head, s, head_dim)
        value = torch.bmm(attn_weights, v).view(b, n_head, s, head_dim)
        # shape: (b, s, h)
        value = value.transpose(1, 2).reshape(b, s, h)
        value = F.linear(value, w_out.data, bias=b_out.data)

        # Residual connection (in place).
        value.add_(inputs.data)

        if donate[0]: inputs.delete()
        if donate[1]: attention_mask.delete()

        # (s, b * n_head, head_dim)
        k = k.permute(2, 0, 1)
        v = v.permute(1, 0, 2)

        if compress_cache:
            k = self.compressed_device.compress(k, comp_config)
            v = self.compressed_device.compress(v, comp_config)
        else:
            k = TorchTensor.create_from_torch(k, self)
            v = TorchTensor.create_from_torch(v, self)

        return TorchTensor.create_from_torch(value, self), k, v

    def mha_gen(self, inputs, attention_mask, w_q, b_q, w_k, b_k, w_v, b_v,
                w_out, b_out, w_ln, b_ln, n_head, k_cache, v_cache, donate,
                attn_sparsity, compress_cache, comp_config):
        """Multi-head attention (decoding phase)."""
        # decompress weights
        if w_q.device.device_type == DeviceType.COMPRESSED:
            w_q = w_q.device.decompress(w_q)
            w_k = w_k.device.decompress(w_k)
            w_v = w_v.device.decompress(w_v)
            w_out = w_out.device.decompress(w_out)

        b, tgt_s, h = inputs.shape
        src_s = attention_mask.shape[1]
        head_dim = h // n_head
        scaling = head_dim ** -0.5

        hidden = F.layer_norm(inputs.data, (h,), weight=w_ln.data, bias=b_ln.data)

        # shape: (b, 1, h)
        q = F.linear(hidden, w_q.data, bias=b_q.data) * scaling
        k = F.linear(hidden, w_k.data, bias=b_k.data)
        v = F.linear(hidden, w_v.data, bias=b_v.data)
        # shape: (b, 1, n_head, head_dim)
        q = q.view(b, tgt_s, n_head, head_dim)
        k = k.view(b, tgt_s, n_head, head_dim)
        v = v.view(b, tgt_s, n_head, head_dim)

        # shape: (b * n_head, 1, head_dim)
        q = q.permute(0, 2, 1, 3).reshape(b * n_head, tgt_s, head_dim)
        # shape: (1, b * n_head, head_dim)
        k_new = k.permute(1, 0, 2, 3).reshape(tgt_s, b * n_head, head_dim)
        # shape: (1, b * n_head, head_dim)
        v_new = v.permute(1, 0, 2, 3).reshape(tgt_s, b * n_head, head_dim)

        if isinstance(k_cache, TorchTensor):
            if attn_sparsity >= 1.0:  # Dense attention
                if compress_cache:
                    # shape: (s, b * n_head, head_dim)
                    k = k_cache.device.decompress(k_cache)[:src_s]
                    v = v_cache.device.decompress(v_cache)[:src_s]
                else:
                    # shape: (s, b * n_head, head_dim)
                    k = k_cache.data[:src_s]
                    v = v_cache.data[:src_s]
                # Write this step's K/V into the last cache slot (in place).
                k[src_s - 1:src_s] = k_new
                v[src_s - 1:src_s] = v_new

                # shape: (b * n_head, head_dim, s)
                k = k.permute(1, 2, 0).reshape(b * n_head, head_dim, src_s)
                # shape: (b * n_head, s, head_dim)
                v = v.permute(1, 0, 2).reshape(b * n_head, src_s, head_dim)

                if k.is_cuda:
                    value = self._attention_value(q, k, v, attention_mask.data,
                        b, src_s, tgt_s, n_head, head_dim)
                else:
                    # Cache on CPU: compute attention in fp32 on CPU, then
                    # move the result back to GPU as fp16.
                    q = q.float().cpu()
                    k, v = k.float(), v.float()
                    value = self._attention_value(q, k, v, attention_mask.data,
                        b, src_s, tgt_s, n_head, head_dim).cuda().half()
            else:  # Sparse attention
                # shape: (s, b * n_head, head_dim)
                k = k_cache.data[:src_s]
                k[src_s - 1:src_s] = k_new

                # shape: (b * n_head, head_dim, s)
                k = k.permute(1, 2, 0).reshape(b * n_head, head_dim, src_s)

                if k.is_cuda:
                    value = self._sparse_attention_value(q, k, v_new, v_cache,
                        attention_mask.data, b, src_s, tgt_s, n_head, head_dim,
                        attn_sparsity)
                else:
                    q = q.float().cpu()
                    value = self._sparse_attention_value(q, k, v_new, v_cache,
                        attention_mask.data, b, src_s, tgt_s, n_head, head_dim,
                        attn_sparsity).cuda().half()
        else:  # Mixed device attention
            assert attn_sparsity >= 1.0
            value = self._mixed_device_attention(q, k_cache, v_cache,
                k_new, v_new, attention_mask.data, b, src_s, tgt_s,
                n_head, head_dim)

        # shape: (b, 1, h)
        value = value.transpose(1, 2).view(b, tgt_s, h)
        value = F.linear(value, w_out.data, bias=b_out.data)

        # Residual connection (in place).
        value.add_(inputs.data)

        if donate[0]: inputs.delete()
        if donate[1]: attention_mask.delete()

        if compress_cache:
            if comp_config.group_dim == 0:
                # Only the tail that does not fill a whole compression group
                # needs to be (re)compressed.
                s_ = src_s // comp_config.group_size * comp_config.group_size
                k_new = k[:, :, s_:].permute(2, 0, 1)
                v_new = v[:, s_:, :].permute(1, 0, 2)
            k_new = self.compressed_device.compress(k_new, comp_config)
            v_new = self.compressed_device.compress(v_new, comp_config)
        else:
            k_new = TorchTensor.create_from_torch(k_new, self)
            v_new = TorchTensor.create_from_torch(v_new, self)

        return TorchTensor.create_from_torch(value, self), k_new, v_new

    def _attention_weights(self, q, k, mask, b, src_s, n_head):
        """Masked softmax attention weights for a single decoding step."""
        # shape: (b * n_head, 1, s)
        attn_weights = torch.bmm(q, k)
        # shape: (b, 1, 1, s)
        mask = mask.view(b, 1, 1, src_s)
        # shape: (b * n_head, 1, s)
        attn_weights = attn_weights.view(b, n_head, 1, src_s)
        attn_weights = torch.where(mask, attn_weights, -1e4)
        attn_weights = attn_weights.view(b * n_head, 1, src_s)
        attn_weights = F.softmax(attn_weights, dim=2)
        return attn_weights

    def _attention_value(self, q, k, v, mask, b, src_s, tgt_s, n_head, head_dim):
        """Dense attention output for a single decoding step."""
        # shape: (b * n_head, 1, s)
        attn_weights = self._attention_weights(q, k, mask, b, src_s, n_head)
        # shape: (b, n_head, 1, head_dim)
        return torch.bmm(attn_weights, v).view(b, n_head, tgt_s, head_dim)

    def _sparse_attention_value(self, q, k, v_new, v_cache, mask, b,
                                src_s, tgt_s, n_head, head_dim, attn_sparsity):
        """Top-k sparse attention: gather only the highest-weight V rows."""
        # shape: (b * n_head, 1, s)
        attn_weights = self._attention_weights(q, k, mask, b, src_s, n_head)
        # Keep a fraction `attn_sparsity` of the history; the newest token
        # (last position) is always included.
        topk = int(attn_sparsity * (attn_weights.shape[2] - 1))
        topk_weights, topk_indices = attn_weights[:, :, :-1].topk(
            topk, dim=2, sorted=False)
        topk_indices = topk_indices.view(b * n_head, topk).transpose(0, 1)
        # shape: (b * n_head, 1, topk+1)
        attn_weights = torch.cat([topk_weights,
            attn_weights[:, :, -1].unsqueeze(-1)], dim=-1)

        if k.is_cuda:
            v_home = v_cache
            v_buf = self.allocate((topk+1, b*n_head, head_dim), np.float16)
            topk_indices = topk_indices.cpu()
        else:
            (v_home, v_buf) = v_cache

        # shape: (s, b * n_head, head_dim)
        # Gather the selected V rows from the cache's home device.
        indices_src = topk_indices
        indices_tgt = (slice(0, indices_src.shape[0]), slice(0, v_home.shape[1]))
        general_copy(v_buf, indices_tgt, v_home, indices_src)
        v_home.device.synchronize()

        # shape: (topk+1, b * n_head, head_dim)
        v = v_buf.data[:topk+1]
        v[topk:topk+1] = v_new
        # shape: (b * n_head, topk+1, head_dim)
        v = v.permute(1, 0, 2).reshape(b * n_head, topk+1, head_dim)

        # shape: (b * n_head, 1, head_dim)
        return torch.bmm(attn_weights, v).view(b, n_head, tgt_s, head_dim)

    def _mixed_device_attention(self, q, k_cache, v_cache, k_new, v_new,
                                mask, b, src_s, tgt_s, n_head, head_dim):
        # The caches are stored on both gpu and cpu.
        # Compute attention on gpu for caches stored on gpu.
        # Compute attention on cpu for caches stored on cpu.
        k_gpu, k_cpu = k_cache[0].data, k_cache[1].data
        v_gpu, v_cpu = v_cache[0].data, v_cache[1].data
        seg = k_gpu.shape[1]

        # Compute GPU part
        b_gpu = seg // n_head
        q_gpu = q[:seg]
        # shape: (s, b * n_head, head_dim)
        k_gpu = k_gpu[:src_s, :seg, :]
        v_gpu = v_gpu[:src_s, :seg, :]
        k_gpu[src_s-1:src_s, :, :] = k_new[:, :seg, :]
        v_gpu[src_s-1:src_s, :, :] = v_new[:, :seg, :]
        # shape: (b * n_head, head_dim, s)
        k_gpu = k_gpu.permute(1, 2, 0)
        # shape: (b * n_head, s, head_dim)
        v_gpu = v_gpu.permute(1, 0, 2)
        mask_gpu = mask[:b_gpu].cuda()
        value_gpu = self._attention_value(q_gpu, k_gpu, v_gpu, mask_gpu,
            b_gpu, src_s, tgt_s, n_head, head_dim)

        # Compute CPU Part
        b_cpu = b - b_gpu
        q_cpu = q[seg:].float().cpu()
        # shape: (s, b * n_head, head_dim)
        k_cpu = k_cpu[:src_s, seg:, :]
        v_cpu = v_cpu[:src_s, seg:, :]
        k_cpu[src_s-1:src_s, :, :] = k_new[:, seg:, :]
        v_cpu[src_s-1:src_s, :, :] = v_new[:, seg:, :]
        # shape: (b * n_head, head_dim, s)
        k_cpu = k_cpu.permute(1, 2, 0)
        # shape: (b * n_head, s, head_dim)
        v_cpu = v_cpu.permute(1, 0, 2)
        mask_cpu = mask[b_gpu:]
        value_cpu = self._attention_value(q_cpu, k_cpu, v_cpu, mask_cpu,
            b_cpu, src_s, tgt_s, n_head, head_dim)

        # Concatenate both halves back on GPU in fp16.
        value = torch.cat([value_gpu, value_cpu.cuda().half()], dim=0)
        return value

    def mlp(self, inputs, wi, bi, wo, bo, w_ln, b_ln, donate):
        """OPT MLP block: layer norm -> linear -> ReLU -> linear -> residual."""
        # decompress weights
        if wi.device.device_type == DeviceType.COMPRESSED:
            wi = wi.device.decompress(wi)
            wo = wo.device.decompress(wo)

        b, s, h = inputs.shape

        out = F.layer_norm(inputs.data, (h,), weight=w_ln.data, bias=b_ln.data)
        out = F.linear(out, wi.data, bias=bi.data)
        F.relu(out, inplace=True)
        out = F.linear(out, wo.data, bias=bo.data)

        # Residual connection (in place).
        out.add_(inputs.data)
        if donate[0]: inputs.delete()
        return TorchTensor.create_from_torch(out, self)

    def synchronize(self):
        torch.cuda.synchronize()

    def mem_stats(self):
        """Return (current, peak) memory usage in bytes.

        NOTE(review): CPU peak is reported as 0 because only a current
        snapshot is available via cpu_mem_stats().
        """
        if self.device_type == DeviceType.CUDA:
            cur_mem = torch.cuda.memory_allocated(self.dev)
            peak_mem = torch.cuda.max_memory_allocated(self.dev)
        elif self.device_type == DeviceType.CPU:
            cur_mem = cpu_mem_stats()
            peak_mem = 0
        else:
            raise NotImplementedError()

        return cur_mem, peak_mem

    def print_stats(self, output_file=None):
        """Print (or write to `output_file`) memory stats; returns (cur, peak)."""
        torch.cuda.synchronize()
        cur_mem, peak_mem = self.mem_stats()

        if output_file is not None:
            with open(output_file, "w") as f:
                f.write(f"TorchDevice: {self.name}\n")
                f.write(f"  cur_mem: {cur_mem/GB:.4f} GB, "
                        f" peak_mem: {peak_mem/GB:.4f} GB\n")
        else:
            print(f"TorchDevice: {self.name}")
            print(f"  cur_mem: {cur_mem/GB:.4f} GB, "
                  f" peak_mem: {peak_mem/GB:.4f} GB")

        return cur_mem, peak_mem

    def __str__(self):
        return f"TorchDevice(name={self.name})"
class TorchDisk:
    """Manage tensors stored on a disk."""

    def __init__(self, path, mem_capacity=None, cuda_id=0, num_copy_threads=4):
        # path: directory that will hold one .npy memmap file per tensor.
        self.name = path
        self.path = os.path.abspath(os.path.expanduser(path))
        self.mem_capacity = mem_capacity

        self.device_type = DeviceType.DISK
        self.compressed_device = TorchCompressedDevice(self)

        if os.path.exists(self.path):
            assert os.path.isdir(self.path)
        else:
            os.makedirs(self.path)

        self.links = {}

        # Copy threads
        # Background workers that service asynchronous copy requests
        # submitted through submit_copy().
        self.copy_queue = queue.Queue()
        self.copy_threads = [
            threading.Thread(
                target=copy_worker_func, args=(self.copy_queue, cuda_id)
            ) for _ in range(num_copy_threads)
        ]
        for t in self.copy_threads:
            t.start()

        # Register this instance as the module-wide disk device singleton.
        global global_disk_device
        global_disk_device = self

    def add_link(self, link):
        # Index the link by whichever endpoint is not this device.
        dst = link.b if link.a == self else link.a
        self.links[dst] = link

    def allocate(self, shape, dtype, pin_memory=None, name=None):
        """Create a tensor backed by a fresh .npy memmap file under self.path."""
        name = name or TorchTensor.next_name()
        path = os.path.join(self.path, name)
        np.lib.format.open_memmap(path, mode="w+", shape=shape, dtype=dtype)
        return TorchTensor(shape, np_dtype_to_torch_dtype[dtype],
                           path, self, name=name)

    def delete(self, tensor):
        # For disk tensors, tensor.data is the backing file's path.
        if os.path.exists(tensor.data) and tensor.delete_file:
            os.remove(tensor.data)

    def init_cache_one_gpu_batch(self, config, task, policy):
        """Allocate fp16 K/V caches for one GPU batch as on-disk memmaps."""
        num_head, hidden_size, prompt_len, gen_len, gpu_batch_size = (
            config.n_head, config.input_dim, task.prompt_len, task.gen_len,
            policy.gpu_batch_size)
        shape = (prompt_len + gen_len - 1, gpu_batch_size * num_head, hidden_size // num_head)
        k_cache = self.allocate(shape, np.float16)
        v_cache = self.allocate(shape, np.float16)
        return k_cache, v_cache

    def submit_copy(self, *args):
        # Enqueue an asynchronous copy job for the worker threads.
        self.copy_queue.put_nowait(args)

    def synchronize(self):
        # Block until every queued copy job has been processed.
        self.copy_queue.join()

    def close_copy_threads(self):
        """Signal every copy worker to exit and join them all."""
        for _ in range(len(self.copy_threads)):
            self.copy_queue.put_nowait(None)  # None is the stop sentinel
        for t in self.copy_threads:
            t.join()
        self.copy_queue.join()
        self.copy_queue = None

    def mem_stats(self):
        raise NotImplementedError()

    def print_stats(self):
        raise NotImplementedError()

    def __del__(self):
        # Best-effort shutdown if the user never called close_copy_threads().
        if self.copy_queue:
            self.close_copy_threads()
class TorchMixedDevice:
    """Manage tensors stored on multiple physical devices."""

    def __init__(self, base_devices):
        # base_devices: ordered list of underlying devices,
        # conventionally [gpu, cpu, disk].
        self.name = "mixed"
        self.device_type = DeviceType.MIXED
        self.base_devices = base_devices

    def allocate(self, shape, dtype, seg_lengths, pin_memory=None, name=None):
        """Allocate one logical tensor split along SEG_DIM across base devices.

        Args:
            shape: full logical shape of the tensor.
            dtype: numpy dtype of the elements.
            seg_lengths: length of each device's segment along SEG_DIM; must
                sum to shape[SEG_DIM] and have one entry per base device.

        Returns:
            A TorchTensor whose ``data`` is ``(tensors, seg_points)`` where
            ``tensors[i]`` is the i-th device's segment (None if empty) and
            ``seg_points`` are the cumulative split offsets.
        """
        assert sum(seg_lengths) == shape[SEG_DIM]
        assert len(seg_lengths) == len(self.base_devices)
        seg_points = [0]
        for l in seg_lengths:
            seg_points.append(seg_points[-1] + l)

        devices = self.base_devices
        tensors = []
        for i in range(len(devices)):
            seg_len = seg_points[i+1] - seg_points[i]
            if seg_len == 0:
                # This device holds no slice of the tensor.
                tensors.append(None)
            else:
                seg_shape = shape[:SEG_DIM] + (seg_len,) + shape[SEG_DIM+1:]
                tensors.append(devices[i].allocate(seg_shape, dtype,
                    pin_memory=pin_memory))

        return TorchTensor(shape, np_dtype_to_torch_dtype[dtype],
                           (tensors, seg_points), self, name=name)

    def delete(self, tensor):
        """Delete every per-device segment of `tensor`.

        BUG FIX: the original iterated over ``self.tensor.data[0]``, but this
        class has no ``tensor`` attribute, so every call raised
        AttributeError. The argument ``tensor`` is the one to delete.
        """
        for x in tensor.data[0]:
            if x:
                x.delete()

    def init_cache_one_gpu_batch(self, config, task, policy):
        """Allocate fp16 K/V caches split across GPU/CPU/disk per the policy."""
        num_head, hidden_size, prompt_len, gen_len, gpu_batch_size = (
            config.n_head, config.input_dim, task.prompt_len, task.gen_len,
            policy.gpu_batch_size)
        shape = (prompt_len + gen_len - 1, gpu_batch_size * num_head, hidden_size // num_head)

        # We have to round to a multiple of `num_head`
        if policy.cache_disk_percent == 0:
            len_gpu = int(shape[SEG_DIM] * policy.cache_gpu_percent / 100) // num_head * num_head
            len_cpu = shape[SEG_DIM]  - len_gpu
            len_disk = 0
        else:
            len_gpu = int(shape[SEG_DIM] * policy.cache_gpu_percent / 100) // num_head * num_head
            len_cpu = int(shape[SEG_DIM] * policy.cache_cpu_percent / 100) // num_head * num_head
            len_disk = shape[SEG_DIM] - len_gpu - len_cpu
        lens = [len_gpu, len_cpu, len_disk]

        pin_memory = False
        k_cache = self.allocate(shape, np.float16,
            seg_lengths=lens, pin_memory=pin_memory)
        v_cache = self.allocate(shape, np.float16,
            seg_lengths=lens, pin_memory=pin_memory)
        return k_cache, v_cache
# Module-wide timer registry (Timers is defined elsewhere in this file).
timers = Timers()

# Bytes per gibibyte, used for human-readable memory/size reporting.
GB = 1 << 30
class ExecutionEnv:
    """Hardware environment."""
    # NOTE(review): upstream this class is declared with @dataclasses.dataclass
    # and `create` with @classmethod; the decorators appear to have been lost
    # in extraction — confirm against the original source before relying on
    # keyword construction (cls(gpu=..., ...)) working as written.
    gpu: Any = None
    cpu: Any = None
    disk: Any = None
    mixed: Any = None

    def create(cls, offload_dir):
        """Build the standard env: one GPU, the CPU, a disk offload dir, and a
        mixed device layered over all three. Expects classmethod semantics."""
        # fix recursive import
        from flexgen.pytorch_backend import TorchDevice, TorchDisk, TorchMixedDevice
        gpu = TorchDevice("cuda:0")
        cpu = TorchDevice("cpu")
        disk = TorchDisk(offload_dir)
        return cls(gpu=gpu, cpu=cpu, disk=disk, mixed=TorchMixedDevice([gpu, cpu, disk]))

    def close_copy_threads(self):
        """Shut down the disk device's background copy worker threads."""
        self.disk.close_copy_threads()
def project_decode_latency(costs, prompt_len, gen_len):
    """Project the total decoding latency for a full generation.

    Args:
        costs: per-step latencies in seconds; costs[0] is the prefill step and
            costs[1:] are the measured decoding steps (possibly fewer than
            gen_len - 1 when generation was cut short).
        prompt_len: prompt length. Unused; kept for interface compatibility
            (the original branched on gen_len / prompt_len but both branches
            were identical).
        gen_len: the full generation length to project to.

    Returns:
        Estimated decoding latency: the first `warmup` decoding steps are
        summed exactly and the remaining (gen_len - 1 - warmup) steps are
        estimated by the mean of the post-warmup measurements.
    """
    decode_costs = costs[1:]

    # The original code had an `if gen_len / prompt_len < 0.1` branch whose
    # two arms were byte-identical; collapsed into a single path.
    warmup = 2
    decode_latency = (sum(decode_costs[:warmup]) +
                      np.mean(decode_costs[warmup:]) * (gen_len - 1 - warmup))
    return decode_latency
def write_benchmark_log(filename, model_size, cache_size, hidden_size,
        gpu_peak_mem, projected, prefill_latency, prefill_throughput,
        decode_latency, decode_throughput, total_latency, total_throughput):
    """Append a one-record benchmark summary to `filename`.

    All sizes are in bytes (rendered in GB), latencies in seconds and
    throughputs in token/s. Returns the summary string that was written
    (without the trailing newline).
    """
    rows = [
        f"model size: {model_size/GB:.3f} GB\t"
        f"cache size: {cache_size/GB:.3f} GB\t"
        f"hidden size (p): {hidden_size/GB:.3f} GB",

        f"peak gpu mem: {gpu_peak_mem / GB:.3f} GB\t"
        f"projected: {projected}",

        f"prefill latency: {prefill_latency:.3f} s\t"
        f"prefill throughput: {prefill_throughput:.3f} token/s",

        f"decode latency: {decode_latency:.3f} s\t"
        f"decode throughput: {decode_throughput:.3f} token/s",

        f"total latency: {total_latency:.3f} s\t"
        f"total throughput: {total_throughput:.3f} token/s",
    ]
    log_str = "\n".join(rows)

    with open(filename, "a") as fout:
        fout.write(log_str + "\n")

    return log_str
def run_flexgen(args):
    """Run the FlexGen generation benchmark described by the parsed CLI `args`.

    Builds the devices/policy/model, runs a 1-token warmup generation, times a
    full generation, then prints and logs latency/throughput statistics.
    """
    print(f"<run_flexgen>: args.model: {args.model}")
    # Galactica uses its own tokenizer; every OPT size shares the opt-30b one.
    if args.model == "facebook/galactica-30b":
        tokenizer = AutoTokenizer.from_pretrained("facebook/galactica-30b", padding_side="left")
    else:
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-30b", padding_side="left")
    num_prompts = args.num_gpu_batches * args.gpu_batch_size
    prompt_len, gen_len, cut_gen_len = args.prompt_len, args.gen_len, args.cut_gen_len

    # Task and policy
    warmup_inputs = get_test_inputs(32, num_prompts, tokenizer)
    inputs = get_test_inputs(prompt_len, num_prompts, tokenizer)

    gpu = TorchDevice("cuda:0")
    cpu = TorchDevice("cpu")
    disk = TorchDisk(args.offload_dir)
    env = ExecutionEnv(gpu=gpu, cpu=cpu, disk=disk, mixed=TorchMixedDevice([gpu, cpu, disk]))

    # args.percent holds the six placement percentages:
    # weights gpu/cpu, cache gpu/cpu, activations gpu/cpu.
    policy = Policy(args.gpu_batch_size, args.num_gpu_batches,
                    args.percent[0], args.percent[1],
                    args.percent[2], args.percent[3],
                    args.percent[4], args.percent[5],
                    args.overlap, args.sep_layer, args.pin_weight,
                    args.cpu_cache_compute, args.attn_sparsity,
                    args.compress_weight,
                    CompressionConfig(num_bits=4, group_size=64,
                                      group_dim=0, symmetric=False),
                    args.compress_cache,
                    CompressionConfig(num_bits=4, group_size=64,
                                      group_dim=2, symmetric=False))
    assert not (args.compress_cache and args.attn_sparsity < 1.0), "Not implemented"

    opt_config = get_opt_config(args.model)
    cache_size = opt_config.cache_bytes(num_prompts, prompt_len + gen_len)
    hidden_size = opt_config.hidden_bytes(num_prompts, prompt_len + gen_len)
    print(f"model size: {opt_config.model_bytes()/GB:.3f} GB, "
          f"cache size: {cache_size/GB:.3f} GB, "
          f"hidden size (prefill): {hidden_size/GB:.3f} GB")

    print("init weight...")
    model = OptLM(opt_config, env, args.path, policy)

    try:
        print("warmup - generate")
        output_ids = model.generate(
            warmup_inputs, max_new_tokens=1, verbose=args.verbose)

        print("benchmark - generate")
        timers("generate").reset()
        output_ids = model.generate(
            inputs, max_new_tokens=args.gen_len,
            debug_mode=args.debug_mode, cut_gen_len=cut_gen_len, verbose=args.verbose)
        costs = timers("generate").costs
    finally:
        # Always stop the disk copy threads, even on failure.
        env.close_copy_threads()

    # Log output
    prefill_latency = costs[0]
    prefill_throughput = num_prompts * prompt_len / prefill_latency
    if cut_gen_len:  # project latency of cut_gen_len to gen_len
        decode_latency = project_decode_latency(costs, prompt_len, gen_len)
    else:
        decode_latency = sum(costs[1:])
    # Guard against division by zero for degenerate timings.
    decode_throughput = num_prompts * (gen_len - 1) / max(decode_latency, 1e-10)
    num_generated_tokens = num_prompts * gen_len
    total_latency = prefill_latency + decode_latency
    total_throughput = num_generated_tokens / total_latency
    _, gpu_peak_mem = gpu.mem_stats()
    _, cpu_peak_mem = cpu.mem_stats()

    # Decoding output is only meaningful with real (non-dummy) weights.
    if DUMMY_WEIGHT not in args.path:
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        show_str = "Outputs:\n" + 70 * '-' + "\n"
        # Show only the first and last prompt's output.
        for i in [0, len(outputs)-1]:
            show_str += f"{i}: {outputs[i]}\n"
            show_str += "-" * 70 + "\n"
        if args.verbose >= 2:
            print(show_str)

    gpu.print_stats()
    cpu.print_stats()
    # Throughput numbers are projections (not full measurements) if the run
    # was cut short or in a debug mode.
    projected = bool(args.debug_mode or cut_gen_len)

    if args.log_file == "auto":
        filename = get_filename(args) + ".log"
    else:
        filename = args.log_file

    log_str = write_benchmark_log(filename,
        opt_config.model_bytes(), cache_size, hidden_size,
        gpu_peak_mem, projected, prefill_latency, prefill_throughput,
        decode_latency, decode_throughput, total_latency, total_throughput)
    if args.verbose >= 1:
        print(log_str)
10,183 | import argparse
import dataclasses
import os
import pickle
import time
from typing import Union, List, Optional
import numpy as np
from tqdm import tqdm
import torch
from transformers import AutoTokenizer
from flexgen.compression import CompressionConfig
from flexgen.opt_config import OptConfig, get_opt_config, download_opt_weights
from flexgen.pytorch_backend import (TorchDevice, TorchDisk, TorchLink,
TorchMixedDevice, DeviceType, general_copy, fix_recursive_import)
from flexgen.timer import timers
from flexgen.utils import (Task, ExecutionEnv, GB, T, ValueHolder,
array_1d, array_2d, array_3d, str2bool, project_decode_latency,
torch_mem_stats, torch_dtype_to_np_dtype, write_benchmark_log,
read_benchmark_log)
def str2bool(v):
    """Parse a boolean-ish command-line value.

    Accepts real bools unchanged; otherwise matches common textual spellings
    case-insensitively. Raises argparse.ArgumentTypeError for anything else.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def add_parser_arguments(parser):
    """Register FlexGen's command-line arguments on the given argparse parser."""
    # Model selection and weight/offload locations.
    parser.add_argument("--model", type=str, default="facebook/opt-6.7b",
        help="The model name.")
    parser.add_argument("--path", type=str, default="~/opt_weights",
        help="The path to the model weights. If there are no cached weights, "
             "FlexGen will automatically download them from HuggingFace.")
    parser.add_argument("--offload-dir", type=str, default="~/flexgen_offload_dir",
        help="The directory to offload tensors. ")
    # Workload shape.
    parser.add_argument("--prompt-len", type=int, default=512)
    parser.add_argument("--gen-len", type=int, default=32)
    parser.add_argument("--cut-gen-len", type=int,
        help="Cut generation length for fast debugging.")
    parser.add_argument("--debug-mode", type=str,
        choices=["fewer_batch", "breakdown"])
    # Batching.
    parser.add_argument("--gpu-batch-size", type=int, default=4)
    parser.add_argument("--num-gpu-batches", type=int, default=1)
    # Placement policy: six GPU/CPU percentages (disk gets the remainder).
    parser.add_argument("--percent", nargs="+", type=int,
        default=[100, 0, 100, 0, 100, 0],
        help="Six numbers. They are "
         "the percentage of weight on GPU, "
         "the percentage of weight on CPU, "
         "the percentage of attention cache on GPU, "
         "the percentage of attention cache on CPU, "
         "the percentage of activations on GPU, "
         "the percentage of activations on CPU")
    # Execution options (str2bool flags accept an optional value, e.g.
    # "--sep-layer false" or bare "--sep-layer" for True).
    parser.add_argument("--sep-layer", type=str2bool, nargs='?',
        const=True, default=True)
    parser.add_argument("--pin-weight", type=str2bool, nargs="?",
        const=True, default=True)
    parser.add_argument("--cpu-cache-compute", action="store_true")
    parser.add_argument("--attn-sparsity", type=float, default=1.0)
    # Compression options.
    parser.add_argument("--compress-weight", action="store_true",
        help="Whether to compress weight.")
    parser.add_argument("--compress-cache", action="store_true",
        help="Whether to compress cache.")
    # Logging / verbosity.
    parser.add_argument("--log-file", type=str, default="auto")
    parser.add_argument("--no-log", action="store_true")
    parser.add_argument("--verbose", type=int, default=2)
    parser.add_argument("--overlap", type=str2bool, nargs='?',
        const=True, default=True)
10,184 | import argparse
from itertools import count
import os
import pickle
import traceback
from typing import Union, List, Optional
import numpy as np
import torch
import torch.distributed as dist
from transformers import AutoTokenizer
from flexgen.compression import CompressionConfig
from flexgen.dist_utils import initialize_distributed
from flexgen.flex_opt import (Policy, InputEmbed, OutputEmbed, SelfAttention,
MLP, TransformerLayer, OptLM, get_filename,
add_parser_arguments, get_test_inputs,
DUMMY_WEIGHT)
from flexgen.opt_config import get_opt_config
from flexgen.pytorch_backend import (TorchDevice, TorchDisk, TorchLink,
TorchMixedDevice, TorchTensor)
from flexgen.timer import timers
from flexgen.utils import (Task, ExecutionEnv, GB, T, ValueHolder,
array_1d, array_2d, array_3d, array_4d, str2bool, project_decode_latency)
class DistOptLM(OptLM):
def __init__(self, config, env, path, policy, pipeline_rank,
num_pipeline_stages, comm_device, num_inner_iterations=None,
async_comm=False):
def load_weight(self, b, t, i, j, k):
def init_cache(self, t, j, k):
def load_cache(self, t, i, j, k):
def store_cache(self, t, i, j, k):
def delete_cache(self, t, j, k):
def load_hidden(self, b, t, i, j, k):
def store_hidden(self, b, t, i, j, k):
def send_hidden(self, t, i, j, k, tag=0, async_=False):
def recv_hidden(self, t, i, j, k, tag=0, async_=False):
def move_value_callback():
def compute_layer(self, t, i, j, k):
def update_attention_mask(self, b, t, i, k):
def generate(self,
inputs: Union[np.array, List[List[int]]],
max_new_tokens: int = 32,
do_sample: bool = False,
temperature: float = 1.0,
stop: Optional[int] = None,
debug_mode: Optional[str] = None,
cut_gen_len: Optional[int] = None,
verbose: int = 0):
def send_recv_hidden(self, sending_job, receiving_job):
def _send():
def _recv():
def generation_loop_normal(self):
def generation_loop_overlap_one_batch(self):
def generation_loop_overlap_multi_batch(self):
def comm_test(comm_device):
class CompressionConfig:
DUMMY_WEIGHT = "_DUMMY_"
class Policy:
def w_disk_percent(self):
def cache_disk_percent(self):
def act_disk_percent(self):
def get_filename(args):
def get_test_inputs(prompt_len, num_prompts, tokenizer):
def get_opt_config(name, **kwargs):
class TorchTensor:
def __init__(self, shape, dtype, data, device, name=None):
def bytes(self):
def next_name(cls):
def create_from_torch(cls, data, device, name=None):
def delete(self):
def load_from_np(self, np_array):
def load_from_np_file(self, filename):
def copy(self, dst, src_indices=None):
def smart_copy(self, dst, src_indices=None):
def move(self, dst):
def __str__(self):
class TorchDevice:
def __init__(self, name, mem_capacity=None, flops=None):
def add_link(self, link):
def allocate(self, shape, dtype, pin_memory=None, name=None):
def delete(self, tensor):
def init_attention_compute_workspace(self, config, task, policy):
def next_attention_compute_workspace(self):
def del_attention_compute_workspace(self):
def gen_attention_mask(self, token_ids, pad_token_id, donate):
def extend_attention_mask(self, attention_mask, donate):
def opt_input_embed(self, inputs, attention_mask, w_token, w_pos, pad_token_id, donate):
def opt_output_embed(self, inputs, w_ln, b_ln, w_token, donate,
do_sample, temperature):
def init_cache_one_gpu_batch(self, config, task, policy):
def mha(self, inputs, attention_mask, w_q, b_q, w_k, b_k, w_v, b_v,
w_out, b_out, w_ln, b_ln, n_head, donate, compress_cache, comp_config):
def mha_gen(self, inputs, attention_mask, w_q, b_q, w_k, b_k, w_v, b_v,
w_out, b_out, w_ln, b_ln, n_head, k_cache, v_cache, donate,
attn_sparsity, compress_cache, comp_config):
def _attention_weights(self, q, k, mask, b, src_s, n_head):
def _attention_value(self, q, k, v, mask, b, src_s, tgt_s, n_head, head_dim):
def _sparse_attention_value(self, q, k, v_new, v_cache, mask, b,
src_s, tgt_s, n_head, head_dim, attn_sparsity):
def _mixed_device_attention(self, q, k_cache, v_cache, k_new, v_new,
mask, b, src_s, tgt_s, n_head, head_dim):
def mlp(self, inputs, wi, bi, wo, bo, w_ln, b_ln, donate):
def synchronize(self):
def mem_stats(self):
def print_stats(self, output_file=None):
def __str__(self):
class TorchDisk:
def __init__(self, path, mem_capacity=None, cuda_id=0, num_copy_threads=4):
def add_link(self, link):
def allocate(self, shape, dtype, pin_memory=None, name=None):
def delete(self, tensor):
def init_cache_one_gpu_batch(self, config, task, policy):
def submit_copy(self, *args):
def synchronize(self):
def close_copy_threads(self):
def mem_stats(self):
def print_stats(self):
def __del__(self):
class TorchMixedDevice:
def __init__(self, base_devices):
def allocate(self, shape, dtype, seg_lengths, pin_memory=None, name=None):
def delete(self, tensor):
def init_cache_one_gpu_batch(self, config, task, policy):
timers = Timers()
GB = 1 << 30
class ExecutionEnv:
def create(cls, offload_dir):
def close_copy_threads(self):
def project_decode_latency(costs, prompt_len, gen_len):
def run_flexgen_dist(args):
t_name = args.model.replace("175b", "66b")
tokenizer = AutoTokenizer.from_pretrained(t_name, padding_side="left")
num_inner_iterations = args.num_inner_iterations if args.num_inner_iterations is not None else args.world_size
num_prompts = args.num_gpu_batches * args.gpu_batch_size * num_inner_iterations * 1
prompt_len, gen_len, cut_gen_len = args.prompt_len, args.gen_len, args.cut_gen_len
# Task and policy
warmup_inputs = get_test_inputs(32, num_prompts, tokenizer)
inputs = get_test_inputs(prompt_len, num_prompts, tokenizer)
gpu = TorchDevice(f"cuda:{args.local_rank}")
cpu = TorchDevice("cpu")
disk = TorchDisk(args.offload_dir, None, args.local_rank)
env = ExecutionEnv(gpu=gpu, cpu=cpu, disk=disk, mixed=TorchMixedDevice([gpu, cpu, disk]))
TorchTensor.name_count = count(start=args.rank, step=args.world_size)
comm_test(gpu.dev if args.comm_device == "gpu" else cpu.dev)
policy = Policy(args.gpu_batch_size, args.num_gpu_batches,
args.percent[0], args.percent[1],
args.percent[2], args.percent[3],
args.percent[4], args.percent[5],
args.overlap, args.sep_layer, args.pin_weight,
args.cpu_cache_compute, args.attn_sparsity,
args.compress_weight,
CompressionConfig(num_bits=4, group_size=64,
group_dim=0, symmetric=False),
args.compress_cache,
CompressionConfig(num_bits=4, group_size=64,
group_dim=2, symmetric=False))
assert not (args.compress_cache and args.attn_sparsity < 1.0), "Not implemented"
opt_config = get_opt_config(args.model)
model = DistOptLM(opt_config, env, args.path, policy, args.rank,
args.world_size, args.comm_device, num_inner_iterations=num_inner_iterations,
async_comm=args.async_comm)
cache_size = opt_config.cache_bytes(num_prompts, prompt_len + gen_len)
hidden_size = opt_config.hidden_bytes(num_prompts, prompt_len + gen_len)
print(f"model size: {opt_config.model_bytes()/GB:.3f} GB, "
f"cache size: {cache_size/GB:.3f} GB, "
f"hidden size (prefill): {hidden_size/GB:.3f} GB")
try:
print("warmup - generate")
output_ids = model.generate(
warmup_inputs, max_new_tokens=2, verbose=args.verbose)
print("benchmark - generate")
for timer_name in ["generate-prompt", "generate"]:
timers(timer_name).reset()
output_ids = model.generate(
inputs, max_new_tokens=args.gen_len,
debug_mode=args.debug_mode, cut_gen_len=cut_gen_len, verbose=args.verbose)
prompt_costs = timers("generate-prompt").costs
generate_costs = timers("generate").costs
finally:
env.close_copy_threads()
if args.rank != args.world_size - 1:
return
# Log output
prefill_latency = sum(prompt_costs)
prefill_throughput = num_prompts * prompt_len / prefill_latency
if cut_gen_len: # project latency of cut_gen_len to gen_len
costs = np.array(generate_costs).reshape(-1, cut_gen_len-1).sum(axis=0).tolist()
decode_latency = project_decode_latency([None] + costs, prompt_len, gen_len)
else:
decode_latency = sum(generate_costs)
decode_throughput = num_prompts * (gen_len - 1) / max(decode_latency, 1e-10)
num_generated_tokens = num_prompts * gen_len
total_latency = prefill_latency + decode_latency
total_throughput = num_generated_tokens / total_latency
_, gpu_peak_mem = gpu.mem_stats()
_, cpu_peak_mem = cpu.mem_stats()
if DUMMY_WEIGHT not in args.path:
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
show_str = "Outputs:\n" + 70 * '-' + "\n"
for i in [0, len(outputs)-1]:
show_str += f"{i}: {outputs[i]}\n"
show_str += "-" * 70 + "\n"
print(show_str)
gpu.print_stats()
cpu.print_stats()
projected = args.debug_mode or cut_gen_len
log_str = (f"model size: {opt_config.model_bytes()/GB:.3f} GB\t"
f"cache size: {cache_size/GB:.3f} GB\t"
f"hidden size (prefill): {hidden_size/GB:.3f} GB\n"
f"peak gpu mem: {gpu_peak_mem / GB:.3f} GB\n"
f"prefill latency: {prefill_latency:.2f} s\t"
f"prefill throughput: {prefill_throughput:.2f} token/s\n"
f"decode latency: {decode_latency:.2f} s\t"
f"decode throughput: {decode_throughput:.2f} token/s\n"
f"total latency: {total_latency:.2f} s\t"
f"total throughput: {total_throughput:.2f} token/s")
print(log_str)
if not args.no_log:
if args.log_file == "auto":
basename = f"rank-{args.rank}-{get_filename(args)}"
log_filename = basename + ".log"
else:
log_filename = args.log_file
with open(log_filename, "a") as fout:
fout.write(log_str + "\n") | null |
10,185 | import argparse
from itertools import count
import os
import pickle
import traceback
from typing import Union, List, Optional
import numpy as np
import torch
import torch.distributed as dist
from transformers import AutoTokenizer
from flexgen.compression import CompressionConfig
from flexgen.dist_utils import initialize_distributed
from flexgen.flex_opt import (Policy, InputEmbed, OutputEmbed, SelfAttention,
MLP, TransformerLayer, OptLM, get_filename,
add_parser_arguments, get_test_inputs,
DUMMY_WEIGHT)
from flexgen.opt_config import get_opt_config
from flexgen.pytorch_backend import (TorchDevice, TorchDisk, TorchLink,
TorchMixedDevice, TorchTensor)
from flexgen.timer import timers
from flexgen.utils import (Task, ExecutionEnv, GB, T, ValueHolder,
array_1d, array_2d, array_3d, array_4d, str2bool, project_decode_latency)
def add_distributed_parser_arguments(parser):
parser.add_argument('--head-ip', type=str, default=None, help='the IP address of the head node')
parser.add_argument('--port', type=int, default=None, help='the port of the head node')
parser.add_argument('--rank', metavar='I', type=int, default=None)
parser.add_argument('--local-rank', metavar='I', type=int, default=None)
parser.add_argument('--world-size', metavar='N', type=int, default=None)
parser.add_argument('--use-mpi', action='store_true', default=False,
help="Get distributed info from MPI")
parser.add_argument('--comm-device', type=str, default='gpu',
choices=['gpu', 'cpu'],
help='communication through gpu nvlink or cpu memory '
'and socket')
parser.add_argument('--num-inner-iterations', metavar='I', type=int, default=None)
parser.add_argument('--async-comm', action='store_true', default=False,
help="Use asynchronous communication") | null |
10,186 | import time
from argparse import ArgumentParser
from statistics import mean
import torch
from petals import DistributedBloomConfig, DistributedBloomForCausalLM
from torch.multiprocessing import Process, Event, Queue
from transformers import AutoTokenizer, BloomConfig, OPTConfig
def _patch_bloom_config(bloom_config: BloomConfig, opt_config: OPTConfig):
bloom_config.hidden_size = opt_config.hidden_size
bloom_config.n_head = opt_config.num_attention_heads
bloom_config.n_layer = opt_config.num_hidden_layers
bloom_config.vocab_size = opt_config.vocab_size | null |
10,187 | import time
from argparse import ArgumentParser
from statistics import mean
import torch
from petals import DistributedBloomConfig, DistributedBloomForCausalLM
from torch.multiprocessing import Process, Event, Queue
from transformers import AutoTokenizer, BloomConfig, OPTConfig
def client_process(
finished_warmup,
can_start,
config_bloom,
num_micro_batches,
batch_size,
sequence_length,
max_tokens,
process_index,
queue: Queue,
) -> None:
torch.set_num_threads(1)
torch.cuda.set_device(process_index % torch.cuda.device_count())
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-30b")
inputs = torch.randint(0, tokenizer.vocab_size, size=(batch_size, sequence_length), device="cuda")
model = DistributedBloomForCausalLM(config_bloom)
model.cuda()
# warmup
model.generate(inputs, max_new_tokens=1, do_sample=False)
finished_warmup.set()
can_start.wait()
for _ in range(num_micro_batches):
start = time.monotonic()
model.generate(inputs, max_new_tokens=max_tokens, do_sample=False)
end = time.monotonic()
queue.put(end - start)
def run_bench(args, sequence_length, max_tokens, config_bloom):
queue = Queue()
processes = []
warmup_events = []
can_start = Event()
for i in range(args.num_processes):
print("create process", i)
warmup_event = Event()
proc = Process(target=client_process,
args=(warmup_event, can_start, config_bloom, args.num_micro_batches, args.batch_size,
sequence_length, max_tokens, i, queue)
)
proc.start()
processes.append(proc)
warmup_events.append(warmup_event)
for event in warmup_events:
event.wait()
can_start.set()
start = time.monotonic()
for i, proc in enumerate(processes):
print("join process", i)
proc.join()
end = time.monotonic()
latencies = []
while not queue.empty():
latencies.append(queue.get())
print("total time", end - start)
total_tokens = args.batch_size * args.num_micro_batches * args.num_processes * max_tokens
print("total tokens", total_tokens)
throughput = total_tokens / (end - start)
print("throughput", throughput)
latency = mean(latencies)
print("average latency", latency)
with open(args.output, "a") as f:
print("\t".join(
map(str,
[args.batch_size, args.num_micro_batches, args.num_processes, sequence_length, max_tokens,
throughput, latency]
)), file=f) | null |
10,188 | import argparse
import multiprocessing as mp
import os
import pickle
import time
import numpy as np
from accelerate import (infer_auto_device_map, init_empty_weights,
load_checkpoint_and_dispatch)
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM
from transformers import OPTForCausalLM
import torch
from flexgen.timer import timers
from flexgen.utils import (GB, project_decode_latency,
write_benchmark_log)
from flexgen.opt_config import (get_opt_config,
disable_torch_init, disable_hf_opt_init)
def realize_meta_module(module, dtype=None, device=None):
for name, child in module.named_children():
realize_meta_module(child, dtype, device)
keys = list(module._parameters.keys())
for k in keys:
v = module._parameters[k]
if v is not None:
module._parameters[k] = torch.nn.Parameter(
torch.empty(*v.shape, dtype=dtype or v.dtype,
device=device or v.device))
keys = list(module._buffers.keys())
for k in keys:
v = module._buffers[k]
assert v is None | null |
10,189 | import argparse
import multiprocessing as mp
import os
import pickle
import time
import numpy as np
from accelerate import (infer_auto_device_map, init_empty_weights,
load_checkpoint_and_dispatch)
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM
from transformers import OPTForCausalLM
import torch
from flexgen.timer import timers
from flexgen.utils import (GB, project_decode_latency,
write_benchmark_log)
from flexgen.opt_config import (get_opt_config,
disable_torch_init, disable_hf_opt_init)
def get_filename(model_name, batch_size, prompt_len, gen_len,
cpu_offload, disk_offload, num_nodes, num_gpus_per_node,
use_deepspeed):
modelsize = model_name.split('-')[-1]
if use_deepspeed:
filename = "ds-"
else:
filename = "hf-"
filename += f"{modelsize}-bs{batch_size}-prompt{prompt_len}-gen{gen_len}-"
filename += f"n{num_nodes}x{num_gpus_per_node}-"
if cpu_offload:
filename += "cpu"
elif disk_offload:
filename += "disk"
else:
filename += "gpu"
return filename
def meta_to_cpu(container, dtype=None):
if isinstance(container, torch.Tensor):
return torch.empty(*container.shape, dtype=dtype or container.dtype)
elif isinstance(container, tuple):
return tuple(meta_to_cpu(x, dtype) for x in container)
elif isinstance(container, dict):
return dict((k, meta_to_cpu(v, dtype)) for k, v in container.items())
else:
raise ValueError(f"Invalid type: {container}")
def get_model_config(model_name):
if "175b" in model_name:
config = AutoConfig.from_pretrained("facebook/opt-66b")
config.hidden_size = 12288
config.word_embed_proj_dim = 12288
config.ffn_dim = 12288 * 4
config.num_attention_heads = 96
config.num_hidden_layers = 96
else:
config = AutoConfig.from_pretrained(model_name)
return config
def get_ds_opt_model(model_name, dtype, cpu_offload, disk_offload, offload_dir,
dummy_weights):
import deepspeed
import torch.distributed as dist
from transformers.deepspeed import HfDeepSpeedConfig
config = get_model_config(model_name)
hidden_size = config.hidden_size
deepspeed.init_distributed("nccl")
rank = dist.get_rank()
pin_memory = bool(args.pin_memory)
ds_config = {
"fp16": {
"enabled": dtype == torch.float16,
},
"bf16": {
"enabled": dtype == torch.bfloat16,
},
"zero_optimization": {
"stage": 3,
"stage3_prefetch_bucket_size": hidden_size * hidden_size,
"stage3_param_persistence_threshold": 0,
},
"steps_per_print": 2000,
"train_batch_size": args.batch_size,
"wall_clock_breakdown": False,
}
if cpu_offload:
ds_config["zero_optimization"]["offload_param"] = dict(
device="cpu", pin_memory=pin_memory)
if disk_offload:
ds_config["zero_optimization"]["offload_param"] = dict(
device="nvme",
pin_memory=True,
nvme_path=offload_dir,
buffer_count=5,
buffer_size=2 * GB,
)
ds_config["aio"] = {
"block_size": 1048576,
"queue_depth": 8,
"thread_count": 1,
"single_submit": False,
"overlap_events": True,
}
dschf = HfDeepSpeedConfig(ds_config)
model = OPTForCausalLM.from_pretrained(
dummy_weights or model_name, torch_dtype=dtype)
model = model.eval()
ds_engine = deepspeed.initialize(model=model, config_params=ds_config)[0]
ds_engine.module.eval()
model = ds_engine.module
return model
def get_hf_opt_model(model_name, dtype, cpu_offload, disk_offload, offload_dir,
num_gpus, dummy_weights):
if num_gpus == 1 and dtype != torch.int8:
# Here we use a custom device_map instead of device_map == "auto"
# becase we want to offload as many as possible weights out of GPU
# to allow a larger batch size.
if cpu_offload:
# NOTE: We must put some weights on GPU. Otherwise, huggingface reports errors.
device_map = {
"model.decoder.embed_tokens.weight": 0,
"model.decoder.embed_positions.weight": 0,
"model.decoder.final_layer_norm": "cpu",
"model.decoder.layers": "cpu",
"lm_head.weight": 0,
}
elif disk_offload:
device_map = {
"model.decoder.embed_tokens.weight": 0,
"model.decoder.embed_positions.weight": 0,
"model.decoder.final_layer_norm": "disk",
"model.decoder.layers": "disk",
"lm_head.weight": 0,
}
else:
device_map = None
max_memory = None
else:
# Here we use device_map == "auto", but set a low `max_memory` threshold
# becase we want to offload as many as possible weights out of GPU
# to allow a larger batch size.
device_map = "auto"
if cpu_offload:
# `max_memory` should be larger than the embedding.
# We use 2GB here because the embeding of opt-175b is 1.2GB.
max_memory = {k: "2GB" for k in range(num_gpus)}
elif disk_offload:
max_memory = {k: "2GB" for k in range(num_gpus)}
else:
max_memory = {k: "14GB" for k in range(num_gpus)}
max_memory["cpu"] = "160GB"
if dtype == torch.int8:
kwargs = {"load_in_8bit": True}
else:
kwargs = {"torch_dtype": dtype}
disable_torch_init()
model = OPTForCausalLM.from_pretrained(dummy_weights or model_name,
device_map=device_map, max_memory=max_memory,
offload_folder=offload_dir, **kwargs)
if device_map is None:
model.cuda()
model.eval()
return model
timers = Timers()
def project_decode_latency(costs, prompt_len, gen_len):
decode_costs = costs[1:]
if gen_len / prompt_len < 0.1:
warmup = 2
decode_latency = (sum(decode_costs[:warmup]) +
np.mean(decode_costs[warmup:]) * (gen_len - 1 - warmup))
else:
warmup = 2
decode_latency = (sum(decode_costs[:warmup]) +
np.mean(decode_costs[warmup:]) * (gen_len - 1 - warmup))
#assert len(decode_costs) >= 4
#warmup = 2
#xs = np.arange(warmup, len(decode_costs))
#ys = np.asarray(decode_costs[warmup:])
#curve = np.poly1d(np.polyfit(xs, ys, deg=1))
#ys_pred = [curve(x) for x in range(gen_len-1)]
#decode_latency = sum(ys_pred)
#print([round(x, 4) for x in decode_costs])
#print([round(x, 4) for x in ys_pred])
return decode_latency
def write_benchmark_log(filename, model_size, cache_size, hidden_size,
gpu_peak_mem, projected, prefill_latency, prefill_throughput,
decode_latency, decode_throughput, total_latency, total_throughput):
log_str = (f"model size: {model_size/GB:.3f} GB\t"
f"cache size: {cache_size/GB:.3f} GB\t"
f"hidden size (p): {hidden_size/GB:.3f} GB\n"
f"peak gpu mem: {gpu_peak_mem / GB:.3f} GB\t"
f"projected: {projected}\n"
f"prefill latency: {prefill_latency:.3f} s\t"
f"prefill throughput: {prefill_throughput:.3f} token/s\n"
f"decode latency: {decode_latency:.3f} s\t"
f"decode throughput: {decode_throughput:.3f} token/s\n"
f"total latency: {total_latency:.3f} s\t"
f"total throughput: {total_throughput:.3f} token/s")
with open(filename, "a") as fout:
fout.write(log_str + "\n")
return log_str
def get_opt_config(name, **kwargs):
if "/" in name:
name = name.split("/")[1]
name = name.lower()
# Handle opt-iml-30b and opt-iml-max-30b
if "-iml-max" in name:
arch_name = name.replace("-iml-max", "")
elif "-iml" in name:
arch_name = name.replace("-iml", "")
else:
arch_name = name
if arch_name == "opt-125m":
config = OptConfig(name=name,
max_seq_len=2048, num_hidden_layers=12, n_head=12,
hidden_size=768, input_dim=768, ffn_embed_dim=768 * 4,
)
elif arch_name == "opt-350m":
config = OptConfig(name=name,
max_seq_len=2048, num_hidden_layers=24, n_head=16,
hidden_size=1024, input_dim=1024, ffn_embed_dim=1024 * 4,
)
raise NotImplementedError("Not implemented because this model "
"has a different architecture")
elif arch_name == "opt-1.3b":
config = OptConfig(name=name,
max_seq_len=2048, num_hidden_layers=24, n_head=32,
hidden_size=2048, input_dim=2048, ffn_embed_dim=2048 * 4,
)
elif arch_name == "opt-2.7b":
config = OptConfig(name=name,
max_seq_len=2048, num_hidden_layers=32, n_head=32,
hidden_size=2560, input_dim=2560, ffn_embed_dim=2560 * 4,
)
elif arch_name == "opt-6.7b":
config = OptConfig(name=name,
max_seq_len=2048, num_hidden_layers=32, n_head=32,
hidden_size=4096, input_dim=4096, ffn_embed_dim=4096 * 4,
)
elif arch_name == "opt-13b":
config = OptConfig(name=name,
max_seq_len=2048, num_hidden_layers=40, n_head=40,
hidden_size=5120, input_dim=5120, ffn_embed_dim=5120 * 4,
)
elif arch_name == "opt-30b":
config = OptConfig(name=name,
max_seq_len=2048, num_hidden_layers=48, n_head=56,
hidden_size=7168, input_dim=7168, ffn_embed_dim=7168 * 4,
)
elif arch_name == "galactica-30b":
config = OptConfig(name=name,
max_seq_len=2048, num_hidden_layers=48, n_head=56,
hidden_size=7168, input_dim=7168, ffn_embed_dim=7168 * 4, vocab_size=50000,
)
elif arch_name == "opt-66b":
config = OptConfig(name=name,
max_seq_len=2048, num_hidden_layers=64, n_head=72,
hidden_size=9216, input_dim=9216, ffn_embed_dim=9216 * 4,
)
elif arch_name == "opt-175b":
config = OptConfig(name=name,
max_seq_len=2048, num_hidden_layers=96, n_head=96,
hidden_size=12288, input_dim=12288, ffn_embed_dim=12288 * 4,
)
elif arch_name == "opt-175b-stage":
config = OptConfig(name=name,
max_seq_len=2048, num_hidden_layers=24, n_head=96,
hidden_size=12288, input_dim=12288, ffn_embed_dim=12288 * 4,
)
else:
raise ValueError(f"Invalid model name: {name}")
return dataclasses.replace(config, **kwargs)
def run_generation(model_name, batch_size, prompt_len, gen_len, cut_gen_len,
cpu_offload, disk_offload, offload_dir, use_int8,
num_nodes, num_gpus_per_node, use_deepspeed, dummy,
output_file, pkl_file, no_log, verbose):
# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_name.replace("175b", "66b"), padding_side="left")
# Load model
if use_int8:
dtype = torch.int8
else:
dtype = torch.float16
if dummy:
config = get_model_config(model_name)
filename = os.path.join(offload_dir,
f"{model_name.replace('/', '-')}-hf-weights/")
if not os.path.exists(filename):
print("create dummy weights")
with init_empty_weights():
model = OPTForCausalLM(config)
model.save_pretrained(filename,
state_dict=meta_to_cpu(model.state_dict(), torch.float16))
dummy_weights = filename
else:
dummy_weights = None
print("load model")
if use_deepspeed:
model = get_ds_opt_model(model_name, dtype, cpu_offload, disk_offload,
offload_dir, dummy_weights)
else:
model = get_hf_opt_model(model_name, dtype, cpu_offload, disk_offload,
offload_dir, num_gpus_per_node, dummy_weights)
# Run generation
execute_gen_len = cut_gen_len if cut_gen_len else gen_len
if use_deepspeed:
prompts = ["Paris is the capital city of"] * (batch_size // WORLD_SIZE)
else:
prompts = ["Paris is the capital city of"] * batch_size
input_ids = tokenizer(prompts, return_tensors="pt",
padding="max_length",
max_length=prompt_len).input_ids.cuda()
# Warmup
print("wamup")
generate_kwargs_warmup = dict(max_new_tokens=1, do_sample=False)
with torch.no_grad():
output_ids = model.generate(input_ids=input_ids, **generate_kwargs_warmup)
# Run
print("benchmark")
timers("generate-forward").reset()
generate_kwargs = dict(max_new_tokens=execute_gen_len, do_sample=False)
with torch.no_grad():
output_ids = model.generate(input_ids=input_ids, **generate_kwargs)
costs = timers("generate-forward").costs
if use_deepspeed and args.local_rank != 0:
return
# Log output
prefill_latency = costs[0]
prefill_throughput = batch_size * prompt_len / prefill_latency
if cut_gen_len: # project latency of cut_gen_len to gen_len
decode_latency = project_decode_latency(costs, prompt_len, gen_len)
else:
decode_latency = sum(costs[1:])
decode_throughput = batch_size * (gen_len - 1) / max(decode_latency, 1e-10)
num_generated_tokens = batch_size * gen_len
total_latency = prefill_latency + decode_latency
total_throughput = num_generated_tokens / total_latency
gpu_peak_mem = torch.cuda.max_memory_allocated(torch.device("cuda"))
out_str = ""
if verbose >= 2:
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
show_str = "Outputs:\n" + 70 * '-' + "\n"
for i in [0, len(outputs)-1]:
show_str += f"{i}: {outputs[i]}\n"
show_str += 70 * '-' + "\n"
print(show_str)
# Check lengths
input_lens = [len(x) for x in input_ids]
output_lens = [len(x) for x in output_ids]
assert all(x == prompt_len for x in input_lens)
assert all(x == prompt_len + execute_gen_len for x in output_lens)
if args.log_file == "auto":
filename = get_filename(model_name, batch_size, prompt_len,
gen_len, cpu_offload, disk_offload, num_nodes,
num_gpus_per_node, use_deepspeed) + ".log"
else:
filename = args.log_file
projected = bool(cut_gen_len)
opt_config = get_opt_config(args.model)
cache_size = opt_config.cache_bytes(batch_size, prompt_len + gen_len)
hidden_size = opt_config.hidden_bytes(batch_size, prompt_len + gen_len)
log_str = write_benchmark_log(filename,
opt_config.model_bytes(), cache_size, hidden_size,
gpu_peak_mem, projected, prefill_latency, prefill_throughput,
decode_latency, decode_throughput, total_latency, total_throughput)
if verbose >= 1:
print(log_str) | null |
10,190 | import argparse
from dataclasses import dataclass
import time
from flexgen.utils import run_cmd
def run_huggingface(model, prompt_len, gen_len, cut_gen_len, batch_size,
num_nodes, num_gpus_per_node,
use_ds, cpu, disk, dummy, log_file=None, pkl_file=None):
assert num_nodes == 1
if use_ds:
cmd = f"deepspeed --num_gpus {num_gpus_per_node} hf_opt.py "
else:
cmd = f"python hf_opt.py --num-gpus {num_gpus_per_node} "
cmd += (f"--model {model} "
f"--prompt-len {prompt_len} --gen-len {gen_len} "
f"--batch-size {batch_size} ")
if cut_gen_len:
cmd += f"--cut-gen-len {cut_gen_len} "
if cpu:
cmd += "--cpu "
if disk:
cmd += "--disk "
if dummy:
cmd += "--dummy "
if log_file is not None:
cmd += f"--log-file {log_file} "
if pkl_file is not None:
cmd += f"--pkl-file {pkl_file} "
run_cmd(cmd)
def bench_one_case(case):
if case.model == "facebook/opt-6.7b":
cut_gen_len = None
else:
cut_gen_len = 5
dummy = True
if case.device == "gpu":
cpu = disk = False
elif case.device == "cpu":
cpu, disk = True, False
elif case.device == "disk":
cpu, disk = False, True
use_deepspeed = case.library == "ds"
run_huggingface(case.model, case.prompt_len, case.gen_len, cut_gen_len,
case.batch_size, case.num_nodes, case.num_gpus_per_node,
use_ds=use_deepspeed,
cpu=cpu, disk=disk, dummy=dummy) | null |
10,191 | import os
import sys
import subprocess
from setuptools import setup, find_packages
from setuptools.command import egg_info
import time
from op_builder import ALL_OPS, get_default_compute_capabilities, OpBuilder
from op_builder.builder import installed_cuda_version
ERROR = f"{RED_START} [ERROR] {RED_END}"
print(f"DS_BUILD_OPS={BUILD_OP_DEFAULT}")
print(f'Install Ops={install_ops}')
print(f"version={version_str}, git_hash={git_hash}, git_branch={git_branch}")
print(f'install_requires={install_requires}')
print(f'compatible_ops={compatible_ops}')
print(f'ext_modules={ext_modules}')
print(f'deepspeed build time = {end_time - start_time} secs')
def abort(msg):
print(f"{ERROR} {msg}")
assert False, msg | null |
10,192 | import os
import sys
import subprocess
from setuptools import setup, find_packages
from setuptools.command import egg_info
import time
from op_builder import ALL_OPS, get_default_compute_capabilities, OpBuilder
from op_builder.builder import installed_cuda_version
with open('deepspeed/git_version_info_installed.py', 'w') as fd:
fd.write(f"version='{version_str}'\n")
fd.write(f"git_hash='{git_hash}'\n")
fd.write(f"git_branch='{git_branch}'\n")
fd.write(f"installed_ops={install_ops}\n")
fd.write(f"compatible_ops={compatible_ops}\n")
fd.write(f"torch_info={torch_info}\n")
with open(os.path.join(thisdir, 'README.md'), encoding='utf-8') as fin:
readme_text = fin.read()
def fetch_requirements(path):
with open(path, 'r') as fd:
return [r.strip() for r in fd.readlines()] | null |
10,193 | import os
import sys
import subprocess
from setuptools import setup, find_packages
from setuptools.command import egg_info
import time
from op_builder import ALL_OPS, get_default_compute_capabilities, OpBuilder
from op_builder.builder import installed_cuda_version
if sys.platform == "win32":
# This creates a symbolic links on Windows.
# It needs Administrator privilege to create symlinks on Windows.
create_dir_symlink('..\\..\\csrc', '.\\deepspeed\\ops\\csrc')
create_dir_symlink('..\\..\\op_builder', '.\\deepspeed\\ops\\op_builder')
egg_info.manifest_maker.template = 'MANIFEST_win.in'
def command_exists(cmd):
if sys.platform == "win32":
result = subprocess.Popen(f'{cmd}', stdout=subprocess.PIPE, shell=True)
return result.wait() == 1
else:
result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
return result.wait() == 0 | null |
10,194 | import os
import sys
import subprocess
from setuptools import setup, find_packages
from setuptools.command import egg_info
import time
from op_builder import ALL_OPS, get_default_compute_capabilities, OpBuilder
from op_builder.builder import installed_cuda_version
BUILD_OP_DEFAULT = int(os.environ.get('DS_BUILD_OPS', BUILD_OP_PLATFORM))
if BUILD_OP_DEFAULT:
assert torch_available, "Unable to pre-compile ops without torch installed. Please install torch before attempting to pre-compile ops."
def op_envvar(op_name):
if 'DS_BUILD_STRING' in os.environ:
# Build string env specified, probably building for distribution
with open('build.txt', 'w') as fd:
fd.write(os.environ.get('DS_BUILD_STRING'))
version_str += os.environ.get('DS_BUILD_STRING')
elif os.path.isfile('build.txt'):
# build.txt exists, probably installing from distribution
with open('build.txt', 'r') as fd:
version_str += fd.read().strip()
else:
# None of the above, probably installing from source
version_str += f'+{git_hash}'
def op_enabled(op_name):
env_var = op_envvar(op_name)
return int(os.environ.get(env_var, BUILD_OP_DEFAULT)) | null |
10,195 | import os
import sys
import subprocess
from setuptools import setup, find_packages
from setuptools.command import egg_info
import time
from op_builder import ALL_OPS, get_default_compute_capabilities, OpBuilder
from op_builder.builder import installed_cuda_version
if 'DS_BUILD_STRING' in os.environ:
# Build string env specified, probably building for distribution
with open('build.txt', 'w') as fd:
fd.write(os.environ.get('DS_BUILD_STRING'))
version_str += os.environ.get('DS_BUILD_STRING')
elif os.path.isfile('build.txt'):
# build.txt exists, probably installing from distribution
with open('build.txt', 'r') as fd:
version_str += fd.read().strip()
else:
# None of the above, probably installing from source
version_str += f'+{git_hash}'
def create_dir_symlink(src, dest):
if not os.path.islink(dest):
if os.path.exists(dest):
os.remove(dest)
assert not os.path.exists(dest)
os.symlink(src, dest) | null |
10,196 | import os
from .constants import (MODEL_FILE_PREFIX,
MODEL_FILE_SUFFIX,
OPTIM_FILE_SUFFIX,
ZERO_FILE_PREFIX)
MODEL_FILE_PREFIX = 'mp_rank_'
MODEL_FILE_SUFFIX = '_model_states.pt'
def get_model_ckpt_name_for_rank(base_folder, mp_rank_str):
ckpt_name = os.path.join(
base_folder,
MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX,
)
return ckpt_name | null |
10,197 | import os
from .constants import (MODEL_FILE_PREFIX,
MODEL_FILE_SUFFIX,
OPTIM_FILE_SUFFIX,
ZERO_FILE_PREFIX)
MODEL_FILE_PREFIX = 'mp_rank_'
ZERO_FILE_PREFIX = 'zero_pp_rank_'
OPTIM_FILE_SUFFIX = '_optim_states.pt'
def get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank):
zero_prefix = f'{ZERO_FILE_PREFIX}{dp_rank}'
mp_rank_string = f'_{MODEL_FILE_PREFIX}{mp_rank:02d}'
zero_ckpt_name = os.path.join(
base_folder,
zero_prefix + mp_rank_string + OPTIM_FILE_SUFFIX,
)
return zero_ckpt_name | null |
10,198 | import os
from .constants import (MODEL_FILE_PREFIX,
MODEL_FILE_SUFFIX,
OPTIM_FILE_SUFFIX,
ZERO_FILE_PREFIX)
MODEL_FILE_SUFFIX = '_model_states.pt'
def get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank):
ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'
ckpt_path = os.path.join(base_folder, ckpt_file)
return ckpt_path | null |
10,199 | from .reshape_utils import partition_data
class meg_2d_parallel_map(object):
def __init__(self, pp_degree, tp_degree):
self.pp_degree = pp_degree
self.tp_degree = tp_degree
self.map = {}
def simple_init(self):
self.map = {
self._make_key(i // self.tp_degree,
i % self.tp_degree): [i]
for i in range(self.pp_degree * self.tp_degree)
}
def add_data(self, pp_index, tp_index, data):
self._validate_indices(pp_index, tp_index)
assert type(data) is list
key = self._make_key(pp_index, tp_index)
if not key in self.map.keys():
self.map[key] = []
self.map[key] += data
def get_data(self, pp_index=None, tp_index=None):
self._validate_indices(pp_index, tp_index)
pp_indices = list(range(self.pp_degree)) if pp_index is None else [pp_index]
tp_indices = list(range(self.tp_degree)) if tp_index is None else [tp_index]
result = []
for i in pp_indices:
for j in tp_indices:
result += self.map[self._make_key(i, j)]
return result
def print_data(self, tag):
print(f'{tag}')
for key, value in self.map.items():
print(f'{key} = {value}')
def _validate_indices(self, pp_index, tp_index):
assert pp_index is None or pp_index < self.pp_degree
assert tp_index is None or tp_index < self.tp_degree
def _make_key(self, i, j):
return f'{i},{j}'
def _reshape_tp_dimension(old_2d_map, new_tp_degree):
old_pp_degree = old_2d_map.pp_degree
new_2d_map = meg_2d_parallel_map(old_pp_degree, new_tp_degree)
for i in range(old_pp_degree):
ranks_for_pp_index = old_2d_map.get_data(pp_index=i, tp_index=None)
split_ranks = partition_data(ranks_for_pp_index, new_tp_degree)
for j in range(new_tp_degree):
new_2d_map.add_data(i, j, split_ranks[j])
return new_2d_map
def _reshape_pp_dimension(old_2d_map, new_pp_degree):
old_tp_degree = old_2d_map.tp_degree
new_2d_map = meg_2d_parallel_map(new_pp_degree, old_tp_degree)
for i in range(old_tp_degree):
ranks_for_tp_index = old_2d_map.get_data(pp_index=None, tp_index=i)
split_ranks = partition_data(ranks_for_tp_index, new_pp_degree)
for j in range(new_pp_degree):
new_2d_map.add_data(j, i, split_ranks[j])
return new_2d_map
def reshape_meg_2d_parallel(old_pp_degree,
old_tp_degree,
new_pp_degree,
new_tp_degree,
verbose=False):
assert new_pp_degree <= old_pp_degree
assert new_tp_degree <= old_tp_degree
old_2d_map = meg_2d_parallel_map(old_pp_degree, old_tp_degree)
old_2d_map.simple_init()
if verbose:
old_2d_map.print_data(f'original_2d_map:')
if old_tp_degree != new_tp_degree:
new_tp_map = _reshape_tp_dimension(old_2d_map, new_tp_degree)
else:
new_tp_map = old_2d_map
if verbose:
new_tp_map.print_data(f'after_tp_reshape:')
if old_pp_degree != new_pp_degree:
final_map = _reshape_pp_dimension(new_tp_map, new_pp_degree)
else:
final_map = new_tp_map
if verbose:
final_map.print_data(f'final_2d_map:')
return final_map | null |
10,200 | from .reshape_utils import partition_data
def get_mpu_ranks(tp_size=1, pp_size=1, dp_size=1, virtual_pp_size=None):
    """
    Initialize model data parallel groups.

    Arguments:
        tp_size: number of GPUs used to parallelize model tensor.
        pp_size: number of GPUs used to parallelize model pipeline.
        dp_size: number of GPUs used to parallelize model data.

    Returns (tp_groups, pp_groups, dp_groups), each a list of rank lists.

    Example: 16 GPUs g0..g15 with tp=2 and pp=4 yields
        8 data-parallel groups:
            [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15]
        8 tensor model-parallel groups:
            [g0, g1], [g2, g3], ..., [g14, g15]
        4 pipeline model-parallel groups:
            [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15]

    For efficiency the caller should place adjacent ranks on the same node.
    (``virtual_pp_size`` is accepted for interface compatibility; unused here.)
    """
    world_size = tp_size * pp_size * dp_size

    print(f"\n\n*** tp={tp_size}, pp={pp_size}, dp={dp_size}, world={world_size}")

    tensor_model_parallel_size = min(tp_size, world_size)
    pipeline_model_parallel_size = min(pp_size, world_size)
    data_parallel_size = world_size // (tensor_model_parallel_size *
                                        pipeline_model_parallel_size)

    num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
    num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size

    # Data-parallel groups: within each pipeline stage, ranks that hold the
    # same tensor slice but different data shards.
    all_dp_group_ranks = []
    for stage in range(pipeline_model_parallel_size):
        stage_start = stage * num_pipeline_model_parallel_groups
        stage_end = stage_start + num_pipeline_model_parallel_groups
        for offset in range(tensor_model_parallel_size):
            all_dp_group_ranks.append(
                list(range(stage_start + offset,
                           stage_end,
                           tensor_model_parallel_size)))

    print("DP", all_dp_group_ranks)

    # Pipeline groups: pick the i-th member of every data-parallel group.
    all_pp_group_ranks = [
        [dp_group[i] for dp_group in all_dp_group_ranks]
        for i in range(data_parallel_size)
    ]

    print(f"PP", all_pp_group_ranks)

    # Tensor groups: contiguous blocks of tensor_model_parallel_size ranks.
    all_tp_group_ranks = [
        list(range(g * tensor_model_parallel_size,
                   (g + 1) * tensor_model_parallel_size))
        for g in range(num_tensor_model_parallel_groups)
    ]

    print(f"TP", all_tp_group_ranks)

    return all_tp_group_ranks, all_pp_group_ranks, all_dp_group_ranks
# # Build the pipeline model-parallel groups and embedding groups
# # (first and last rank in each pipeline model-parallel group).
# for i in range(num_pipeline_model_parallel_groups):
# ranks = range(i, world_size,
# num_pipeline_model_parallel_groups)
# print(f"EMB{i}", list(ranks))
The provided code snippet includes necessary dependencies for implementing the `reshape` function. Write a Python function `def reshape(src, tgt)` to solve the following problem:
reshape([tp_size_src, pp_size_src, dp_size_src], [tp_size_tgt, pp_size_tgt, dp_size_tgt])
Here is the function:
def reshape(src, tgt):
    """
    reshape([tp_size_src, pp_size_src, dp_size_src],
    [tp_size_tgt, pp_size_tgt, dp_size_tgt])

    Prints the rank-group mapping for a stepwise topology contraction:
    first TP is contracted (PP/DP held at source sizes), then PP.
    """
    print(f"\n\n*** Reshaping: {src} => {tgt}")

    tp_src, pp_src, dp_src = src
    tp_tgt, pp_tgt, dp_tgt = tgt

    # Stage 1: the original topology.
    tp_ranks1, pp_ranks1, dp_ranks1 = get_mpu_ranks(tp_size=tp_src, pp_size=pp_src, dp_size=dp_src)
    # Stage 2: TP contracted, PP/DP unchanged.
    tp_ranks2, pp_ranks2, dp_ranks2 = get_mpu_ranks(tp_size=tp_tgt, pp_size=pp_src, dp_size=dp_src)
    # Stage 3: TP and PP both contracted.
    tp_ranks3, pp_ranks3, dp_ranks3 = get_mpu_ranks(tp_size=tp_tgt, pp_size=pp_tgt, dp_size=dp_src)

    # handle tp contraction first
    print("\n*** TP contraction:")
    for before, after in zip(tp_ranks1, tp_ranks2):
        print(f'{before} => {after}')

    # handle pp contraction next
    print("\n*** PP contraction:")
    for before, after in zip(pp_ranks2, pp_ranks3):
        print(f'{before} => {after}')
10,201 | import os
import torch
from collections import OrderedDict
from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX)
def basic_folder_validation(dir):
    """Fail fast (via assert) unless *dir* names an existing directory."""
    for ok, message in ((os.path.exists(dir), f'{dir} path does not exist'),
                        (os.path.isdir(dir), f'{dir} is not a folder')):
        assert ok, message
10,202 | import os
import torch
from collections import OrderedDict
from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX)
def validate_files(file_list):
    """Print an error line for every path in *file_list* that is not a regular file.

    Note: this only reports; it does not raise or return a status.
    """
    missing = (path for path in file_list if not os.path.isfile(path))
    for path in missing:
        print(f'Error: {path} is not existent')
10,203 | import os
import torch
import types
from .constants import (FP32_WEIGHT_KEY,
PARAM,
VOCAB_DIVISIBILITY_PADDING_TENSOR,
CAT_DIM)
def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size):
    """Load the fp32 weight and optimizer-state fragments for one parameter.

    Intended to be bound onto a low-precision parameter via
    ``types.MethodType`` (so ``self`` is that parameter and must carry a
    ``_hp_mapping``). For each high-precision key, the full saved tensor is
    re-sliced for the target tensor-parallel degree and the fragment owned
    by this rank is copied into the mapped destination tensor.

    Args:
        folder: directory containing one ``<key>.pt`` file per hp key.
        tp_rank: this process's tensor-parallel rank.
        tp_world_size: tensor-parallel degree being loaded into.
    """
    hp_mapping = self._hp_mapping
    optim_state_keys = hp_mapping.get_optim_state_keys()
    # fp32 master weight plus every optimizer-state tensor for this param.
    hp_keys = [FP32_WEIGHT_KEY] + optim_state_keys
    checkpoint_files = {key: os.path.join(folder, f"{key}.pt") for key in hp_keys}
    for file in checkpoint_files.values():
        assert os.path.isfile(file), f'{file} is not a valid file'

    for key in hp_keys:
        ckpt_file = checkpoint_files[key]
        ckpt_dict = torch.load(ckpt_file)
        full_hp_param = ckpt_dict[PARAM]

        # need to deal with slices that were averaged.
        # the opposite of averaging here becomes an exact copy of the first slice
        # I thought of 2 ways:
        # implementation a. find a way for a client to pass a dict with patterns
        # if any(re.search(pattern, folder) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS):
        #     tp_rank = 0
        #     tp_world_size = 1
        # the other approach is to assume that the saved data is correct and if full_hp_param.shape ==
        # self.shape that means we automatically copy?
        # implementation b.
        # this version requires no additional data passed from the client
        # if the shapes already match it must be slices that were averaged - so we just hack around those
        if full_hp_param.shape == self.shape:
            tp_rank = 0
            tp_world_size = 1

        # special case for word_embeddings weights which get padded differently depending on TP degree.
        # the converter to universal currently strips the original padding completely so the saved
        # weight is padding-free and we just need to add new padding depending on the target TP
        # degree
        vocab_divisibility_padding_tensor = ckpt_dict.get(
            VOCAB_DIVISIBILITY_PADDING_TENSOR,
            None)
        if vocab_divisibility_padding_tensor is not None:
            # In the absence of data passed from the user wrt new padded vocab specific to tp degree
            # we can again derive that data by reverse engineering the target shapes like so:
            padded_target_vocab_size = self.shape[0] * tp_world_size
            if padded_target_vocab_size > full_hp_param.shape[0]:
                # Need to expand
                padding_size = padded_target_vocab_size - full_hp_param.shape[0]
                # Implement the following concat in efficient way using pad
                #full_hp_param = torch.cat((full_hp_param, padding_tensor), 0)
                full_hp_param = torch.nn.functional.pad(full_hp_param,
                                                        (0,
                                                         0,
                                                         0,
                                                         padding_size),
                                                        "constant",
                                                        0)
                # NOTE(review): this overwrites rows [0, -padding_size) with the
                # padding tensor — presumably broadcast over the non-padded rows;
                # confirm intended semantics vs. filling only the new rows.
                full_hp_param[:-padding_size, :] = vocab_divisibility_padding_tensor
            else:
                # Need to shrink or keep the same
                full_hp_param = full_hp_param[:padded_target_vocab_size, :]

        full_param_numel = full_hp_param.numel()
        tp_slice_numel = self.numel()
        # if key == FP32_WEIGHT_KEY and 'word_embeddings.weight' in folder:
        #     print_rank_0(f'{full_hp_param[:10]=}', force=True)
        assert full_param_numel == tp_world_size * tp_slice_numel, \
            f'Loading {ckpt_file} full param numel {full_param_numel} != tensor slice numel {tp_slice_numel} * tp_world_size {tp_world_size}'
        # fp32 weight goes to the hp fragment; everything else to its named
        # optimizer-state fragment.
        dst_tensor = hp_mapping.hp_fragment if key == FP32_WEIGHT_KEY else hp_mapping.get_optim_state_fragment(
            key)

        # print(f"{full_hp_param.shape=} {full_param_numel=} {folder=}")
        # print(f"{dst_tensor.shape=} {dst_tensor.numel()=}{folder=}")

        # since when we do many to 1 on tp we cat sometimes on dim=0 and other times on dim=1 we have to do exactly the same in reverse
        chunk_dim = ckpt_dict.get(CAT_DIM, 0)

        # this performs the opposite of cat when merging TP slices
        tp_hp_slice = full_hp_param.chunk(tp_world_size, chunk_dim)[tp_rank]
        tp_hp_slice = tp_hp_slice.flatten()

        # Narrow down to the contiguous fragment this (lp) param owns.
        lp_frag_address = hp_mapping.lp_fragment_address
        tp_hp_fragment = tp_hp_slice.narrow(0,
                                            lp_frag_address.start,
                                            lp_frag_address.numel)
        assert dst_tensor.numel() == lp_frag_address.numel, \
            f'Load checkpoint {key} dst_tensor numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}'

        # print(f"{key} SHAPE: {tp_hp_slice.shape=}")
        # print(f"{key} SHAPE: {dst_tensor.shape=}")
        # print(f"{key} SHAPE: {tp_hp_fragment.shape=}")
        dst_tensor.data.copy_(tp_hp_fragment.data)
def enable_universal_checkpoint(param_list):
    """Bind ``load_hp_checkpoint_state`` onto each param as an instance method."""
    for param in param_list:
        bound_loader = types.MethodType(load_hp_checkpoint_state, param)
        param.load_hp_checkpoint_state = bound_loader
10,204 | from .reshape_utils import (get_files,
get_files_with_prefix,
partition_data,
get_zero_files)
from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX)
from .reshape_meg_2d import (reshape_meg_2d_parallel, meg_2d_parallel_map)
class model_3d_desc(object):
    """Describes a 3-D (pipeline, tensor, data) parallel topology and
    supports contracting it onto a smaller target topology."""
    def __init__(self, pp_degree=1, tp_degree=1, dp_degree=1):
        self.pp_degree = pp_degree
        self.tp_degree = tp_degree
        self.dp_degree = dp_degree

    def reshape(self, target_3d_desc, verbose=False):
        """Map this topology onto *target_3d_desc* (contraction only).

        Returns one 2-D (pp, tp) map per target dp replica.
        """
        reshape_ok, reshape_errors = self.can_reshape(target_3d_desc)
        assert reshape_ok, ','.join(reshape_errors)
        two_d_map = reshape_meg_2d_parallel(old_pp_degree=self.pp_degree,
                                            old_tp_degree=self.tp_degree,
                                            new_pp_degree=target_3d_desc.pp_degree,
                                            new_tp_degree=target_3d_desc.tp_degree,
                                            verbose=verbose)
        flat_map = flatten_dp_dimension(meg_2d_map=two_d_map,
                                        src_2d_size=self.pp_degree * self.tp_degree,
                                        dp_degree=self.dp_degree)
        return unflatten_dp_dimension(meg_2d_map=flat_map,
                                      dp_degree=target_3d_desc.dp_degree)

    def get_desc(self):
        """Human-readable summary of the three degrees."""
        return f'{PP_DIM},{TP_DIM},{DP_DIM} = ({self.pp_degree}, {self.tp_degree}, {self.dp_degree})'

    def world_size(self):
        return self.pp_degree * self.tp_degree * self.dp_degree

    def is_valid(self, pp_index, tp_index, dp_index):
        """Check each index against its degree; returns (ok, error_messages)."""
        problems = []
        for index, degree, dim_name in ((pp_index, self.pp_degree, PP_DIM),
                                        (tp_index, self.tp_degree, TP_DIM),
                                        (dp_index, self.dp_degree, DP_DIM)):
            if index >= degree:
                problems.append(
                    f'{dim_name} indexing error: index {index} >= degree {degree}')
        return not problems, problems

    def can_reshape(self, target_3d_desc):
        """Only contraction is allowed; returns (ok, error_messages)."""
        problems = []
        for dim_name, src_degree, tgt_degree in (
            (PP_DIM, self.pp_degree, target_3d_desc.pp_degree),
            (TP_DIM, self.tp_degree, target_3d_desc.tp_degree),
            (DP_DIM, self.dp_degree, target_3d_desc.dp_degree)):
            if tgt_degree > src_degree:
                problems.append(
                    f'Expansion reshape not supported - {dim_name}: {src_degree} ---> {tgt_degree}'
                )
        return not problems, problems
def get_files_with_prefix(all_files, prefix):
    """Return, sorted, the paths whose basename starts with *prefix*."""
    matches = [
        path for path in all_files
        if os.path.split(path)[1].startswith(prefix)
    ]
    return sorted(matches)
def get_files(dir):
    """Recursively collect every file path under *dir* (in os.walk order)."""
    collected = []
    for root, _, filenames in os.walk(dir):
        collected.extend(os.path.join(root, name) for name in filenames)
    return collected
def get_zero_files(dir):
    """Return the ZeRO shard files under *dir*, trying each known prefix in turn.

    Returns the matches for the first prefix that has any, else [].
    """
    all_files = get_files(dir)
    for prefix in (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX):
        matches = get_files_with_prefix(all_files, prefix)
        if matches:
            return matches
    return []
MODEL_FILE_PREFIX = 'mp_rank_'  # prefix of per-(tp,pp)-rank model state files
LAYER_FILE_PREFIX = 'layer_'  # prefix of per-layer checkpoint files (pipeline modules)
def get_model_3d_descriptor(dir):
    """Infer the (pp, tp, dp) degrees from the checkpoint files found in *dir*."""
    checkpoint_files = get_files(dir)
    zero_shards = get_zero_files(dir)
    model_files = get_files_with_prefix(checkpoint_files, MODEL_FILE_PREFIX)
    # Pipeline-parallel checkpoints write layer_01* once per TP rank.
    first_layer_files = get_files_with_prefix(checkpoint_files,
                                              f'{LAYER_FILE_PREFIX}01')
    if first_layer_files:
        tp_degree = len(first_layer_files)
        pp_degree = len(model_files) // tp_degree
        dp_degree = max(1, len(zero_shards) // (pp_degree * tp_degree))
    else:
        # No pipeline layers: one model file per TP rank, pp degree is 0.
        tp_degree = len(model_files)
        dp_degree = max(1, len(zero_shards) // tp_degree)
        pp_degree = 0
    return model_3d_desc(pp_degree, tp_degree, dp_degree)
10,205 | from .reshape_utils import (get_files,
get_files_with_prefix,
partition_data,
get_zero_files)
from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX)
from .reshape_meg_2d import (reshape_meg_2d_parallel, meg_2d_parallel_map)
# NOTE(review): the class below is a signature-only stub — the method bodies
# were elided by the snippet extraction, so this fragment is not valid
# standalone Python. The full implementation of meg_2d_parallel_map appears
# elsewhere in this file.
class meg_2d_parallel_map(object):
    def __init__(self, pp_degree, tp_degree):
    def simple_init(self):
    def add_data(self, pp_index, tp_index, data):
    def get_data(self, pp_index=None, tp_index=None):
    def print_data(self, tag):
    def _validate_indices(self, pp_index, tp_index):
    def _make_key(self, i, j):
def flatten_dp_dimension(meg_2d_map, src_2d_size, dp_degree):
    """Expand each (pp, tp) cell's dp-0 ranks into ranks for every dp replica.

    Replica ``d`` of global rank ``r`` lives at ``r + d * src_2d_size``.
    """
    flat_map = meg_2d_parallel_map(meg_2d_map.pp_degree, meg_2d_map.tp_degree)
    for pp_index in range(meg_2d_map.pp_degree):
        for tp_index in range(meg_2d_map.tp_degree):
            for base_rank in meg_2d_map.get_data(pp_index, tp_index):
                replicas = [base_rank + d * src_2d_size for d in range(dp_degree)]
                flat_map.add_data(pp_index, tp_index, replicas)
    return flat_map
10,206 | from .reshape_utils import (get_files,
get_files_with_prefix,
partition_data,
get_zero_files)
from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX)
from .reshape_meg_2d import (reshape_meg_2d_parallel, meg_2d_parallel_map)
def partition_data(data_list, num_partitions):
num_elems = len(data_list)
assert num_elems % num_partitions == 0
partition_size = num_elems // num_partitions
partitions_list = [
data_list[i:i + partition_size] for i in range(0,
num_elems,
partition_size)
]
return partitions_list
class meg_2d_parallel_map(object):
    """Maps (pp_index, tp_index) coordinates to lists of global ranks."""
    def __init__(self, pp_degree, tp_degree):
        self.pp_degree = pp_degree
        self.tp_degree = tp_degree
        self.map = {}

    def simple_init(self):
        """Identity layout: global rank i lives at (i // tp, i % tp)."""
        self.map = {}
        for rank in range(self.pp_degree * self.tp_degree):
            key = self._make_key(rank // self.tp_degree, rank % self.tp_degree)
            self.map[key] = [rank]

    def add_data(self, pp_index, tp_index, data):
        """Append the rank list *data* to cell (pp_index, tp_index)."""
        self._validate_indices(pp_index, tp_index)
        assert type(data) is list
        key = self._make_key(pp_index, tp_index)
        self.map.setdefault(key, []).extend(data)

    def get_data(self, pp_index=None, tp_index=None):
        """Collect ranks; None for an index means 'all along that axis'."""
        self._validate_indices(pp_index, tp_index)
        pp_range = [pp_index] if pp_index is not None else list(range(self.pp_degree))
        tp_range = [tp_index] if tp_index is not None else list(range(self.tp_degree))
        collected = []
        for p in pp_range:
            for t in tp_range:
                collected.extend(self.map[self._make_key(p, t)])
        return collected

    def print_data(self, tag):
        """Print *tag* followed by every key = ranks entry."""
        print(f'{tag}')
        for key, value in self.map.items():
            print(f'{key} = {value}')

    def _validate_indices(self, pp_index, tp_index):
        assert pp_index is None or pp_index < self.pp_degree
        assert tp_index is None or tp_index < self.tp_degree

    def _make_key(self, i, j):
        return f'{i},{j}'
def unflatten_dp_dimension(meg_2d_map, dp_degree):
    """Split a dp-flattened 2-D map into one 2-D map per dp replica."""
    pp_degree = meg_2d_map.pp_degree
    tp_degree = meg_2d_map.tp_degree
    per_replica_maps = [
        meg_2d_parallel_map(pp_degree=pp_degree,
                            tp_degree=tp_degree) for _ in range(dp_degree)
    ]
    for pp_index in range(pp_degree):
        for tp_index in range(tp_degree):
            flat_ranks = meg_2d_map.get_data(pp_index, tp_index)
            replica_chunks = partition_data(flat_ranks, dp_degree)
            for replica_map, ranks in zip(per_replica_maps, replica_chunks):
                replica_map.add_data(pp_index, tp_index, ranks)
    return per_replica_maps
10,207 | from collections import OrderedDict
import torch
import sys
import os
from deepspeed import comm as dist
from deepspeed.runtime.constants import PIPE_REPLICATED
from deepspeed.ops.op_builder import UtilsBuilder
from deepspeed.runtime import ZeROOptimizer
from packaging import version as pkg_version
from deepspeed.git_version_info import version
from deepspeed.runtime.utils import (get_global_norm_of_tensors,
clip_tensors_by_global_norm,
DummyOptim,
align_dense_tensors,
all_gather_dp_groups,
bwc_tensor_model_parallel_rank,
is_model_parallel_parameter,
see_memory_usage)
from deepspeed.utils import link_hp_params, fragment_address
from deepspeed.checkpoint import enable_universal_checkpoint
from deepspeed.checkpoint.constants import (DS_VERSION,
PARTITION_COUNT,
BASE_OPTIMIZER_STATE,
SINGLE_PARTITION_OF_FP32_GROUPS,
CLIP_GRAD,
GROUP_PADDINGS,
PARAM_SLICE_MAPPINGS)
def _get_padded_tensor(src_tensor, size):
if src_tensor.numel() >= size:
return src_tensor
padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device)
slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel())
slice_tensor.data.copy_(src_tensor.data)
return padded_tensor | null |
10,208 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
def noop_decorator(func):
    """Identity decorator: return *func* unchanged."""
    return func
10,209 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
The provided code snippet includes necessary dependencies for implementing the `ensure_directory_exists` function. Write a Python function `def ensure_directory_exists(filename)` to solve the following problem:
Create the directory path to ``filename`` if it does not already exist. Args: filename (str): A file path.
Here is the function:
def ensure_directory_exists(filename):
    """Create the directory path to ``filename`` if it does not already exist.

    Args:
        filename (str): A file path.
    """
    dirname = os.path.dirname(filename)
    # A bare filename ("file.txt") has an empty dirname, and os.makedirs('')
    # raises FileNotFoundError — only create when there is a directory part.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
10,210 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
The provided code snippet includes necessary dependencies for implementing the `set_random_seed` function. Write a Python function `def set_random_seed(seed)` to solve the following problem:
Set the random seed for common PRNGs used during training: random, numpy, and torch. Args: seed (int): the seed to use
Here is the function:
def set_random_seed(seed):
    """Set the random seed for common PRNGs used during training: random, numpy, and torch.

    Args:
        seed (int): the seed to use
    """
    import random

    import numpy

    random.seed(seed)
    numpy.random.seed(seed)
    torch.manual_seed(seed)
10,211 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
The provided code snippet includes necessary dependencies for implementing the `copy_to_device` function. Write a Python function `def copy_to_device(item, device, criterion_func)` to solve the following problem:
Return a copy of tensor on specified device. Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts. Parameters: item: tensor to copy or (possibly nested) container of tensors to copy. device: target device criterion_func: Function to restrict copy operation to items that meet the criterion Returns: a copy of the input with matching tensors placed on the target device
Here is the function:
def copy_to_device(item, device, criterion_func):
    """
    Return a copy of tensor on specified device.
    Works on individual tensors, and tensors contained/nested in lists,
    tuples, and dicts.

    Parameters:
        item: tensor to copy or (possibly nested) container of tensors to copy.
        device: target device
        criterion_func: Function to restrict copy operation to items that
            meet the criterion

    Returns:
        A copy of *item* with matching tensors placed on *device*; the
        original is not mutated. Non-container, non-matching items are
        returned unchanged.
    """
    if criterion_func(item):
        return item.to(device)
    recurse = lambda v: copy_to_device(v, device, criterion_func)
    if isinstance(item, list):
        return [recurse(v) for v in item]
    if isinstance(item, tuple):
        return tuple(recurse(v) for v in item)
    if isinstance(item, dict):
        return {k: recurse(v) for k, v in item.items()}
    return item
10,212 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
The provided code snippet includes necessary dependencies for implementing the `move_to_device` function. Write a Python function `def move_to_device(item, device, criterion_func)` to solve the following problem:
Move tensor onto the specified device by changing the storage. Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts. Parameters: item: tensor to move or (possibly nested) container of tensors to move. device: target device criterion_func: Function to restrict move operation to items that meet the criterion Returns: the input item, with matching tensors moved to the target device in place
Here is the function:
def move_to_device(item, device, criterion_func):
    """
    Move tensors onto the specified device by rebinding their storage
    (``item.data``) in place.
    Works on individual tensors, and tensors contained/nested in lists,
    tuples, and dicts.

    Parameters:
        item: tensor to move or (possibly nested) container of tensors to move.
        device: target device
        criterion_func: Function to restrict move operation to items that
            meet the criterion

    Returns:
        The input structure (containers are rebuilt); matching tensor
        objects are the same objects with storage moved to *device*.
    """
    if criterion_func(item):
        item.data = item.to(device).data
        return item
    recurse = lambda v: move_to_device(v, device, criterion_func)
    if isinstance(item, list):
        return [recurse(v) for v in item]
    if isinstance(item, tuple):
        return tuple(recurse(v) for v in item)
    if isinstance(item, dict):
        return {k: recurse(v) for k, v in item.items()}
    return item
10,213 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
def _handle_overflow(cpu_sum, x, i):
    """On global rank 0, log the first non-finite element of tensor *x*.

    *i* is the tensor's index in its containing group; the logged position is
    the flat element index of the first inf/nan (-1 if none is found).
    """
    import math
    rank = dist.get_rank()
    if rank != 0:
        return
    t_i = next(
        (idx for idx, v in enumerate(x.data.contiguous().view(-1))
         if not math.isfinite(float(v))),
        -1)
    logger.info(
        f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}"
    )
10,214 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
The provided code snippet includes necessary dependencies for implementing the `get_global_norm` function. Write a Python function `def get_global_norm(norm_list)` to solve the following problem:
Compute total from a list of norms
Here is the function:
def get_global_norm(norm_list):
    """ Compute total from a list of norms
    """
    # Root of the sum of squares: combines per-group L2 norms into one.
    return sqrt(sum(norm**2.0 for norm in norm_list))
10,215 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
def is_model_parallel_parameter(p) -> bool:
    """True iff *p* carries a truthy model-parallel flag (legacy
    ``model_parallel`` or newer ``tensor_model_parallel`` attribute)."""
    legacy_flag = getattr(p, 'model_parallel', False)
    tensor_flag = getattr(p, 'tensor_model_parallel', False)
    return bool(legacy_flag or tensor_flag)
The provided code snippet includes necessary dependencies for implementing the `clip_grad_norm_` function. Write a Python function `def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None)` to solve the following problem:
Clips gradient norm of an iterable of parameters. This has been adapted from Nvidia megatron. We add norm averaging to consider MoE params when calculating norm as they will result in different norms across different ranks. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and added functionality to handle model parallel parameters. Note that the gradients are modified in place. Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector).
Here is the function:
def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None):
    """Clips gradient norm of an iterable of parameters.
    This has been adapted from Nvidia megatron. We add norm averaging
    to consider MoE params when calculating norm as they will result
    in different norms across different ranks.
    This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
    added functionality to handle model parallel parameters. Note that
    the gradients are modified in place.
    Arguments:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor that will have gradients normalized
        max_norm (float or int): max norm of the gradients
        norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
            infinity norm.
    Returns:
        Total norm of the parameters (viewed as a single vector).
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    # Only parameters that actually received gradients participate.
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if norm_type == inf:
        total_norm = max(p.grad.data.abs().max() for p in parameters)
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        # Take max across all GPUs.
        if mpu is not None:
            dist.all_reduce(total_norm_cuda,
                            op=dist.ReduceOp.MAX,
                            group=mpu.get_model_parallel_group())
        total_norm = total_norm_cuda[0].item()
    else:
        total_norm = 0
        for p in parameters:
            if mpu is not None:
                # Count shared (non-model-parallel) params only on MP rank 0
                # to avoid double-counting replicated tensors.
                if (mpu.get_model_parallel_rank()
                        == 0) or is_model_parallel_parameter(p):
                    param_norm = p.grad.data.norm(norm_type)
                    total_norm += param_norm.item()**norm_type
            else:
                param_norm = p.grad.data.float().norm(norm_type)
                total_norm += param_norm.item()**norm_type

        # Sum across all model parallel GPUs.
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        if mpu is not None:
            dist.all_reduce(total_norm_cuda,
                            op=dist.ReduceOp.SUM,
                            group=mpu.get_model_parallel_group())
        total_norm = total_norm_cuda[0].item()**(1. / norm_type)

    # Need to average total_norm across different GPUs due to the presence of moe params
    pg = groups._get_data_parallel_group()
    scaled_norm = total_norm * 1.0 / float(dist.get_world_size(group=pg))
    scaled_norm_tensor = torch.cuda.FloatTensor([float(scaled_norm)])
    dist.all_reduce(scaled_norm_tensor, group=pg)
    total_norm = scaled_norm_tensor.item()

    # 1e-6 guards against division by zero when the total norm is 0.
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1:
        for p in parameters:
            p.grad.data.mul_(clip_coef)
    return total_norm
10,216 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
# Compatibility shims for older torch releases that predate the
# (max_)memory_reserved API names.
if hasattr(torch.cuda, "memory_reserved"):
    torch_memory_reserved = torch.cuda.memory_reserved
else:
    # NOTE(review): fallback maps memory_reserved -> memory_allocated
    # (not memory_cached) — confirm this matches upstream intent.
    torch_memory_reserved = torch.cuda.memory_allocated
if hasattr(torch.cuda, "max_memory_reserved"):
    torch_max_memory_reserved = torch.cuda.max_memory_reserved
else:
    torch_max_memory_reserved = torch.cuda.memory_cached
def is_model_parallel_parameter(p) -> bool:
    """Return True when *p* carries a truthy model-parallel flag
    (legacy ``model_parallel`` or newer ``tensor_model_parallel``)."""
    if hasattr(p, 'model_parallel') and p.model_parallel:
        return True

    if hasattr(p, 'tensor_model_parallel') and p.tensor_model_parallel:
        return True

    return False
def bwc_tensor_model_parallel_rank(mpu=None):
    """Backwards-compatible way of querying the tensor model parallel rank from
    an ``mpu`` object.

    *Tensor* model parallelism means that tensors are physically split across
    processes, in contrast with *pipeline* model parallelism where layers are
    partitioned but tensors left intact. The ``mpu`` API for this has changed
    across versions; this helper tries each known accessor in order of
    preference: ``get_tensor_model_parallel_rank()`` (current convention),
    ``get_slice_parallel_rank()`` (some DeepSpeed + pipeline versions), and
    finally the deprecated ``get_model_parallel_rank()``.

    Args:
        mpu (model parallel unit, optional): If ``mpu=None``, returns 0.
            Defaults to ``None``.
    Returns:
        int: the rank
    """
    if mpu is None:
        # No model parallelism in easy :)
        return 0
    if hasattr(mpu, 'get_tensor_model_parallel_rank'):
        return mpu.get_tensor_model_parallel_rank()
    if hasattr(mpu, 'get_slice_parallel_rank'):
        return mpu.get_slice_parallel_rank()
    # Deprecated Megatron and DeepSpeed convention (raises AttributeError if
    # the object supports none of the known accessors).
    return mpu.get_model_parallel_rank()
PIPE_REPLICATED = 'ds_pipe_replicated'  # attribute name flagging params replicated across pipeline stages
The provided code snippet includes necessary dependencies for implementing the `get_grad_zeros` function. Write a Python function `def get_grad_zeros(parameters, mpu=None)` to solve the following problem:
Compute the number of grads with zero values. This is adapted from get_grad_norm Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor whose gradients will be inspected Returns: Total number of gradient elements with zero values (viewed as a single vector).
Here is the function:
def get_grad_zeros(parameters, mpu=None):
    """Compute the number of grads with zero values.

    This is adapted from get_grad_norm.

    Arguments:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor whose gradients are inspected.
        mpu: optional model-parallel unit, used both to skip replicated
            parameters and to sum the count across model-parallel ranks.

    Returns:
        Total number of grad elements with zero values (viewed as a single vector).
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    # Parameters without gradients contribute nothing.
    with_grads = [p for p in parameters if p.grad is not None]
    zero_count = 0.
    mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)
    for param in with_grads:
        # Pipeline parallelism may replicate parameters; count each once.
        if hasattr(param, PIPE_REPLICATED) and param.ds_pipe_replicated:
            continue
        # Skip tensors replicated by tensor model parallelism on ranks > 0.
        if mp_rank > 0 and not is_model_parallel_parameter(param):
            continue
        zeros = param.grad.numel() - torch.count_nonzero(param.grad)
        zero_count += zeros.item()
    # Sum across all model parallel GPUs.
    zero_count_cuda = torch.cuda.FloatTensor([float(zero_count)])
    if mpu is not None:
        dist.all_reduce(zero_count_cuda,
                        op=dist.ReduceOp.SUM,
                        group=mpu.get_model_parallel_group())
    return zero_count_cuda[0].item()
10,217 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
# Compatibility shims: newer PyTorch renamed ``memory_cached`` ->
# ``memory_reserved``; resolve the best available counters once at import time.
if hasattr(torch.cuda, "memory_reserved"):
    torch_memory_reserved = torch.cuda.memory_reserved
else:
    # NOTE(review): fallback is memory_allocated, not memory_cached — confirm intended.
    torch_memory_reserved = torch.cuda.memory_allocated
if hasattr(torch.cuda, "max_memory_reserved"):
    torch_max_memory_reserved = torch.cuda.max_memory_reserved
else:
    torch_max_memory_reserved = torch.cuda.memory_cached
def is_model_parallel_parameter(p) -> bool:
    """Return True if ``p`` carries a Megatron-style model-parallel flag."""
    # Legacy ``model_parallel`` flag.
    if hasattr(p, 'model_parallel') and p.model_parallel:
        return True
    # Newer ``tensor_model_parallel`` flag.
    if hasattr(p, 'tensor_model_parallel') and p.tensor_model_parallel:
        return True
    return False
def bwc_tensor_model_parallel_rank(mpu=None):
    """Backwards-compatible way of querying the tensor model parallel rank from
    an ``mpu`` object.
    *Tensor* model parallelism means that tensors are physically split across
    processes. This contrasts with *pipeline* model parallelism, in which the
    layers are partitioned but tensors left intact.
    The API for tensor model parallelism has changed across versions and this
    helper provides a best-effort implementation across versions of ``mpu``
    objects. The preferred mechanism is
    ``mpu.get_tensor_model_parallel_rank()``.
    This should "just work" with both Megatron-LM and DeepSpeed's pipeline
    parallelism.
    Args:
        mpu (model parallel unit, optional): The tensor model parallel rank.
            If ``mpu=None``, returns 0. Defaults to ``None``.
    Returns:
        int: the rank
    """
    if mpu is None:
        # No model parallelism configured, so the tensor-parallel rank is trivially 0.
        return 0
    if hasattr(mpu, 'get_tensor_model_parallel_rank'):
        # New Megatron and DeepSpeed convention (post pipeline-parallelism release)
        return mpu.get_tensor_model_parallel_rank()
    elif hasattr(mpu, 'get_slice_parallel_rank'):
        # Some DeepSpeed + pipeline parallelism versions
        return mpu.get_slice_parallel_rank()
    else:
        # Deprecated Megatron and DeepSpeed convention
        return mpu.get_model_parallel_rank()
# Attribute name marking parameters replicated by pipeline parallelism.
PIPE_REPLICATED = 'ds_pipe_replicated'
The provided code snippet includes necessary dependencies for implementing the `get_weight_norm` function. Write a Python function `def get_weight_norm(parameters, norm_type=2, mpu=None)` to solve the following problem:
Get norm of an iterable of parameters. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_, with added functionality to handle model-parallel parameters. Taken from Nvidia Megatron. Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor whose norm is computed. norm_type (float or int): type of the used p-norm; can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector).
Here is the function:
def get_weight_norm(parameters, norm_type=2, mpu=None):
    """Get norm of an iterable of parameters.
    This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ with
    added functionality to handle model parallel parameters. Taken from
    Nvidia Megatron. Note: this operates on parameter *data*, not grads.
    Arguments:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor whose norm is computed
        norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
            infinity norm.
        mpu: optional model-parallel unit; used to skip replicated params and
            to reduce the partial norms across model-parallel ranks.
    Returns:
        Total norm of the parameters (viewed as a single vector), or -1 if
        the result is inf/NaN.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    norm_type = float(norm_type)
    if norm_type == inf:
        total_norm = max(p.data.abs().max() for p in parameters)
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        # Take max across all GPUs.
        if mpu is not None:
            dist.all_reduce(total_norm_cuda,
                            op=dist.ReduceOp.MAX,
                            group=mpu.get_model_parallel_group())
        total_norm = total_norm_cuda[0].item()
    else:
        total_norm = 0.
        tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)
        for p in parameters:
            # Pipeline parallelism may replicate parameters. Avoid multi-counting.
            if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
                continue
            # Filter to avoid over-counting replicated tensors from tensor
            # model parallelism
            if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p):
                continue
            param_norm = p.data.float().norm(norm_type)
            # NOTE(review): accumulates a 0-dim tensor (no .item()), unlike
            # get_grad_norm; the float() below converts it — confirm intended.
            total_norm += param_norm**norm_type
        # Sum across all model parallel GPUs.
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        if mpu is not None:
            dist.all_reduce(total_norm_cuda,
                            op=dist.ReduceOp.SUM,
                            group=mpu.get_model_parallel_group())
        total_norm = total_norm_cuda[0].item()**(1. / norm_type)
    # -1 is a sentinel for inf/NaN (total_norm != total_norm is a NaN check).
    if total_norm == float(
            'inf') or total_norm == -float('inf') or total_norm != total_norm:
        total_norm = -1
    return total_norm
10,218 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
def prefix_sum_inc(weights):
def partition_uniform(num_items, num_parts):
def _lprobe(weights, num_parts, bottleneck):
def _rb_partition_balanced(weights, num_parts, eps):
def partition_balanced(weights, num_parts, eps=1e-3):
    """Partition ``weights`` into ``num_parts`` contiguous parts, minimizing
    the weight of the heaviest part to within ``eps``."""
    count = len(weights)
    # Trivial edge case: with at least as many parts as items, split uniformly.
    if count <= num_parts:
        return partition_uniform(count, num_parts)

    prefix = prefix_sum_inc(weights)
    # Binary-search the smallest feasible bottleneck (heaviest-part weight)...
    bottleneck = _rb_partition_balanced(prefix, num_parts, eps=eps)
    # ...then materialize a partitioning achieving it.
    parts, ok = _lprobe(prefix, num_parts, bottleneck)
    assert ok
    return parts
10,219 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
# Baselines from the previous memory_status() call, used to report deltas.
mem_alloced = 0
mem_cached = 0
def memory_status(msg, print_rank=-1, reset_max=False):
    """Print a one-line CUDA memory report for this rank.

    Reports current/peak allocated and cached memory in GB, plus deltas
    against the module-level ``mem_alloced``/``mem_cached`` baselines from
    the previous call (which this function updates).

    Args:
        msg (str): label included in the printed line.
        print_rank (int): only this rank prints; -1 means every rank prints.
        reset_max (bool): reset the peak counters before sampling.
    """
    global mem_alloced, mem_cached
    rank = dist.get_rank()
    if print_rank != -1 and rank != print_rank:
        return
    # Ensure pending kernels have completed so the counters are accurate.
    torch.cuda.synchronize()
    if reset_max:
        torch.cuda.reset_max_memory_cached()
        torch.cuda.reset_max_memory_allocated()
    new_alloced = torch.cuda.memory_allocated()
    new_cached = torch.cuda.memory_cached()
    # Deltas vs. the previous call; then advance the baselines.
    delta_alloced = new_alloced - mem_alloced
    delta_cached = new_cached - mem_cached
    mem_cached = new_cached
    mem_alloced = new_alloced
    max_alloced = torch.cuda.max_memory_allocated()
    max_cached = torch.cuda.max_memory_cached()
    # convert to GB for printing
    new_alloced /= 1024**3
    new_cached /= 1024**3
    delta_alloced /= 1024**3
    delta_cached /= 1024**3
    max_alloced /= 1024**3
    max_cached /= 1024**3
    print(
        f'RANK={rank} MEMSTATS',
        msg,
        f'device={torch.cuda.current_device()} '
        f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) '
        f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)'
    )
10,220 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
def get_ma_status():
    """Return CUDA memory currently allocated, or 0 on non-zero ranks."""
    if dist.is_initialized() and dist.get_rank() != 0:
        return 0
    return torch.cuda.memory_allocated()
10,221 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
# Compatibility shims: newer PyTorch renamed ``memory_cached`` ->
# ``memory_reserved``; resolve the best available counters once at import time.
if hasattr(torch.cuda, "memory_reserved"):
    torch_memory_reserved = torch.cuda.memory_reserved
else:
    # NOTE(review): fallback is memory_allocated, not memory_cached — confirm intended.
    torch_memory_reserved = torch.cuda.memory_allocated
if hasattr(torch.cuda, "max_memory_reserved"):
    torch_max_memory_reserved = torch.cuda.max_memory_reserved
else:
    torch_max_memory_reserved = torch.cuda.memory_cached
def see_memory_usage(message, force=False):
    """Log current/peak GPU memory and CPU virtual-memory usage.

    No-op unless ``force`` is True, and a no-op on non-zero ranks when
    torch.distributed is initialized. Resets the peak counters afterwards
    so the next call reports fresh peaks.
    """
    if not force:
        return
    if dist.is_initialized() and not dist.get_rank() == 0:
        return
    # python doesn't do real-time garbage collection so do it explicitly to get the correct RAM reports
    gc.collect()
    # Print message except when distributed but not rank 0
    logger.info(message)
    logger.info(
        f"MA {round(torch.cuda.memory_allocated() / (1024 * 1024 * 1024),2 )} GB \
        Max_MA {round(torch.cuda.max_memory_allocated() / (1024 * 1024 * 1024),2)} GB \
        CA {round(torch_memory_reserved() / (1024 * 1024 * 1024),2)} GB \
        Max_CA {round(torch_max_memory_reserved() / (1024 * 1024 * 1024))} GB ")
    vm_stats = psutil.virtual_memory()
    used_GB = round(((vm_stats.total - vm_stats.available) / (1024**3)), 2)
    logger.info(
        f'CPU Virtual Memory: used = {used_GB} GB, percent = {vm_stats.percent}%')
    # get the peak memory to report correct data, so reset the counter for the next call
    if hasattr(torch.cuda, "reset_peak_memory_stats"):  # pytorch 1.4+
        torch.cuda.reset_peak_memory_stats()
10,222 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
The provided code snippet includes necessary dependencies for implementing the `call_to_str` function. Write a Python function `def call_to_str(base, *args, **kwargs)` to solve the following problem:
Construct a string representation of a call. Args: base (str): name of the call args (tuple, optional): args to ``base`` kwargs (dict, optional): kwargs supplied to ``base`` Returns: str: A string representation of base(*args, **kwargs)
Here is the function:
def call_to_str(base, *args, **kwargs):
    """Construct a string representation of a call.

    Args:
        base (str): name of the call
        args (tuple, optional): args to ``base``
        kwargs (dict, optional): kwargs supplied to ``base``

    Returns:
        str: A string representation of base(*args, **kwargs)
    """
    name = f'{base}('
    if args:
        name += ', '.join(repr(arg) for arg in args)
        # Only emit the separator when BOTH args and kwargs are present;
        # the previous flat `if kwargs` produced "base(, k=v)" for
        # kwargs-only calls.
        if kwargs:
            name += ', '
    if kwargs:
        name += ', '.join(f'{key}={repr(arg)}' for key, arg in kwargs.items())
    name += ')'
    return name
10,223 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
def get_only_unique_item(items):
    """Return the single distinct element of ``items``.

    Raises:
        RuntimeError: if ``items`` does not contain exactly one unique value.
    """
    distinct = set(items)
    if len(distinct) != 1:
        raise RuntimeError(f"expected there to be only one unique element in {items}")
    (only,) = distinct
    return only
10,224 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
def get_grad_norm(parameters, norm_type=2, mpu=None):
    """Get grad norm of an iterable of parameters.
    This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ with
    added functionality to handle model parallel parameters. Taken from
    Nvidia Megatron. Gradients are read, not modified.
    Arguments:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor whose gradient norm is computed
        norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
            infinity norm.
        mpu: optional model-parallel unit; used to skip replicated params and
            to reduce the partial norms across model-parallel ranks.
    Returns:
        Total norm of the gradients (viewed as a single vector), or -1 if
        the result is inf/NaN.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    # Parameters without gradients contribute nothing to the norm.
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if norm_type == inf:
        total_norm = max(p.grad.data.abs().max() for p in parameters)
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        # Take max across all GPUs.
        if mpu is not None:
            dist.all_reduce(total_norm_cuda,
                            op=dist.ReduceOp.MAX,
                            group=mpu.get_model_parallel_group())
        total_norm = total_norm_cuda[0].item()
    else:
        total_norm = 0.
        tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)
        for p in parameters:
            # Pipeline parallelism may replicate parameters. Avoid multi-counting.
            if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
                continue
            # Filter to avoid over-counting replicated tensors from tensor
            # model parallelism
            if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p):
                continue
            param_norm = p.grad.data.float().norm(norm_type)
            total_norm += param_norm.item()**norm_type
        # Sum across all model parallel GPUs.
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        if mpu is not None:
            dist.all_reduce(total_norm_cuda,
                            op=dist.ReduceOp.SUM,
                            group=mpu.get_model_parallel_group())
        total_norm = total_norm_cuda[0].item()**(1. / norm_type)
    # -1 is a sentinel for inf/NaN (total_norm != total_norm is a NaN check).
    if total_norm == float(
            'inf') or total_norm == -float('inf') or total_norm != total_norm:
        total_norm = -1
    return total_norm
The provided code snippet includes necessary dependencies for implementing the `clip_gradients` function. Write a Python function `def clip_gradients(parameters, max_norm=1.0, global_grad_norm=None, mpu=None, eps=1e-6)` to solve the following problem:
Clip the gradient of a list of parameters. Args: parameters: List of parameters whose .grad will be clipped. global_grad_norm (float, optional): Precomputed gradient norm. Defaults to None. mpu (optional): model parallelism unit. Defaults to None. eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6 Returns: float: the global gradient norm
Here is the function:
def clip_gradients(parameters, max_norm=1.0, global_grad_norm=None, mpu=None, eps=1e-6):
    """Clip the gradient of a list of parameters.

    Args:
        parameters: List of parameters whose .grad will be clipped.
        global_grad_norm (float, optional): Precomputed gradient norm. Defaults to None.
        mpu (optional): model parallelism unit. Defaults to None.
        eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6

    Returns:
        float: the global gradient norm
    """
    if global_grad_norm is None:
        global_grad_norm = get_grad_norm(parameters, mpu=mpu)
    scale = max_norm / (global_grad_norm + eps)
    # Only ever scale gradients down, never up.
    if scale < 1:
        for param in parameters:
            param.grad.detach().mul_(scale)
    return global_grad_norm
10,225 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None):
    """Get norm of an iterable of tensors.
    This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
    added functionality to handle model parallel parameters. Taken from Nvidia Megatron.
    Arguments:
        input_tensors (Iterable[Tensor]): an iterable of Tensors will have norm computed
        norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
            infinity norm.
    Returns:
        Total norm of the tensors (viewed as a single vector), or -1 if the
        result is inf/NaN.
    """
    assert isinstance(input_tensors, Iterable), f'expected Iterable type not {type(input_tensors)}'
    assert all([torch.is_tensor(t) for t in input_tensors]), f'expected list of only tensors'
    norm_type = float(norm_type)
    if norm_type == inf:
        total_norm = max(t.data.abs().max() for t in input_tensors)
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        # Take the max across model-parallel ranks.
        if mpu is not None:
            dist.all_reduce(total_norm_cuda,
                            op=dist.ReduceOp.MAX,
                            group=mpu.get_model_parallel_group())
        total_norm = total_norm_cuda[0].item()
    else:
        # Sum of p-th powers of the per-tensor norms, then the p-th root below.
        total_norm = sum(
            [t.data.float().norm(norm_type).item()**norm_type for t in input_tensors])
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        if mpu is not None:
            dist.all_reduce(total_norm_cuda,
                            op=dist.ReduceOp.SUM,
                            group=mpu.get_model_parallel_group())
        total_norm = total_norm_cuda[0].item()**(1. / norm_type)
    # -1 is a sentinel for inf/NaN (total_norm != total_norm is a NaN check).
    if total_norm == float(
            'inf') or total_norm == -float('inf') or total_norm != total_norm:
        total_norm = -1
    return total_norm
The provided code snippet includes necessary dependencies for implementing the `clip_tensors_by_global_norm` function. Write a Python function `def clip_tensors_by_global_norm(input_tensors, max_norm=1.0, global_norm=None, mpu=None, eps=1e-6)` to solve the following problem:
Clip list of tensors by global norm. Args: input_tensors: List of tensors to be clipped global_norm (float, optional): Precomputed norm. Defaults to None. mpu (optional): model parallelism unit. Defaults to None. eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6 Returns: float: the global norm
Here is the function:
def clip_tensors_by_global_norm(input_tensors,
                                max_norm=1.0,
                                global_norm=None,
                                mpu=None,
                                eps=1e-6):
    """Clip a list of tensors in place by their global norm.

    Args:
        input_tensors: List of tensors to be clipped
        global_norm (float, optional): Precomputed norm. Defaults to None.
        mpu (optional): model parallelism unit. Defaults to None.
        eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6

    Returns:
        float: the global norm
    """
    if global_norm is None:
        global_norm = get_global_norm_of_tensors(input_tensors, mpu=mpu)
    scale = max_norm / (global_norm + eps)
    # Only ever scale tensors down, never up.
    if scale < 1:
        for tensor in input_tensors:
            tensor.detach().mul_(scale)
    return global_norm
10,226 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
def align_dense_tensors(tensor_list, alignment):
    """Pad ``tensor_list`` with one zero tensor so its total element count
    is a multiple of ``alignment``; return the (possibly same) list."""
    total = sum(t.numel() for t in tensor_list)
    leftover = total % alignment
    if leftover == 0:
        return tensor_list
    pad = torch.zeros(alignment - leftover,
                      device=tensor_list[0].device,
                      dtype=tensor_list[0].dtype)
    return tensor_list + [pad]
10,227 | from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from torch._six import inf
from deepspeed import comm as dist
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
def all_gather_dp_groups(partitioned_param_groups,
                         dp_process_group,
                         start_alignment_factor,
                         allgather_bucket_size):
    """All-gather every data-parallel partition of each param group, in
    bucket-sized shards whose starts are aligned to ``start_alignment_factor``.

    Args:
        partitioned_param_groups: per-group list of per-rank partition tensors.
        dp_process_group: per-group data-parallel process group.
        start_alignment_factor (int): alignment enforced on shard start offsets.
        allgather_bucket_size (int): target total elements per all-gather call.
    """
    for group_id, partitioned_params in enumerate(partitioned_param_groups):
        # Sequential AllGather Best of both worlds
        partition_id = dist.get_rank(group=dp_process_group[group_id])
        dp_world_size = dist.get_world_size(group=dp_process_group[group_id])
        num_shards = max(
            1,
            partitioned_params[partition_id].numel() * dp_world_size //
            allgather_bucket_size)
        shard_size = partitioned_params[partition_id].numel() // num_shards
        # Enforce nccl/rccl alignment of start location of each shard
        shard_size = shard_size - (shard_size % start_alignment_factor)
        num_elements = shard_size
        assert shard_size * num_shards <= partitioned_params[partition_id].numel()
        for shard_id in range(num_shards):
            # Last shard picks up the remainder left by the aligned shard size.
            if shard_id == (num_shards - 1):
                num_elements = partitioned_params[partition_id].numel(
                ) - shard_id * shard_size
            shard_list = []
            for dp_id in range(dp_world_size):
                curr_shard = partitioned_params[dp_id].narrow(0,
                                                              shard_id * shard_size,
                                                              num_elements).detach()
                shard_list.append(curr_shard)
            dist.all_gather(shard_list,
                            shard_list[partition_id],
                            dp_process_group[group_id])
10,228 | import argparse
from torch.optim import Optimizer
import math
from deepspeed.utils import logger
def add_tuning_arguments(parser):
    """Add convergence-tuning CLI options (LR schedules, LR range test,
    OneCycle, warmup) to ``parser`` and return it.

    NOTE(review): ``type=bool`` on --lr_range_test_staircase is an argparse
    footgun — any non-empty string parses as True; confirm intended.
    """
    group = parser.add_argument_group('Convergence Tuning',
                                      'Convergence tuning configurations')
    # LR scheduler
    group.add_argument('--lr_schedule',
                       type=str,
                       default=None,
                       help='LR schedule for training.')
    # Learning rate range test
    group.add_argument("--lr_range_test_min_lr",
                       type=float,
                       default=0.001,
                       help='Starting lr value.')
    group.add_argument("--lr_range_test_step_rate",
                       type=float,
                       default=1.0,
                       help='scaling rate for LR range test.')
    group.add_argument("--lr_range_test_step_size",
                       type=int,
                       default=1000,
                       help='training steps per LR change.')
    group.add_argument("--lr_range_test_staircase",
                       type=bool,
                       default=False,
                       help='use staircase scaling for LR range test.')
    # OneCycle schedule
    group.add_argument("--cycle_first_step_size",
                       type=int,
                       default=1000,
                       help='size of first step of 1Cycle schedule (training steps).')
    group.add_argument("--cycle_first_stair_count",
                       type=int,
                       default=-1,
                       help='first stair count for 1Cycle schedule.')
    group.add_argument(
        "--cycle_second_step_size",
        type=int,
        default=-1,
        help='size of second step of 1Cycle schedule (default first_step_size).')
    group.add_argument("--cycle_second_stair_count",
                       type=int,
                       default=-1,
                       help='second stair count for 1Cycle schedule.')
    group.add_argument(
        "--decay_step_size",
        type=int,
        default=1000,
        help='size of intervals for applying post cycle decay (training steps).')
    # 1Cycle LR
    group.add_argument("--cycle_min_lr",
                       type=float,
                       default=0.01,
                       help='1Cycle LR lower bound.')
    group.add_argument("--cycle_max_lr",
                       type=float,
                       default=0.1,
                       help='1Cycle LR upper bound.')
    group.add_argument("--decay_lr_rate",
                       type=float,
                       default=0.0,
                       help='post cycle LR decay rate.')
    # 1Cycle Momentum
    group.add_argument('--cycle_momentum',
                       default=False,
                       action='store_true',
                       help='Enable 1Cycle momentum schedule.')
    group.add_argument("--cycle_min_mom",
                       type=float,
                       default=0.8,
                       help='1Cycle momentum lower bound.')
    group.add_argument("--cycle_max_mom",
                       type=float,
                       default=0.9,
                       help='1Cycle momentum upper bound.')
    group.add_argument("--decay_mom_rate",
                       type=float,
                       default=0.0,
                       help='post cycle momentum decay rate.')
    # Warmup LR
    group.add_argument('--warmup_min_lr',
                       type=float,
                       default=0,
                       help='WarmupLR minimum/initial LR value')
    group.add_argument('--warmup_max_lr',
                       type=float,
                       default=0.001,
                       help='WarmupLR maximum LR value.')
    group.add_argument('--warmup_num_steps',
                       type=int,
                       default=1000,
                       help='WarmupLR step count for LR warmup.')
    group.add_argument('--warmup_type',
                       type=str,
                       default=WARMUP_LOG_RATE,
                       help='WarmupLR increasing function during warmup')
    return parser
def parse_arguments():
    """Parse the convergence-tuning CLI options.

    Returns:
        tuple: (parsed scheduler args namespace, list of unrecognized args).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser = add_tuning_arguments(arg_parser)
    known, unknown = arg_parser.parse_known_args()
    return known, unknown
10,229 | import argparse
from torch.optim import Optimizer
import math
from deepspeed.utils import logger
def override_lr_range_test_params(args, params):
    """Copy LR-range-test CLI overrides from ``args`` into ``params``.

    Only attributes that exist and are not None are copied.
    """
    if hasattr(args, LR_RANGE_TEST_MIN_LR) and args.lr_range_test_min_lr is not None:
        params[LR_RANGE_TEST_MIN_LR] = args.lr_range_test_min_lr
    if hasattr(args,
               LR_RANGE_TEST_STEP_RATE) and args.lr_range_test_step_rate is not None:
        params[LR_RANGE_TEST_STEP_RATE] = args.lr_range_test_step_rate
    if hasattr(args,
               LR_RANGE_TEST_STEP_SIZE) and args.lr_range_test_step_size is not None:
        params[LR_RANGE_TEST_STEP_SIZE] = args.lr_range_test_step_size
    if hasattr(args,
               LR_RANGE_TEST_STAIRCASE) and args.lr_range_test_staircase is not None:
        params[LR_RANGE_TEST_STAIRCASE] = args.lr_range_test_staircase
def override_1cycle_params(args, params):
    """Copy OneCycle-schedule CLI overrides from ``args`` into ``params``.

    Only attributes that exist and are not None are copied.
    """
    if hasattr(args, CYCLE_FIRST_STEP_SIZE) and args.cycle_first_step_size is not None:
        params[CYCLE_FIRST_STEP_SIZE] = args.cycle_first_step_size
    if hasattr(args,
               CYCLE_FIRST_STAIR_COUNT) and args.cycle_first_stair_count is not None:
        params[CYCLE_FIRST_STAIR_COUNT] = args.cycle_first_stair_count
    if hasattr(args, CYCLE_SECOND_STEP_SIZE) and args.cycle_second_step_size is not None:
        params[CYCLE_SECOND_STEP_SIZE] = args.cycle_second_step_size
    if hasattr(args,
               CYCLE_SECOND_STAIR_COUNT) and args.cycle_second_stair_count is not None:
        params[CYCLE_SECOND_STAIR_COUNT] = args.cycle_second_stair_count
    if hasattr(args, DECAY_STEP_SIZE) and args.decay_step_size is not None:
        params[DECAY_STEP_SIZE] = args.decay_step_size
    # 1Cycle LR params
    if hasattr(args, CYCLE_MIN_LR) and args.cycle_min_lr is not None:
        params[CYCLE_MIN_LR] = args.cycle_min_lr
    if hasattr(args, CYCLE_MAX_LR) and args.cycle_max_lr is not None:
        params[CYCLE_MAX_LR] = args.cycle_max_lr
    if hasattr(args, DECAY_LR_RATE) and args.decay_lr_rate is not None:
        params[DECAY_LR_RATE] = args.decay_lr_rate
    # 1Cycle MOM params
    if hasattr(args, CYCLE_MIN_MOM) and args.cycle_min_mom is not None:
        params[CYCLE_MIN_MOM] = args.cycle_min_mom
    if hasattr(args, CYCLE_MAX_MOM) and args.cycle_max_mom is not None:
        params[CYCLE_MAX_MOM] = args.cycle_max_mom
    if hasattr(args, DECAY_MOM_RATE) and args.decay_mom_rate is not None:
        params[DECAY_MOM_RATE] = args.decay_mom_rate
def override_warmupLR_params(args, params):
    """Copy WarmupLR CLI overrides from ``args`` into ``params``.

    Only attributes that exist and are not None are copied.
    """
    if hasattr(args, WARMUP_MIN_LR) and args.warmup_min_lr is not None:
        params[WARMUP_MIN_LR] = args.warmup_min_lr
    if hasattr(args, WARMUP_MAX_LR) and args.warmup_max_lr is not None:
        params[WARMUP_MAX_LR] = args.warmup_max_lr
    if hasattr(args, WARMUP_NUM_STEPS) and args.warmup_num_steps is not None:
        params[WARMUP_NUM_STEPS] = args.warmup_num_steps
    if hasattr(args, WARMUP_TYPE) and args.warmup_type is not None:
        params[WARMUP_TYPE] = args.warmup_type
def override_params(args, params):
    """Apply all recognized scheduler CLI overrides to ``params``."""
    # LR range test params
    override_lr_range_test_params(args, params)
    # 1Cycle params
    override_1cycle_params(args, params)
    # WarmupLR params
    override_warmupLR_params(args, params)
10,230 | import argparse
from torch.optim import Optimizer
import math
from deepspeed.utils import logger
# Config key and schedule-name constants shared by the scheduler helpers below.
LR_SCHEDULE = 'lr_schedule'
LR_RANGE_TEST = 'LRRangeTest'
ONE_CYCLE = 'OneCycle'
# NOTE(review): WARMUP_LR / WARMUP_DECAY_LR are defined elsewhere in this file.
VALID_LR_SCHEDULES = [LR_RANGE_TEST, ONE_CYCLE, WARMUP_LR, WARMUP_DECAY_LR]
def override_lr_range_test_params(args, params):
    """Copy LR-range-test CLI overrides from ``args`` into ``params``.

    Only attributes that exist and are not None are copied.
    """
    if hasattr(args, LR_RANGE_TEST_MIN_LR) and args.lr_range_test_min_lr is not None:
        params[LR_RANGE_TEST_MIN_LR] = args.lr_range_test_min_lr
    if hasattr(args,
               LR_RANGE_TEST_STEP_RATE) and args.lr_range_test_step_rate is not None:
        params[LR_RANGE_TEST_STEP_RATE] = args.lr_range_test_step_rate
    if hasattr(args,
               LR_RANGE_TEST_STEP_SIZE) and args.lr_range_test_step_size is not None:
        params[LR_RANGE_TEST_STEP_SIZE] = args.lr_range_test_step_size
    if hasattr(args,
               LR_RANGE_TEST_STAIRCASE) and args.lr_range_test_staircase is not None:
        params[LR_RANGE_TEST_STAIRCASE] = args.lr_range_test_staircase
def override_1cycle_params(args, params):
    """Copy OneCycle-schedule CLI overrides from ``args`` into ``params``.

    Only attributes that exist and are not None are copied.
    """
    if hasattr(args, CYCLE_FIRST_STEP_SIZE) and args.cycle_first_step_size is not None:
        params[CYCLE_FIRST_STEP_SIZE] = args.cycle_first_step_size
    if hasattr(args,
               CYCLE_FIRST_STAIR_COUNT) and args.cycle_first_stair_count is not None:
        params[CYCLE_FIRST_STAIR_COUNT] = args.cycle_first_stair_count
    if hasattr(args, CYCLE_SECOND_STEP_SIZE) and args.cycle_second_step_size is not None:
        params[CYCLE_SECOND_STEP_SIZE] = args.cycle_second_step_size
    if hasattr(args,
               CYCLE_SECOND_STAIR_COUNT) and args.cycle_second_stair_count is not None:
        params[CYCLE_SECOND_STAIR_COUNT] = args.cycle_second_stair_count
    if hasattr(args, DECAY_STEP_SIZE) and args.decay_step_size is not None:
        params[DECAY_STEP_SIZE] = args.decay_step_size
    # 1Cycle LR params
    if hasattr(args, CYCLE_MIN_LR) and args.cycle_min_lr is not None:
        params[CYCLE_MIN_LR] = args.cycle_min_lr
    if hasattr(args, CYCLE_MAX_LR) and args.cycle_max_lr is not None:
        params[CYCLE_MAX_LR] = args.cycle_max_lr
    if hasattr(args, DECAY_LR_RATE) and args.decay_lr_rate is not None:
        params[DECAY_LR_RATE] = args.decay_lr_rate
    # 1Cycle MOM params
    if hasattr(args, CYCLE_MIN_MOM) and args.cycle_min_mom is not None:
        params[CYCLE_MIN_MOM] = args.cycle_min_mom
    if hasattr(args, CYCLE_MAX_MOM) and args.cycle_max_mom is not None:
        params[CYCLE_MAX_MOM] = args.cycle_max_mom
    if hasattr(args, DECAY_MOM_RATE) and args.decay_mom_rate is not None:
        params[DECAY_MOM_RATE] = args.decay_mom_rate
def override_warmupLR_params(args, params):
if hasattr(args, WARMUP_MIN_LR) and args.warmup_min_lr is not None:
params[WARMUP_MIN_LR] = args.warmup_min_lr
if hasattr(args, WARMUP_MAX_LR) and args.warmup_max_lr is not None:
params[WARMUP_MAX_LR] = args.warmup_max_lr
if hasattr(args, WARMUP_NUM_STEPS) and args.warmup_num_steps is not None:
params[WARMUP_NUM_STEPS] = args.warmup_num_steps
if hasattr(args, WARMUP_TYPE) and args.warmup_type is not None:
params[WARMUP_TYPE] = args.warmup_type
def get_config_from_args(args):
if not hasattr(args, LR_SCHEDULE) or args.lr_schedule is None:
return None, '--{} not specified on command line'.format(LR_SCHEDULE)
if not args.lr_schedule in VALID_LR_SCHEDULES:
return None, '{} is not supported LR schedule'.format(args.lr_schedule)
config = {}
config['type'] = args.lr_schedule
config['params'] = {}
if args.lr_schedule == LR_RANGE_TEST:
override_lr_range_test_params(args, config['params'])
elif args.lr_schedule == ONE_CYCLE:
override_1cycle_params(args, config['params'])
else:
override_warmupLR_params(args, config['params'])
return config, None | null |
10,231 | import argparse
from torch.optim import Optimizer
import math
from deepspeed.utils import logger
LR_RANGE_TEST = 'LRRangeTest'
ONE_CYCLE = 'OneCycle'
VALID_LR_SCHEDULES = [LR_RANGE_TEST, ONE_CYCLE, WARMUP_LR, WARMUP_DECAY_LR]
LR_RANGE_TEST_MIN_LR = 'lr_range_test_min_lr'
CYCLE_MAX_LR = 'cycle_max_lr'
WARMUP_MAX_LR = 'warmup_max_lr'
def get_lr_from_config(config):
if not 'type' in config:
return None, 'LR schedule type not defined in config'
if not 'params' in config:
return None, 'LR schedule params not defined in config'
lr_schedule = config['type']
lr_params = config['params']
if not lr_schedule in VALID_LR_SCHEDULES:
return None, '{} is not a valid LR schedule'.format(lr_schedule)
if lr_schedule == LR_RANGE_TEST:
return lr_params[LR_RANGE_TEST_MIN_LR], ''
if lr_schedule == ONE_CYCLE:
return lr_params[CYCLE_MAX_LR], ''
# Warmup LR
return lr_params[WARMUP_MAX_LR], '' | null |
10,232 | import argparse
from torch.optim import Optimizer
import math
from deepspeed.utils import logger
def get_torch_optimizer(optimizer):
if isinstance(optimizer, Optimizer):
return optimizer
if hasattr(optimizer, 'optimizer') and isinstance(optimizer.optimizer, Optimizer):
return optimizer.optimizer
raise TypeError('{} is not a subclass of torch.optim.Optimizer'.format(
type(optimizer).__name__)) | null |
10,233 | import json
import collections
import collections.abc
from functools import reduce
from pydantic import BaseModel
from deepspeed.utils import logger
def get_list_param(param_dict, param_name, param_default_value):
return param_dict.get(param_name, param_default_value) | null |
10,234 | import json
import collections
import collections.abc
from functools import reduce
from pydantic import BaseModel
from deepspeed.utils import logger
def get_dict_param(param_dict, param_name, param_default_value):
return param_dict.get(param_name, param_default_value) | null |
10,235 | import math
from typing import List
import torch
from torch import Tensor
from deepspeed import comm as dist
from torch.distributed import ProcessGroup
import torch.nn.functional
from deepspeed.utils import instrument_w_nvtx
def _torch_reduce_scatter_fn(input_tensor: Tensor,
output_tensor: Tensor,
group=None,
async_op=False,
prof=False):
return instrument_w_nvtx(dist.reduce_scatter_fn)(output_tensor,
input_tensor,
group=group,
async_op=async_op)
The provided code snippet includes necessary dependencies for implementing the `reduce_scatter_coalesced` function. Write a Python function `def reduce_scatter_coalesced( tensors: List[Tensor], group: ProcessGroup = None, ) -> List[Tensor]` to solve the following problem:
simultaneously reduce-scatter a list of tensors - this can be done more efficiently than individual reduce scatter calls TODO. see if PyTorch team wants a c++ version of this for ProcessGroupNCCL
Here is the function:
def reduce_scatter_coalesced(
tensors: List[Tensor],
group: ProcessGroup = None,
) -> List[Tensor]:
"""simultaneously reduce-scatter a list of tensors - this can be done more
efficiently than individual reduce scatter calls
TODO. see if PyTorch team wants a c++ version of this for ProcessGroupNCCL
"""
this_rank = dist.get_rank(group)
world_sz = dist.get_world_size(group)
partition_lst_for_each_tensor = [None] * len(tensors)
for tensor_idx, tensor in enumerate(tensors):
flattened_tensor = tensor.view(-1)
chunk_sz = math.ceil(tensor.numel() / world_sz)
partition_lst_for_each_tensor[tensor_idx] = [
flattened_tensor[rank * chunk_sz:rank * chunk_sz + chunk_sz]
for rank in range(0,
world_sz)
]
padded_partition_sz_for_each_tensor = tuple(
math.ceil(t.numel() / world_sz) for t in tensors)
if len(tensors) == 1 and tensors[0].numel() % world_sz == 0:
# if there's only one tensor being reduced and we don't need to pad
# we have an opportunity to avoid a memory allocation
tensor_partition_flat_buffer = tensors[0].view(-1)
else:
# interleave tensor partitions such that the correct reduced partitions of each tensor
# end up at each rank
tensor_partitions_lst_with_padding = []
for rank in range(world_sz):
for tensor_idx in range(len(tensors)):
# add tensor content
tensor_chunk = partition_lst_for_each_tensor[tensor_idx][rank]
tensor_partitions_lst_with_padding.append(tensor_chunk)
# add padding if necessary
padding_sz = padded_partition_sz_for_each_tensor[
tensor_idx] - tensor_chunk.numel()
if padding_sz > 0:
tensor_partitions_lst_with_padding.append(
torch.empty(padding_sz,
dtype=tensor_chunk.dtype,
device=tensor_chunk.device))
tensor_partition_flat_buffer = instrument_w_nvtx(
torch.cat)(tensor_partitions_lst_with_padding)
tensor_partition_flat_buffer.div_(world_sz) # pre-divide
tensor_partition_buffer_for_each_rank: List[Tensor] = torch.chunk(
tensor_partition_flat_buffer,
world_sz)
# batched reduce-scatter call
_torch_reduce_scatter_fn(tensor_partition_flat_buffer,
tensor_partition_buffer_for_each_rank[this_rank],
group=group)
# reverse procedure of the interleaving done previously, done on the
# result of the batched reduce-scatter
output_lst: List[Tensor] = [None] * len(tensors)
offset = 0
for tensor_idx in range(len(tensors)):
output_lst[tensor_idx] = tensor_partition_buffer_for_each_rank[this_rank].narrow(
0,
offset,
partition_lst_for_each_tensor[tensor_idx][this_rank].numel())
offset += padded_partition_sz_for_each_tensor[tensor_idx]
return output_lst | simultaneously reduce-scatter a list of tensors - this can be done more efficiently than individual reduce scatter calls TODO. see if PyTorch team wants a c++ version of this for ProcessGroupNCCL |
10,236 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
CURRICULUM_LEARNING = "curriculum_learning"
CURRICULUM_ENABLED = "enabled"
CURRICULUM_ENABLED_DEFAULT = False
def get_scalar_param(param_dict, param_name, param_default_value):
def get_curriculum_enabled(param_dict):
if CURRICULUM_LEARNING in param_dict.keys():
return get_scalar_param(param_dict[CURRICULUM_LEARNING],
CURRICULUM_ENABLED,
CURRICULUM_ENABLED_DEFAULT)
else:
return False | null |
10,237 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
CURRICULUM_LEARNING = "curriculum_learning"
CURRICULUM_ENABLED = "enabled"
def get_curriculum_params(param_dict):
if CURRICULUM_LEARNING in param_dict.keys():
curriculum_params = copy.copy(param_dict[CURRICULUM_LEARNING])
curriculum_params.pop(CURRICULUM_ENABLED)
return curriculum_params
else:
return False | null |
10,238 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
PROGRESSIVE_LAYER_DROP = "progressive_layer_drop"
PLD_ENABLED = "enabled"
PLD_ENABLED_DEFAULT = False
def get_scalar_param(param_dict, param_name, param_default_value):
return param_dict.get(param_name, param_default_value)
def get_pld_enabled(param_dict):
if PROGRESSIVE_LAYER_DROP in param_dict.keys():
return get_scalar_param(param_dict[PROGRESSIVE_LAYER_DROP],
PLD_ENABLED,
PLD_ENABLED_DEFAULT)
else:
return False | null |
10,239 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
PROGRESSIVE_LAYER_DROP = "progressive_layer_drop"
PLD_ENABLED = "enabled"
def get_pld_params(param_dict):
if PROGRESSIVE_LAYER_DROP in param_dict.keys():
pld_params = copy.copy(param_dict[PROGRESSIVE_LAYER_DROP])
pld_params.pop(PLD_ENABLED)
return pld_params
else:
return False | null |
10,240 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
AMP = "amp"
AMP_ENABLED = "enabled"
AMP_ENABLED_DEFAULT = False
def get_scalar_param(param_dict, param_name, param_default_value):
return param_dict.get(param_name, param_default_value)
def get_amp_enabled(param_dict):
if AMP in param_dict.keys():
return get_scalar_param(param_dict[AMP], AMP_ENABLED, AMP_ENABLED_DEFAULT)
else:
return False | null |
10,241 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
AMP = "amp"
AMP_ENABLED = "enabled"
def get_amp_params(param_dict):
if AMP in param_dict.keys():
amp_params = copy.copy(param_dict[AMP])
amp_params.pop(AMP_ENABLED)
return amp_params
else:
return False | null |
10,242 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
def get_fp16_enabled(param_dict):
if FP16 in param_dict.keys():
return get_scalar_param(param_dict[FP16], FP16_ENABLED, FP16_ENABLED_DEFAULT)
else:
return False
FP16 = "fp16"
FP16_MASTER_WEIGHTS_AND_GRADS = "fp16_master_weights_and_grads"
FP16_MASTER_WEIGHTS_AND_GRADS_DEFAULT = False
def get_scalar_param(param_dict, param_name, param_default_value):
return param_dict.get(param_name, param_default_value)
def get_fp16_master_weights_and_grads_enabled(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16],
FP16_MASTER_WEIGHTS_AND_GRADS,
FP16_MASTER_WEIGHTS_AND_GRADS_DEFAULT)
else:
return False | null |
10,243 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
def get_fp16_enabled(param_dict):
FP16 = "fp16"
FP16_AUTO_CAST = "auto_cast"
FP16_AUTO_CAST_DEFAULT = False
def get_scalar_param(param_dict, param_name, param_default_value):
def get_fp16_auto_cast(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16], FP16_AUTO_CAST, FP16_AUTO_CAST_DEFAULT) | null |
10,244 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
def get_fp16_enabled(param_dict):
if FP16 in param_dict.keys():
return get_scalar_param(param_dict[FP16], FP16_ENABLED, FP16_ENABLED_DEFAULT)
else:
return False
def get_bfloat16_enabled(param_dict):
for key in [BFLOAT16, BFLOAT16_OLD]:
if key in param_dict.keys():
return get_scalar_param(param_dict[key],
BFLOAT16_ENABLED,
BFLOAT16_ENABLED_DEFAULT)
return False
FP16 = "fp16"
FP16_LOSS_SCALE = "loss_scale"
FP16_LOSS_SCALE_DEFAULT = 0
def get_scalar_param(param_dict, param_name, param_default_value):
return param_dict.get(param_name, param_default_value)
def get_loss_scale(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16],
FP16_LOSS_SCALE,
FP16_LOSS_SCALE_DEFAULT)
elif get_bfloat16_enabled(param_dict):
return 1.0
else:
return FP16_LOSS_SCALE_DEFAULT | null |
10,245 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
def get_fp16_enabled(param_dict):
if FP16 in param_dict.keys():
return get_scalar_param(param_dict[FP16], FP16_ENABLED, FP16_ENABLED_DEFAULT)
else:
return False
def get_bfloat16_enabled(param_dict):
for key in [BFLOAT16, BFLOAT16_OLD]:
if key in param_dict.keys():
return get_scalar_param(param_dict[key],
BFLOAT16_ENABLED,
BFLOAT16_ENABLED_DEFAULT)
return False
FP16 = "fp16"
FP16_INITIAL_SCALE_POWER = "initial_scale_power"
FP16_INITIAL_SCALE_POWER_DEFAULT = 32
def get_scalar_param(param_dict, param_name, param_default_value):
return param_dict.get(param_name, param_default_value)
def get_initial_dynamic_scale(param_dict):
if get_fp16_enabled(param_dict):
initial_scale_power = get_scalar_param(param_dict[FP16],
FP16_INITIAL_SCALE_POWER,
FP16_INITIAL_SCALE_POWER_DEFAULT)
elif get_bfloat16_enabled(param_dict):
initial_scale_power = 0
else:
initial_scale_power = FP16_INITIAL_SCALE_POWER_DEFAULT
return 2**initial_scale_power | null |
10,246 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
def get_fp16_enabled(param_dict):
if FP16 in param_dict.keys():
return get_scalar_param(param_dict[FP16], FP16_ENABLED, FP16_ENABLED_DEFAULT)
else:
return False
FP16 = "fp16"
FP16_INITIAL_SCALE_POWER = "initial_scale_power"
FP16_INITIAL_SCALE_POWER_DEFAULT = 32
FP16_LOSS_SCALE_WINDOW = "loss_scale_window"
FP16_LOSS_SCALE_WINDOW_DEFAULT = 1000
FP16_HYSTERESIS = "hysteresis"
FP16_HYSTERESIS_DEFAULT = 2
FP16_MIN_LOSS_SCALE = "min_loss_scale"
FP16_MIN_LOSS_SCALE_DEFAULT = 1
INITIAL_LOSS_SCALE = 'init_scale'
SCALE_WINDOW = 'scale_window'
DELAYED_SHIFT = 'delayed_shift'
MIN_LOSS_SCALE = 'min_scale'
def get_scalar_param(param_dict, param_name, param_default_value):
return param_dict.get(param_name, param_default_value)
def get_dynamic_loss_scale_args(param_dict):
loss_scale_args = None
if get_fp16_enabled(param_dict):
fp16_dict = param_dict[FP16]
dynamic_loss_args = [
FP16_INITIAL_SCALE_POWER,
FP16_LOSS_SCALE_WINDOW,
FP16_MIN_LOSS_SCALE,
FP16_HYSTERESIS,
]
if any(arg in list(fp16_dict.keys()) for arg in dynamic_loss_args):
init_scale = get_scalar_param(fp16_dict,
FP16_INITIAL_SCALE_POWER,
FP16_INITIAL_SCALE_POWER_DEFAULT)
scale_window = get_scalar_param(fp16_dict,
FP16_LOSS_SCALE_WINDOW,
FP16_LOSS_SCALE_WINDOW_DEFAULT)
delayed_shift = get_scalar_param(fp16_dict,
FP16_HYSTERESIS,
FP16_HYSTERESIS_DEFAULT)
min_loss_scale = get_scalar_param(fp16_dict,
FP16_MIN_LOSS_SCALE,
FP16_MIN_LOSS_SCALE_DEFAULT)
loss_scale_args = {
INITIAL_LOSS_SCALE: 2**init_scale,
SCALE_WINDOW: scale_window,
DELAYED_SHIFT: delayed_shift,
MIN_LOSS_SCALE: min_loss_scale,
}
return loss_scale_args | null |
10,247 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
GRADIENT_ACCUMULATION_STEPS = "gradient_accumulation_steps"
GRADIENT_ACCUMULATION_STEPS_DEFAULT = None
def get_scalar_param(param_dict, param_name, param_default_value):
def get_gradient_accumulation_steps(param_dict):
return get_scalar_param(param_dict,
GRADIENT_ACCUMULATION_STEPS,
GRADIENT_ACCUMULATION_STEPS_DEFAULT) | null |
10,248 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
SPARSE_GRADIENTS = "sparse_gradients"
SPARSE_GRADIENTS_DEFAULT = False
def get_scalar_param(param_dict, param_name, param_default_value):
return param_dict.get(param_name, param_default_value)
def get_sparse_gradients_enabled(param_dict):
return get_scalar_param(param_dict, SPARSE_GRADIENTS, SPARSE_GRADIENTS_DEFAULT) | null |
10,249 | import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
COMMUNICATION_DATA_TYPE = "communication_data_type"
COMMUNICATION_DATA_TYPE_DEFAULT = None


def get_scalar_param(param_dict, param_name, param_default_value):
    """Look up *param_name* in *param_dict*; fall back to *param_default_value*."""
    return param_dict.get(param_name, param_default_value)


def get_communication_data_type(param_dict):
    """Map the configured communication data type string to a torch dtype.

    Returns ``None`` when the option is unset, so callers can derive the
    dtype from other parameters. The comparison is case-insensitive.
    Accepts both "bfp16" (legacy spelling) and the standard "bf16" for
    bfloat16. Raises ``ValueError`` for any other value.
    """
    val = get_scalar_param(param_dict,
                           COMMUNICATION_DATA_TYPE,
                           COMMUNICATION_DATA_TYPE_DEFAULT)
    val = val.lower() if val is not None else val
    if val is None:
        return val  # we must determine it by other parameters
    elif val == "fp32":
        return torch.float32
    elif val == "fp16":
        return torch.float16
    elif val in ("bfp16", "bf16"):  # accept both spellings of bfloat16
        return torch.bfloat16

    raise ValueError(
        f"Invalid communication_data_type. Supported data types: ['fp16', 'bfp16', 'bf16', 'fp32']. Got: {val}"
    )
import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
PRESCALE_GRADIENTS = "prescale_gradients"
PRESCALE_GRADIENTS_DEFAULT = False


def get_scalar_param(param_dict, param_name, param_default_value):
    """Look up *param_name* in *param_dict*; fall back to *param_default_value*."""
    # BUG FIX: the function body was missing here (a bare `def` immediately
    # followed by another `def` is a SyntaxError). Restored the body to match
    # the other get_scalar_param definitions in this file.
    return param_dict.get(param_name, param_default_value)


def get_prescale_gradients(param_dict):
    """Return whether gradients should be pre-scaled before reduction."""
    return get_scalar_param(param_dict, PRESCALE_GRADIENTS, PRESCALE_GRADIENTS_DEFAULT)
import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
GRADIENT_PREDIVIDE_FACTOR = "gradient_predivide_factor"
GRADIENT_PREDIVIDE_FACTOR_DEFAULT = 1.0


def get_scalar_param(param_dict, param_name, param_default_value):
    """Look up *param_name* in *param_dict*; fall back to *param_default_value*."""
    if param_name in param_dict:
        return param_dict[param_name]
    return param_default_value


def get_gradient_predivide_factor(param_dict):
    """Return the factor gradients are pre-divided by (default 1.0)."""
    return get_scalar_param(
        param_dict, GRADIENT_PREDIVIDE_FACTOR, GRADIENT_PREDIVIDE_FACTOR_DEFAULT
    )
import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
STEPS_PER_PRINT = "steps_per_print"
STEPS_PER_PRINT_DEFAULT = 10


def get_scalar_param(param_dict, param_name, param_default_value):
    """Look up *param_name* in *param_dict*; fall back to *param_default_value*."""
    if param_name in param_dict:
        return param_dict[param_name]
    return param_default_value


def get_steps_per_print(param_dict):
    """Return how many steps elapse between progress prints (default 10)."""
    return get_scalar_param(param_dict, STEPS_PER_PRINT, STEPS_PER_PRINT_DEFAULT)
import os
from typing import Union
import torch
import json
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import DeepSpeedMonitorConfig
from deepspeed import comm as dist
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
DISABLE_ALLGATHER = "disable_allgather"
DISABLE_ALLGATHER_DEFAULT = False


def get_scalar_param(param_dict, param_name, param_default_value):
    """Look up *param_name* in *param_dict*; fall back to *param_default_value*."""
    if param_name in param_dict:
        return param_dict[param_name]
    return param_default_value


def get_disable_allgather(param_dict):
    """Return whether the allgather step is disabled in the config dict."""
    return get_scalar_param(param_dict, DISABLE_ALLGATHER, DISABLE_ALLGATHER_DEFAULT)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.