id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
8,184 | import torch
import triton
import triton.language as tl
@triton.jit
def _add_kernel(A, B, C, size, BLOCK: tl.constexpr):
    """Element-wise add kernel: ``C[i] = A[i] + B[i]`` for ``i < size``.

    Fix: the ``@triton.jit`` decorator was missing — without it the
    launch syntax ``_add_kernel[grid](...)`` used by ``custom_add``
    fails, since a plain Python function is not subscriptable.

    Args:
        A, B: Pointers to the input buffers.
        C: Pointer to the output buffer.
        size: Number of valid elements.
        BLOCK: Elements handled per program instance (compile-time).
    """
    prog_id = tl.program_id(0)
    offs = prog_id * BLOCK + tl.arange(0, BLOCK)
    # Single bounds mask (computed once instead of three times) keeps
    # the tail program from reading/writing out of range.
    mask = offs < size
    a = tl.load(A + offs, mask=mask)
    b = tl.load(B + offs, mask=mask)
    tl.store(C + offs, a + b, mask=mask)
The provided code snippet includes necessary dependencies for implementing the `custom_add` function. Write a Python function `def custom_add(a, b)` to solve the following problem:
custom add one.
Here is the function:
def custom_add(a, b):
    """Return the element-wise sum of ``a`` and ``b``.

    Launches the triton ``_add_kernel`` over ``a.size(0)`` elements in
    blocks of 16 and writes the result into a fresh tensor shaped like
    ``a``.
    """
    out = torch.empty_like(a)
    num_elems = out.size(0)
    BLOCK = 16
    num_programs = triton.cdiv(num_elems, BLOCK)
    _add_kernel[[num_programs]](a, b, out, num_elems, BLOCK=BLOCK)
    return out
8,185 | import asyncio
import os
from dataclasses import asdict, dataclass, field
from typing import Any, Callable, Dict, List, Union
import torch
import torch.distributed as dist
from torch import multiprocessing as mp
from torch.distributed._tensor import DeviceMesh, Replicate, distribute_tensor
from transformers import AutoModelForCausalLM
from lmdeploy.pytorch.accel import LoadNoInit
from lmdeploy.utils import get_logger
from ..adapter.adapter import (AdapterWeightMap, get_indexed_lora_linears,
get_max_lora_weight_size, update_lora_linears)
from ..config import CacheConfig, ModelConfig
from ..models import patch
from ..utils import get_gpu_memory
from .cache_engine import CacheEngine
The provided code snippet includes necessary dependencies for implementing the `_unparam_lora_weight` function. Write a Python function `def _unparam_lora_weight(model: torch.nn.Module)` to solve the following problem:
unparam lora weight. We don't want to move weight of lora to gpu.
Here is the function:
def _unparam_lora_weight(model: torch.nn.Module):
    """Rebind LoRA linear weights as plain tensors.

    Dropping the ``nn.Parameter`` wrapper keeps the lora weights from
    being migrated to GPU together with the rest of the module.
    """
    from peft.tuners.lora import Linear as LoRALinear

    def _strip_param(linear):
        """Replace ``linear.weight`` with its raw ``.data`` tensor."""
        weight = linear.weight
        del linear.weight
        linear.weight = weight.data

    for _, module in model.named_modules():
        if not isinstance(module, LoRALinear):
            continue
        # Process the A branch fully, then the B branch, matching the
        # original traversal order.
        for branch in (module.lora_A, module.lora_B):
            for linear in branch.values():
                _strip_param(linear)
8,186 | import asyncio
import os
from dataclasses import asdict, dataclass, field
from typing import Any, Callable, Dict, List, Union
import torch
import torch.distributed as dist
from torch import multiprocessing as mp
from torch.distributed._tensor import DeviceMesh, Replicate, distribute_tensor
from transformers import AutoModelForCausalLM
from lmdeploy.pytorch.accel import LoadNoInit
from lmdeploy.utils import get_logger
from ..adapter.adapter import (AdapterWeightMap, get_indexed_lora_linears,
get_max_lora_weight_size, update_lora_linears)
from ..config import CacheConfig, ModelConfig
from ..models import patch
from ..utils import get_gpu_memory
from .cache_engine import CacheEngine
def cache_swapping(cache_engine: CacheEngine, swap_in_map: dict,
                   swap_out_map: dict):
    """Issue cache swap requests and wait for their completion.

    Non-empty ``swap_in_map``/``swap_out_map`` are forwarded to the
    cache engine; if anything was issued, every engine event is waited
    on so the copies have finished before returning.
    """
    performed_swap = False
    if swap_in_map:
        cache_engine.swap_in(swap_in_map)
        performed_swap = True
    if swap_out_map:
        cache_engine.swap_out(swap_out_map)
        performed_swap = True
    if not performed_swap:
        return
    for event in cache_engine.events:
        event.wait()
def model_forward(
    patched_model: torch.nn.Module,
    inputs: ModelInputs,
    cache_engine: CacheEngine,
    json_config: dict = None,
    world_size: int = 1,
    stream: torch.cuda.Stream = None,
):
    """Perform one forward pass of the patched model.

    Args:
        patched_model (torch.nn.Module): Model returned by ``patch``,
            exposing ``patched_forward``.
        inputs (ModelInputs): Batched inputs; moved to CUDA here.
        cache_engine (CacheEngine): Supplies the GPU kv caches.
        json_config (dict): Raw model json config, forwarded into the
            step context.
        world_size (int): The distribution world size.
        stream (torch.cuda.Stream): Stream to launch on; defaults to the
            current stream.

    Returns:
        dict: ``logits`` plus ``custom_outputs`` collected by the
        step context during the forward.
    """
    stream = stream or torch.cuda.current_stream()
    with torch.inference_mode(), torch.cuda.stream(stream):
        # forward
        inputs = inputs.to_device('cuda')
        # StepContext carries per-step metadata and accumulates any
        # custom outputs produced by patched modules.
        context = StepContext.new(
            inputs=inputs,
            world_size=world_size,
            json_config=json_config,
            kv_caches=cache_engine.gpu_cache,
        )
        output = patched_model.patched_forward(
            input_ids=inputs.input_ids,
            position_ids=inputs.position_ids,
            attention_mask=inputs.attention_mask,
            past_key_values=cache_engine.gpu_cache,
            return_dict=True,
            output_attentions=False,
            output_hidden_states=False,
            use_origin=False,
            context=context,
        )
    return dict(logits=output['logits'], custom_outputs=context._outputs)
@dataclass
class TPResponse:
    """Status/result message exchanged with tensor-parallel workers.

    Fix: the ``@dataclass`` decorator was missing — the class holds only
    bare field annotations but is constructed positionally elsewhere
    (``TPResponse(0)``, ``TPResponse(error_code, error_type, ...)``),
    which requires the generated ``__init__``.

    Attributes:
        ret_code: 0 on success, non-zero on failure.
        error: The local exception, or the list of per-rank exceptions
            after ``gather_error``.
        data: Optional payload (cache config, forward output, ...).
    """
    ret_code: int
    error: Union[Exception, List[Exception]] = None
    data: Any = None

    def gather_error(self):
        """Gather error status across all ranks (collective call).

        All-reduces the per-rank return codes; if any rank failed,
        all-gathers every rank's exception so each rank ends up holding
        the full error list and a non-zero ``ret_code``.
        """
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        # gather errors: sum of ret_codes > 0 means some rank failed
        error_count = torch.tensor(self.ret_code).cuda(rank)
        dist.all_reduce(error_count)
        if error_count.item() > 0:
            all_errors = [None] * world_size
            dist.all_gather_object(all_errors, self.error)
            self.ret_code = 1
            self.error = all_errors

    def raise_error(self, default_error: Exception):
        """Raise the stored error, falling back to ``default_error``.

        If ``error`` is a gathered list, raise this rank's own entry,
        or the default when this rank succeeded but another failed.
        """
        if self.error is None:
            raise default_error
        elif isinstance(self.error, Exception):
            raise self.error
        else:
            assert isinstance(self.error, List), ('expect error type list, '
                                                  f'got {type(self.error)}')
            rank = dist.get_rank()
            err = self.error[rank]
            if err is None:
                raise default_error
            else:
                raise err
def _tp_build_model(
    rank: int,
    model_path: str,
    model_config: ModelConfig,
    cache_config: CacheConfig,
    adapters: Dict[str, str],
    out_que: mp.Queue,
    world_size: int,
    trust_remote_code=True,
):
    """Build the tensor-parallel model and cache engine on this rank.

    Called collectively by every rank. Rank 0 materializes the real
    weights from disk; ``patch`` then distributes them across ranks.
    The cache config is synchronized to the cluster-wide minimum and the
    build status is reported to the parent process through ``out_que``
    (rank 0 only).

    Args:
        rank (int): Distribution rank of this process.
        model_path (str): Hugging face model path, local or online.
        model_config (ModelConfig): The config of the model.
        cache_config (CacheConfig): The config of the cache.
        adapters (Dict[str, str]): Adapter name to adapter path map.
        out_que (mp.Queue): Queue used to report build status.
        world_size (int): The distribution world size.
        trust_remote_code (bool): Trust remote code when loading.

    Returns:
        tuple: ``(patched_model, cache_engine)``; both ``None`` when the
        build failed (an error is raised after gathering).
    """
    from accelerate import init_empty_weights
    error_code = 0
    error_type = None
    patched_model = None
    cache_engine = None

    def __get_device_map(model, device_map=None):
        """get device map of model."""
        import psutil
        model_size = _get_model_memory_usage(model)
        # NOTE(review): the log messages look swapped relative to the
        # branches (insufficient host RAM keeps the GPU device_map,
        # enough RAM forces 'cpu') — confirm intent.
        if psutil.virtual_memory().available < model_size:
            logger.debug('Preload model on GPU.')
            return device_map
        else:
            logger.debug('Preload model on CPU.')
            return 'cpu'

    def __load_params_and_buffers(param_mod, mod):
        """load param and buffer."""
        # Copies one module level only (recurse=False); the caller walks
        # the module tree.
        for name, param in param_mod.named_parameters(recurse=False):
            mod.register_parameter(name, param)
        for name, buffer in param_mod.named_buffers(recurse=False):
            mod.register_buffer(name, buffer)

    def __load_state_dict_assign(param_model, model):
        """load state dict assign."""
        try:
            # torch >= 2.1: assign=True rebinds tensors instead of
            # copying into the meta-device skeleton.
            model.load_state_dict(param_model.state_dict(), assign=True)
        except Exception:
            # Fallback for older torch: re-register parameters/buffers
            # module by module.
            __load_params_and_buffers(param_model, model)
            mods = dict(model.named_modules())
            for mod_name, param_mod in param_model.named_modules():
                mod = mods[mod_name]
                __load_params_and_buffers(param_mod, mod)

    def _broadcast_config(cache_config):
        """broadcast cache config, use minimum cache."""
        # Rank 0 gathers every rank's block counts, takes the minimum so
        # all ranks agree on capacity, then broadcasts the result.
        if rank == 0:
            gathered_configs = [None] * world_size
            dist.gather_object(cache_config, gathered_configs)
            num_gpu_blocks_list = [
                config.num_gpu_blocks for config in gathered_configs
            ]
            num_cpu_blocks_list = [
                config.num_cpu_blocks for config in gathered_configs
            ]
            min_num_gpu_blocks = min(num_gpu_blocks_list)
            min_num_cpu_blocks = min(num_cpu_blocks_list)
            cache_config.num_cpu_blocks = min_num_cpu_blocks
            cache_config.num_gpu_blocks = min_num_gpu_blocks
            config_list = [cache_config]
        else:
            gathered_configs = None
            dist.gather_object(cache_config, gathered_configs)
            config_list = [None]
        dist.broadcast_object_list(config_list)
        return config_list[0]

    try:
        config = model_config.hf_config
        torch_dtype = model_config.dtype
        device_map = None
        # Build the skeleton on the meta device: no real allocation yet.
        with init_empty_weights():
            model = AutoModelForCausalLM.from_config(
                config,
                torch_dtype=torch_dtype,
                trust_remote_code=trust_remote_code,
                **model_config.init_kwargs)
            if rank == 0:
                device_map = _create_device_map(model, world_size)
            _add_adapters(model, adapters)
            if rank == 0:
                # adapter would remove weight of linear.
                device_map = _create_device_map(model, world_size, device_map)
        model.eval()
        model.config.use_cache = True
        if rank == 0:
            # Only rank 0 loads real weights; other ranks receive their
            # shards through `patch` below.
            with LoadNoInit():
                device_map = __get_device_map(model, device_map)
                param_model = AutoModelForCausalLM.from_pretrained(
                    model_path,
                    torch_dtype=torch_dtype,
                    device_map=device_map,
                    trust_remote_code=trust_remote_code,
                    **model_config.init_kwargs)
                _load_adapters(param_model, adapters, device_map=device_map)
                __load_state_dict_assign(param_model, model)
                # Release the loader model's storage.
                param_model = param_model.to('meta')
                del param_model
        patched_model = patch(
            model,
            extra_args=_PATCH_ARG_NAMES,
            rank=rank,
            world_size=world_size,
        )
        # The patched model may prefer a different cache block size.
        block_size = _infer_block_size(patched_model, model_config,
                                       cache_config, world_size)
        if block_size != cache_config.block_size:
            cache_config.block_size = block_size
            if rank == 0:
                logger.warning(f'infered block size: {block_size}')
        _update_cache_config(model_config,
                             cache_config,
                             gpu_id=rank,
                             world_size=world_size)
        cache_config = _broadcast_config(cache_config)
        cache_engine = CacheEngine(cache_config,
                                   model_config,
                                   rank=rank,
                                   world_size=world_size)
    except Exception as e:
        logger.error(f'rank[{rank}] failed with error: {e}')
        error_code = 1
        error_type = e

    # response: every rank takes part in gather_error; only rank 0
    # reports to the parent process.
    resp = TPResponse(error_code, error_type, cache_config)
    resp.gather_error()
    if rank == 0:
        out_que.put(resp)
    if resp.ret_code != 0:
        resp.raise_error(RuntimeError('failed to init model.'))
    return patched_model, cache_engine
def _tp_get_input(rank: int, in_que: mp.Queue, world_size: int):
    """Receive one inference request and replicate it to every rank.

    Rank 0 reads ``(inputs, swap_in_map, swap_out_map)`` from ``in_que``
    and broadcasts tensor metadata plus non-tensor fields as objects;
    every rank then replicates the tensors through a DeviceMesh.

    Returns:
        tuple: ``(ModelInputs, swap_in_map, swap_out_map)`` on every
        rank.
    """
    device_mesh = DeviceMesh('cuda', list(range(world_size)))

    # broadcast meta info
    if rank == 0:
        inputs, swap_in_map, swap_out_map = in_que.get()
        inputs = asdict(inputs)
        input_tensors = dict(
            (k, v) for k, v in inputs.items() if isinstance(v, torch.Tensor))
        tensor_metas = dict(
            (name, (t.shape, t.dtype)) for name, t in input_tensors.items())
        other_metas = dict((k, v) for k, v in inputs.items()
                           if not isinstance(v, torch.Tensor))
        input_metas = (tensor_metas, other_metas)
        objs = [input_metas, swap_in_map, swap_out_map]
    else:
        objs = [None, None, None]
    dist.broadcast_object_list(objs)
    if rank != 0:
        # Non-zero ranks allocate empty tensors matching the broadcast
        # metadata; distribute_tensor below fills them with rank 0 data.
        input_metas = objs[0]
        tensor_metas, other_metas = input_metas
        input_tensors = dict((name, torch.empty(meta[0], dtype=meta[1]))
                             for name, meta in tensor_metas.items())
    updated_inputs = dict()
    for name, t in input_tensors.items():
        # Collective: replicate each tensor to all ranks of the mesh.
        updated_inputs[name] = distribute_tensor(t,
                                                 device_mesh=device_mesh,
                                                 placements=[Replicate()
                                                             ]).to_local()
    torch.cuda.synchronize()
    inputs = updated_inputs
    inputs.update(other_metas)
    inputs = ModelInputs(**inputs)
    swap_in_map = objs[1]
    swap_out_map = objs[2]
    return inputs, swap_in_map, swap_out_map
def _tp_paging_adapters(
    rank: int,
    patched_model: torch.nn.Module,
    cache_engine: CacheEngine,
    in_que: mp.Queue,
    out_que: mp.Queue,
):
    """Page LoRA adapter weights into the CPU cache on every rank.

    Rank 0 receives the weight maps from ``in_que`` and broadcasts them;
    each rank caches adapter weights into its CPU cache blocks and
    rewires the lora linears. Status goes back through ``out_que``
    (rank 0 only).
    """

    def __get_weight_map():
        """get weight map."""
        if rank == 0:
            weight_maps = in_que.get()
            dist_obj = [weight_maps]
        else:
            dist_obj = [None]
        dist.broadcast_object_list(dist_obj)
        return dist_obj[0]

    def __paging(weight_maps):
        """paging."""
        lora_linears = get_indexed_lora_linears(patched_model)
        cpu_caches = cache_engine.cpu_cache
        num_blocks = cache_engine.num_cpu_blocks
        # Flatten each (key, value) cache to (num_blocks, block_numel)
        # so adapter weights can be written per block.
        cpu_caches = [(kcache.view(num_blocks,
                                   -1), vcache.view(num_blocks, -1))
                      for kcache, vcache in cpu_caches]
        for weight_map in weight_maps:
            weight_map.cache_adapter(lora_linears, cpu_caches)
        update_lora_linears(lora_linears, weight_maps, device='cuda')

    weight_maps = __get_weight_map()
    resp = TPResponse(0)
    try:
        if rank == 0:
            logger.info('tp paging adapters.')
        if len(weight_maps) > 0:
            __paging(weight_maps)
    except Exception as e:
        resp.ret_code = 1
        resp.error = e
    # Collective error gathering; only rank 0 reports to the parent.
    resp.gather_error()
    if rank == 0:
        out_que.put(resp)
    if resp.ret_code != 0:
        resp.raise_error(RuntimeError('tp paging adapters failed.'))
@dataclass
class CacheConfig:
    """Config of key value cache.

    Fix: the ``@dataclass`` decorator was missing — the class holds only
    bare field annotations, yet instances are constructed and their
    block counts read/written throughout this file.

    Attributes:
        block_size: Number of tokens per cache block.
        num_cpu_blocks: Number of blocks in the CPU (swap) cache.
        num_gpu_blocks: Number of blocks in the GPU cache.
        window_size: Attention window size; -1 disables windowing.
        cache_max_entry_count: Fraction of free GPU memory the cache may
            occupy.
    """
    block_size: int
    num_cpu_blocks: int
    num_gpu_blocks: int
    window_size: int = -1
    cache_max_entry_count: float = 0.8
@dataclass
class ModelConfig:
    """Config of model.

    Fixes: added the missing ``@dataclass`` decorator (the class is
    constructed with keyword arguments by the builders below) and the
    missing ``@classmethod`` decorators on the two alternate
    constructors (both take ``cls``). The falcon kv-head selection is
    an ``if/elif/else`` chain: falcon-40b sets both
    ``new_decoder_architecture`` and ``multi_query``, and sequential
    ``if`` statements would clobber its GQA head count with 1.
    """
    hidden_size: int
    num_layers: int
    num_attention_heads: int
    num_key_value_heads: int
    bos_token_id: int
    eos_token_id: int
    head_dim: int
    sliding_window: int = -1
    dtype: torch.dtype = torch.float16
    multi_query_attention: bool = False
    json_config: dict = field(default_factory=dict)
    hf_config: Any = None
    init_kwargs: Dict[str, Any] = field(default_factory=dict)

    def get_head_size(self):
        """Return the per-head hidden dimension."""
        return self.head_dim

    @classmethod
    def from_pretrained(cls,
                        pretrained_model_name_or_path: str,
                        trust_remote_code: bool = True):
        """build ModelConfig from model path or name."""
        from transformers import AutoConfig
        hf_config = AutoConfig.from_pretrained(
            pretrained_model_name_or_path, trust_remote_code=trust_remote_code)
        return cls.from_hf_config(hf_config, pretrained_model_name_or_path)

    @classmethod
    def from_hf_config(cls, hf_config: Any, model_path: str = None):
        """Build a ModelConfig from a huggingface config object.

        ``model_path`` is only used to sniff the model family by name.
        """
        if model_path is None:
            model_path = ''

        def __build_falcon():
            """build falcon."""
            num_attention_heads = hf_config.num_attention_heads
            if hf_config.new_decoder_architecture:
                # 40b-instruct, GQA
                kv_head = hf_config.num_kv_heads
            elif hf_config.multi_query:
                # 7b-instruct, MQA
                kv_head = 1
            else:
                # rw-1b, MHA
                kv_head = num_attention_heads
            head_dim = hf_config.hidden_size // num_attention_heads
            return ModelConfig(
                hidden_size=hf_config.hidden_size,
                num_layers=hf_config.num_hidden_layers,
                num_attention_heads=num_attention_heads,
                num_key_value_heads=kv_head,
                bos_token_id=hf_config.bos_token_id,
                eos_token_id=hf_config.eos_token_id,
                head_dim=head_dim,
                multi_query_attention=hf_config.multi_query,
            )

        def __build_chatglm():
            """build chatglm."""
            head_dim = hf_config.hidden_size // hf_config.num_attention_heads
            bos_token_id = hf_config.bos_token_id
            if bos_token_id is None:
                bos_token_id = hf_config.pad_token_id
            # chatglm needs empty_init=False to materialize real weights.
            init_kwargs = dict(empty_init=False)
            return ModelConfig(
                hidden_size=hf_config.hidden_size,
                num_layers=hf_config.num_layers,
                num_attention_heads=hf_config.num_attention_heads,
                num_key_value_heads=hf_config.multi_query_group_num,
                bos_token_id=bos_token_id,
                eos_token_id=hf_config.eos_token_id,
                head_dim=head_dim,
                init_kwargs=init_kwargs)

        def __build_gemma():
            """build gemma (head_dim is explicit in its config)."""
            return ModelConfig(
                hidden_size=hf_config.hidden_size,
                num_layers=hf_config.num_hidden_layers,
                num_attention_heads=hf_config.num_attention_heads,
                num_key_value_heads=hf_config.num_key_value_heads,
                bos_token_id=hf_config.bos_token_id,
                eos_token_id=hf_config.eos_token_id,
                head_dim=hf_config.head_dim)

        def __build_default():
            """build default (llama-like) config."""
            head_dim = hf_config.hidden_size // hf_config.num_attention_heads
            num_attention_heads = hf_config.num_attention_heads
            num_key_value_heads = getattr(hf_config, 'num_key_value_heads',
                                          num_attention_heads)
            use_sliding_window = getattr(hf_config, 'use_sliding_window', True)
            sliding_window = -1
            if use_sliding_window:
                sliding_window = getattr(hf_config, 'sliding_window',
                                         sliding_window) or -1
            return ModelConfig(
                hidden_size=hf_config.hidden_size,
                num_layers=hf_config.num_hidden_layers,
                num_attention_heads=hf_config.num_attention_heads,
                num_key_value_heads=num_key_value_heads,
                bos_token_id=hf_config.bos_token_id,
                eos_token_id=hf_config.eos_token_id,
                sliding_window=sliding_window,
                head_dim=head_dim)

        if 'falcon' in model_path:
            model_config = __build_falcon()
        elif 'chatglm' in model_path:
            model_config = __build_chatglm()
        elif hf_config.model_type == 'gemma':
            model_config = __build_gemma()
        else:
            model_config = __build_default()

        model_config.dtype = _get_torch_dtype(hf_config)
        model_config.hf_config = hf_config
        model_config.json_config = hf_config.to_dict()
        return model_config
The provided code snippet includes necessary dependencies for implementing the `_tp_model_loop` function. Write a Python function `def _tp_model_loop( rank: int, model_path: str, model_config: ModelConfig, cache_config: CacheConfig, adapters: Dict[str, str], in_que: mp.Queue, out_que: mp.Queue, world_size: int, trust_remote_code=True, )` to solve the following problem:
Start model loops for tensor parallel model inference. Args: rank (int): Distribution rank. model_path (int): Path of the hugging face model. Could be local or online. model_config (ModelConfig): The config of the model. cache_config (CacheConfig): The config of the cache. in_que (mp.Queue): Input queue. Used to receive model input. out_que (mp.Queue): Output queue. Used to send the model output. world_size (int): The distribution world size.
Here is the function:
def _tp_model_loop(
    rank: int,
    model_path: str,
    model_config: ModelConfig,
    cache_config: CacheConfig,
    adapters: Dict[str, str],
    in_que: mp.Queue,
    out_que: mp.Queue,
    world_size: int,
    trust_remote_code=True,
):
    """Run the per-rank serving loop for tensor parallel inference.

    Builds the patched model and cache engine, optionally pages in the
    adapters, then serves forward requests from ``in_que`` forever.

    Args:
        rank (int): Distribution rank.
        model_path (int): Path of the hugging face model. Could be
            local or online.
        model_config (ModelConfig): The config of the model.
        cache_config (CacheConfig): The config of the cache.
        adapters (Dict[str, str]): Adapter name to adapter path map.
        in_que (mp.Queue): Input queue. Used to receive model input.
        out_que (mp.Queue): Output queue. Used to send the model output.
        world_size (int): The distribution world size.
        trust_remote_code (bool): Trust remote code when loading.
    """
    cuda_stream = torch.cuda.Stream()
    patched_model, cache_engine = _tp_build_model(
        rank,
        model_path,
        model_config,
        cache_config,
        adapters,
        out_que=out_que,
        world_size=world_size,
        trust_remote_code=trust_remote_code)

    if adapters:
        _tp_paging_adapters(rank,
                            patched_model,
                            cache_engine=cache_engine,
                            in_que=in_que,
                            out_que=out_que)

    # Serve forward requests until the process is torn down.
    while True:
        inputs, swap_in_map, swap_out_map = _tp_get_input(
            rank, in_que, world_size)
        cache_swapping(cache_engine,
                       swap_in_map=swap_in_map,
                       swap_out_map=swap_out_map)
        output = model_forward(
            patched_model,
            inputs,
            cache_engine,
            model_config.json_config,
            world_size=world_size,
            stream=cuda_stream,
        )
        cuda_stream.synchronize()
        # Only rank 0 talks back to the parent process.
        if rank == 0:
            out_que.put(TPResponse(0, None, output))
8,187 | import asyncio
import os
from dataclasses import asdict, dataclass, field
from typing import Any, Callable, Dict, List, Union
import torch
import torch.distributed as dist
from torch import multiprocessing as mp
from torch.distributed._tensor import DeviceMesh, Replicate, distribute_tensor
from transformers import AutoModelForCausalLM
from lmdeploy.pytorch.accel import LoadNoInit
from lmdeploy.utils import get_logger
from ..adapter.adapter import (AdapterWeightMap, get_indexed_lora_linears,
get_max_lora_weight_size, update_lora_linears)
from ..config import CacheConfig, ModelConfig
from ..models import patch
from ..utils import get_gpu_memory
from .cache_engine import CacheEngine
logger = get_logger('lmdeploy')
The provided code snippet includes necessary dependencies for implementing the `_start_tp_process` function. Write a Python function `def _start_tp_process(rank: int, world_size: int, func: Callable, args: List = None, kwargs: Dict = None, port: int = 29500)` to solve the following problem:
Start the tensor parallel process. Args: rank (int): The distribution rank. world_size (int): The distribution world size. func (Callable): The function to be called in the process. args (List): The arguments of the func. kwargs (Dict): The keyword arguments of the func.
Here is the function:
def _start_tp_process(rank: int,
                      world_size: int,
                      func: Callable,
                      args: List = None,
                      kwargs: Dict = None,
                      port: int = 29500):
    """Entry point of one tensor parallel worker process.

    Joins the NCCL process group, binds this rank's CUDA device and
    invokes ``func(rank, *args, **kwargs)`` under ``no_grad``.

    Args:
        rank (int): The distribution rank.
        world_size (int): The distribution world size.
        func (Callable): The function to be called in the process.
        args (List): The arguments of the func.
        kwargs (Dict): The keyword arguments of the func.
        port (int): TCP port of the distributed master.
    """
    try:
        os.environ['MASTER_ADDR'] = '127.0.0.1'
        os.environ['MASTER_PORT'] = str(port)
        dist.init_process_group('nccl', rank=rank, world_size=world_size)
        with torch.cuda.device(rank), torch.no_grad():
            func(rank, *(args or tuple()), **(kwargs or dict()))
    except Exception as e:
        from traceback import print_exc
        logger.error(f'Rank[{rank}] failed.')
        print_exc()
        raise e
8,188 | import asyncio
import os
from dataclasses import asdict, dataclass, field
from typing import Any, Callable, Dict, List, Union
import torch
import torch.distributed as dist
from torch import multiprocessing as mp
from torch.distributed._tensor import DeviceMesh, Replicate, distribute_tensor
from transformers import AutoModelForCausalLM
from lmdeploy.pytorch.accel import LoadNoInit
from lmdeploy.utils import get_logger
from ..adapter.adapter import (AdapterWeightMap, get_indexed_lora_linears,
get_max_lora_weight_size, update_lora_linears)
from ..config import CacheConfig, ModelConfig
from ..models import patch
from ..utils import get_gpu_memory
from .cache_engine import CacheEngine
def _check_context_alive(mp_context: mp.ProcessContext):
"""check context alive."""
procs = mp_context.processes
for idx, p in enumerate(procs):
if not p.is_alive():
raise RuntimeError(f'Rank[{idx}] failed.')
The provided code snippet includes necessary dependencies for implementing the `_queue_get_response` function. Write a Python function `def _queue_get_response(que: mp.Queue, mp_context: mp.ProcessContext, interval: float = 1.0)` to solve the following problem:
get response.
Here is the function:
def _queue_get_response(que: mp.Queue,
mp_context: mp.ProcessContext,
interval: float = 1.0):
"""get response."""
from multiprocessing.queues import Empty
while True:
try:
return que.get(timeout=interval)
except Empty:
_check_context_alive(mp_context) | get response. |
8,189 | import asyncio
import os
from dataclasses import asdict, dataclass, field
from typing import Any, Callable, Dict, List, Union
import torch
import torch.distributed as dist
from torch import multiprocessing as mp
from torch.distributed._tensor import DeviceMesh, Replicate, distribute_tensor
from transformers import AutoModelForCausalLM
from lmdeploy.pytorch.accel import LoadNoInit
from lmdeploy.utils import get_logger
from ..adapter.adapter import (AdapterWeightMap, get_indexed_lora_linears,
get_max_lora_weight_size, update_lora_linears)
from ..config import CacheConfig, ModelConfig
from ..models import patch
from ..utils import get_gpu_memory
from .cache_engine import CacheEngine
def _check_context_alive(mp_context: mp.ProcessContext):
    """check context alive.

    Raises RuntimeError naming the first rank whose worker process has
    exited, so callers polling a queue fail fast instead of hanging.
    """
    procs = mp_context.processes
    for idx, p in enumerate(procs):
        if not p.is_alive():
            raise RuntimeError(f'Rank[{idx}] failed.')
The provided code snippet includes necessary dependencies for implementing the `_async_queue_get_response` function. Write a Python function `async def _async_queue_get_response(que: mp.Queue, mp_context: mp.ProcessContext, interval: float = 1.0)` to solve the following problem:
get response.
Here is the function:
async def _async_queue_get_response(que: mp.Queue,
mp_context: mp.ProcessContext,
interval: float = 1.0):
"""get response."""
from multiprocessing.queues import Empty
def __try_que_get():
"""try que get."""
try:
return que.get(timeout=interval)
except Empty:
return None
while True:
ret = await asyncio.get_event_loop().run_in_executor(
None, __try_que_get)
if ret is not None:
return ret
_check_context_alive(mp_context) | get response. |
8,190 | import asyncio
import os
from dataclasses import asdict, dataclass, field
from typing import Any, Callable, Dict, List, Union
import torch
import torch.distributed as dist
from torch import multiprocessing as mp
from torch.distributed._tensor import DeviceMesh, Replicate, distribute_tensor
from transformers import AutoModelForCausalLM
from lmdeploy.pytorch.accel import LoadNoInit
from lmdeploy.utils import get_logger
from ..adapter.adapter import (AdapterWeightMap, get_indexed_lora_linears,
get_max_lora_weight_size, update_lora_linears)
from ..config import CacheConfig, ModelConfig
from ..models import patch
from ..utils import get_gpu_memory
from .cache_engine import CacheEngine
class BaseModelAgent(AutoModelAgent):
    """Base model agent.

    load model on local gpu

    Args:
        model_path (str): The hugging face model path.
        model_config (ModelConfig): The config of the model.
        cache_config (CacheConfig): The config of the cache info.
        adapters (Dict[str, str]): Adapter name to adapter path map.
        trust_remote_code (bool): Trust remote code
    """

    def __init__(self,
                 model_path: str,
                 model_config: ModelConfig,
                 cache_config: CacheConfig,
                 adapters: Dict[str, str] = None,
                 trust_remote_code: bool = True):
        super().__init__(model_config=model_config, cache_config=cache_config)
        torch_dtype = model_config.dtype
        self.patched_model = self._build_model(
            model_path,
            torch_dtype=torch_dtype,
            adapters=adapters,
            trust_remote_code=trust_remote_code)
        # The patched model may prefer a different cache block size.
        block_size = _infer_block_size(self.patched_model, model_config,
                                       cache_config)
        if block_size != cache_config.block_size:
            cache_config.block_size = block_size
            logger.warning(f'infered block size: {block_size}')
        _update_cache_config(model_config, cache_config)
        self.cache_engine = CacheEngine(cache_config, model_config)
        # Dedicated stream so forwards can overlap with other CUDA work.
        self.stream = torch.cuda.Stream()

    def _build_model(self,
                     model_path: str,
                     torch_dtype: torch.dtype,
                     adapters: Dict[str, str] = None,
                     trust_remote_code: bool = True):
        """Load the hugging face model, apply adapters and patch it."""
        with LoadNoInit():
            hf_model = AutoModelForCausalLM.from_pretrained(
                model_path,
                torch_dtype=torch_dtype,
                trust_remote_code=trust_remote_code,
                **self.model_config.init_kwargs)
        hf_model.eval()
        hf_model.config.use_cache = True
        if adapters:
            _load_adapters(hf_model, adapters)
        patched_model = patch(hf_model, _PATCH_ARG_NAMES)
        if adapters:
            # Keep lora weights as plain tensors so .cuda() below does
            # not move them to GPU.
            _unparam_lora_weight(patched_model)
        patched_model = patched_model.cuda()
        return patched_model

    def paging_adapters(self, weight_maps: List[AdapterWeightMap]):
        """Page adapter weights into the CPU cache blocks."""
        logger.info('paging adapters.')
        lora_linears = get_indexed_lora_linears(self.patched_model)
        cpu_caches = self.cache_engine.cpu_cache
        num_blocks = self.cache_engine.num_cpu_blocks
        # Flatten each (key, value) cache to (num_blocks, block_numel)
        # so adapter weights can be written per block.
        cpu_caches = [(kcache.view(num_blocks,
                                   -1), vcache.view(num_blocks, -1))
                      for kcache, vcache in cpu_caches]
        for weight_map in weight_maps:
            weight_map.cache_adapter(lora_linears, cpu_caches)
        update_lora_linears(lora_linears, weight_maps, device='cuda')

    def _forward_impl(self, inputs: ModelInputs, swap_in_map: SwapMap,
                      swap_out_map: SwapMap):
        """Swap caches then launch the forward on the agent's stream."""
        cache_swapping(self.cache_engine,
                       swap_in_map=swap_in_map,
                       swap_out_map=swap_out_map)
        output = model_forward(
            self.patched_model,
            inputs,
            self.cache_engine,
            self.model_config.json_config,
            world_size=1,
            stream=self.stream,
        )
        return output

    def forward(self, inputs: ModelInputs, swap_in_map: SwapMap,
                swap_out_map: SwapMap):
        """model forward.

        Args:
            inputs (Dict): The input data comes from _make_inputs.
            swap_in_map (SwapMap): Cache maps to swap in.
            swap_out_map (SwapMap): Cache maps to swap out.
        """
        output = self._forward_impl(inputs,
                                    swap_in_map=swap_in_map,
                                    swap_out_map=swap_out_map)
        # Block until the forward stream has finished.
        self.stream.synchronize()
        return output

    async def async_forward(self, inputs: ModelInputs, swap_in_map: SwapMap,
                            swap_out_map: SwapMap):
        """model forward (async).

        Args:
            inputs (Dict): The input data comes from _make_inputs.
            swap_in_map (SwapMap): Cache maps to swap in.
            swap_out_map (SwapMap): Cache maps to swap out.
        """
        output = self._forward_impl(inputs,
                                    swap_in_map=swap_in_map,
                                    swap_out_map=swap_out_map)
        # Synchronize in a worker thread so the event loop stays free.
        await asyncio.get_event_loop().run_in_executor(None,
                                                       self.stream.synchronize)
        return output
class TPModelAgent(AutoModelAgent):
    """Tensor Parallelism model agent.

    load model on multiple GPUs

    Args:
        model_path (str): The hugging face model path.
        model_config (ModelConfig): The config of the model.
        cache_config (CacheConfig): The config of the cache info.
        world_size (int): Number of GPUs / spawned worker processes.
        adapters (Dict[str, str]): Adapter name to adapter path map.
        trust_remote_code (bool): Trust remote code
    """

    def __init__(self,
                 model_path: str,
                 model_config: ModelConfig,
                 cache_config: CacheConfig,
                 world_size: int,
                 adapters: Dict[str, str] = None,
                 trust_remote_code: bool = True) -> None:
        # 'spawn' is required for CUDA use in subprocesses.
        self.mp_ctx = mp.get_context('spawn')
        super().__init__(model_config=model_config, cache_config=cache_config)
        self.world_size = world_size
        self.tp_model_in_que = self.mp_ctx.Queue(10)
        self.tp_model_out_que = self.mp_ctx.Queue(10)
        self.patch_model_tp(model_path,
                            model_config=model_config,
                            cache_config=cache_config,
                            adapters=adapters,
                            in_que=self.tp_model_in_que,
                            out_que=self.tp_model_out_que,
                            world_size=world_size,
                            trust_remote_code=trust_remote_code)

    def patch_model_tp(self, model_path: str, model_config: ModelConfig,
                       cache_config: CacheConfig, adapters: Dict[str, str],
                       in_que: mp.Queue, out_que: mp.Queue, world_size: int,
                       trust_remote_code: bool):
        """Start tensor parallel sub process.

        Args:
            model_path (int): Path of the hugging face model.
                Could be local or online.
            model_config (ModelConfig): The config of the model.
            cache_config (CacheConfig): The config of the cache.
            adapters (Dict[str, str]): Adapter name to adapter path map.
            in_que (mp.Queue): Input queue. Used to receive model input.
            out_que (mp.Queue): Output queue. Used to send the model output.
            world_size (int): The distribution world size.
            trust_remote_code (bool): Trust remote code when loading.
        """

        def __find_available_port() -> int:
            """Find a free TCP port for the distributed master."""
            import socket
            port = 29500
            while True:
                with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                    # connect_ex != 0 means nothing is listening there.
                    if s.connect_ex(('localhost', port)) != 0:
                        return port
                port += 1

        self.mp_context = mp.spawn(
            _start_tp_process,
            args=(
                world_size,
                _tp_model_loop,
                (model_path, ),
                dict(model_config=model_config,
                     cache_config=cache_config,
                     adapters=adapters,
                     in_que=in_que,
                     out_que=out_que,
                     world_size=world_size,
                     trust_remote_code=trust_remote_code),
                __find_available_port(),
            ),
            nprocs=world_size,
            join=False,
            daemon=True,
        )
        # Wait for the workers to finish building the model.
        resp: TPResponse = _queue_get_response(out_que, self.mp_context)
        if resp.ret_code != 0:
            logger.error(f'Init tp model failed with error: {resp.error}')
            raise next(err for err in resp.error if err is not None)
        # Workers may have shrunk the cache config; adopt their value.
        self.cache_config = resp.data

    def paging_adapters(self, weight_maps: List[AdapterWeightMap]):
        """load adapter.

        Sends the weight maps to the workers and waits for the result.
        """
        if not weight_maps:
            return
        self.tp_model_in_que.put(weight_maps)
        resp: TPResponse = self.tp_model_out_que.get()
        if resp.ret_code != 0:
            logger.error(f'paging adapters failed with error: {resp.error}')
            raise next(err for err in resp.error if err is not None)

    def forward(self, inputs: ModelInputs, swap_in_map: SwapMap,
                swap_out_map: SwapMap):
        """model forward.

        Args:
            inputs (Dict): The input data comes from _make_inputs.
            swap_in_map (Dict[int, int]): Cache maps to swap in.
            swap_out_map (Dict[int, int]): Cache maps to swap out.
        """
        with torch.no_grad():
            self.tp_model_in_que.put((inputs, swap_in_map, swap_out_map))
            resp: TPResponse = _queue_get_response(self.tp_model_out_que,
                                                   self.mp_context)
            if resp.ret_code != 0:
                raise RuntimeError('tp forward failed.')
            return resp.data

    async def async_forward(self, inputs: ModelInputs, swap_in_map: SwapMap,
                            swap_out_map: SwapMap):
        """model forward (async).

        Args:
            inputs (Dict): The input data comes from _make_inputs.
            swap_in_map (Dict[int, int]): Cache maps to swap in.
            swap_out_map (Dict[int, int]): Cache maps to swap out.
        """
        with torch.no_grad():
            self.tp_model_in_que.put((inputs, swap_in_map, swap_out_map))
            resp: TPResponse = await _async_queue_get_response(
                self.tp_model_out_que, self.mp_context)
            if resp.ret_code != 0:
                raise RuntimeError('tp forward failed.')
            return resp.data
@dataclass
class CacheConfig:
    """Config of key value cache.

    Fix: the ``@dataclass`` decorator was missing — the class holds only
    bare field annotations, yet instances are constructed and their
    block counts read/written by the surrounding engine code.

    Attributes:
        block_size: Number of tokens per cache block.
        num_cpu_blocks: Number of blocks in the CPU (swap) cache.
        num_gpu_blocks: Number of blocks in the GPU cache.
        window_size: Attention window size; -1 disables windowing.
        cache_max_entry_count: Fraction of free GPU memory the cache may
            occupy.
    """
    block_size: int
    num_cpu_blocks: int
    num_gpu_blocks: int
    window_size: int = -1
    cache_max_entry_count: float = 0.8
class ModelConfig:
"""Config of model."""
hidden_size: int
num_layers: int
num_attention_heads: int
num_key_value_heads: int
bos_token_id: int
eos_token_id: int
head_dim: int
sliding_window: int = -1
dtype: torch.dtype = torch.float16
multi_query_attention: bool = False
json_config: dict = field(default_factory=dict)
hf_config: Any = None
init_kwargs: Dict[str, Any] = field(default_factory=dict)
def get_head_size(self):
"""get head size."""
return self.head_dim
def from_pretrained(cls,
pretrained_model_name_or_path: str,
trust_remote_code: bool = True):
"""build ModelConfig from model path or name."""
from transformers import AutoConfig
hf_config = AutoConfig.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=trust_remote_code)
return cls.from_hf_config(hf_config, pretrained_model_name_or_path)
def from_hf_config(cls, hf_config: Any, model_path: str = None):
"""from huggingface config."""
if model_path is None:
model_path = ''
def __build_falcon():
"""build falcon."""
num_attention_heads = hf_config.num_attention_heads
if hf_config.new_decoder_architecture:
# 40b-instruct, GQA
kv_head = hf_config.num_kv_heads
if hf_config.multi_query:
# 7b-instruct, MQA
kv_head = 1
else:
# rw-1b, MHA
kv_head = num_attention_heads
head_dim = hf_config.hidden_size // num_attention_heads
return ModelConfig(
hidden_size=hf_config.hidden_size,
num_layers=hf_config.num_hidden_layers,
num_attention_heads=num_attention_heads,
num_key_value_heads=kv_head,
bos_token_id=hf_config.bos_token_id,
eos_token_id=hf_config.eos_token_id,
head_dim=head_dim,
multi_query_attention=hf_config.multi_query,
)
def __build_chatglm():
"""build chatglm."""
head_dim = hf_config.hidden_size // hf_config.num_attention_heads
bos_token_id = hf_config.bos_token_id
if bos_token_id is None:
bos_token_id = hf_config.pad_token_id
init_kwargs = dict(empty_init=False)
return ModelConfig(
hidden_size=hf_config.hidden_size,
num_layers=hf_config.num_layers,
num_attention_heads=hf_config.num_attention_heads,
num_key_value_heads=hf_config.multi_query_group_num,
bos_token_id=bos_token_id,
eos_token_id=hf_config.eos_token_id,
head_dim=head_dim,
init_kwargs=init_kwargs)
def __build_gemma():
return ModelConfig(
hidden_size=hf_config.hidden_size,
num_layers=hf_config.num_hidden_layers,
num_attention_heads=hf_config.num_attention_heads,
num_key_value_heads=hf_config.num_key_value_heads,
bos_token_id=hf_config.bos_token_id,
eos_token_id=hf_config.eos_token_id,
head_dim=hf_config.head_dim)
def __build_default():
head_dim = hf_config.hidden_size // hf_config.num_attention_heads
num_attention_heads = hf_config.num_attention_heads
num_key_value_heads = getattr(hf_config, 'num_key_value_heads',
num_attention_heads)
use_sliding_window = getattr(hf_config, 'use_sliding_window', True)
sliding_window = -1
if use_sliding_window:
sliding_window = getattr(hf_config, 'sliding_window',
sliding_window) or -1
return ModelConfig(
hidden_size=hf_config.hidden_size,
num_layers=hf_config.num_hidden_layers,
num_attention_heads=hf_config.num_attention_heads,
num_key_value_heads=num_key_value_heads,
bos_token_id=hf_config.bos_token_id,
eos_token_id=hf_config.eos_token_id,
sliding_window=sliding_window,
head_dim=head_dim)
if 'falcon' in model_path:
model_config = __build_falcon()
elif 'chatglm' in model_path:
model_config = __build_chatglm()
elif hf_config.model_type == 'gemma':
model_config = __build_gemma()
else:
model_config = __build_default()
model_config.dtype = _get_torch_dtype(hf_config)
model_config.hf_config = hf_config
model_config.json_config = hf_config.to_dict()
return model_config
The provided code snippet includes necessary dependencies for implementing the `build_model_agent` function. Write a Python function `def build_model_agent(model_path: str, cache_config: CacheConfig, trust_remote_code: bool, adapters: Dict[str, str] = None, tp: int = 1)` to solve the following problem:
create model agent.
Here is the function:
def build_model_agent(model_path: str,
                      cache_config: CacheConfig,
                      trust_remote_code: bool,
                      adapters: Dict[str, str] = None,
                      tp: int = 1):
    """Create a model agent for the given model.

    Args:
        model_path (str): model name or local path.
        cache_config (CacheConfig): key/value cache configuration.
        trust_remote_code (bool): forwarded to transformers config loading.
        adapters (Dict[str, str]): optional adapter name -> path mapping.
        tp (int): tensor-parallel world size; 1 selects the single-device
            agent, anything else the multi-process TP agent.
    """
    model_config = ModelConfig.from_pretrained(
        model_path, trust_remote_code=trust_remote_code)
    common_kwargs = dict(model_config=model_config,
                         cache_config=cache_config,
                         adapters=adapters,
                         trust_remote_code=trust_remote_code)
    if tp == 1:
        return BaseModelAgent(model_path, **common_kwargs)
    return TPModelAgent(model_path, world_size=tp, **common_kwargs)
8,191 | import asyncio
import enum
from dataclasses import dataclass, field
from queue import Empty, Queue
from threading import Lock, Thread
from typing import Any, Awaitable, Callable, Dict, List
from lmdeploy.messages import ResponseType
from lmdeploy.utils import get_logger
logger = get_logger('lmdeploy')
def _raise_exception_on_finish(task: asyncio.Task) -> None:
    """Done-callback that reports unexpected task failures.

    Cancellation is treated as a normal shutdown.

    NOTE(review): despite the name, the exception is only logged here and
    never re-raised -- confirm whether a re-raise was dropped upstream.
    """
    try:
        task.result()
    except asyncio.CancelledError:
        pass
    except Exception as e:
        logger.exception(f'Engine loop failed with error: {e}')
8,192 | import asyncio
import enum
from dataclasses import dataclass, field
from queue import Empty, Queue
from threading import Lock, Thread
from typing import Any, Awaitable, Callable, Dict, List
from lmdeploy.messages import ResponseType
from lmdeploy.utils import get_logger
logger = get_logger('lmdeploy')
def _ignore_exception_on_finish(task: asyncio.Task) -> None:
    """Done-callback that swallows task failures.

    Cancellation ends silently; any other exception is logged (the fact at
    info level, the detail at debug level) and never propagated.
    """
    try:
        task.result()
    except asyncio.CancelledError:
        pass
    except Exception as exc:
        logger.info(f'task: {task.get_name()} ended.')
        logger.debug(f'task: {task.get_name()} exception: {exc}')
8,193 | import asyncio
import enum
from dataclasses import dataclass, field
from queue import Empty, Queue
from threading import Lock, Thread
from typing import Any, Awaitable, Callable, Dict, List
from lmdeploy.messages import ResponseType
from lmdeploy.utils import get_logger
logger = get_logger('lmdeploy')
The provided code snippet includes necessary dependencies for implementing the `_run_until_complete` function. Write a Python function `def _run_until_complete(future: Awaitable)` to solve the following problem:
run until complete.
Here is the function:
def _run_until_complete(future: Awaitable):
    """Drive *future* to completion and return its result.

    Uses the current thread's event loop; when the thread has none, a new
    loop is created and installed before running.
    """
    try:
        loop = asyncio.get_event_loop()
    except Exception:
        # No usable loop on this thread -- build and register a fresh one.
        logger.warning('Can not found event loop in current thread.'
                       ' Create a new event loop.')
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return loop.run_until_complete(future)
8,194 | from typing import Dict, List, Tuple
import torch
from torch.distributed._tensor import DeviceMesh
from lmdeploy.utils import get_logger
from ..config import CacheConfig, ModelConfig
The provided code snippet includes necessary dependencies for implementing the `_get_dtype_size` function. Write a Python function `def _get_dtype_size(dtype: torch.dtype) -> int` to solve the following problem:
get size of the given dtype. Args: dtype (torch.dtype): Data type. Return: int: size in bytes.
Here is the function:
def _get_dtype_size(dtype: torch.dtype) -> int:
"""get size of the given dtype.
Args:
dtype (torch.dtype): Data type.
Return:
int: size in bytes.
"""
return torch.tensor([], dtype=dtype).element_size() | get size of the given dtype. Args: dtype (torch.dtype): Data type. Return: int: size in bytes. |
8,195 | from dataclasses import asdict, dataclass
from typing import Dict, List
import torch
from transformers.generation.logits_process import LogitsWarper
from ..messages import SchedulerSequence
The provided code snippet includes necessary dependencies for implementing the `_process_temperature` function. Write a Python function `def _process_temperature(scores: torch.Tensor, temperature: torch.Tensor, inplace: bool = True)` to solve the following problem:
process temperature.
Here is the function:
def _process_temperature(scores: torch.Tensor,
temperature: torch.Tensor,
inplace: bool = True):
"""process temperature."""
temperature = temperature.to(scores.dtype)
if not inplace:
scores = scores / temperature[:, None]
else:
scores /= temperature[:, None]
return scores | process temperature. |
8,196 | from dataclasses import asdict, dataclass
from typing import Dict, List
import torch
from transformers.generation.logits_process import LogitsWarper
from ..messages import SchedulerSequence
The provided code snippet includes necessary dependencies for implementing the `_process_bad_words` function. Write a Python function `def _process_bad_words(scores: torch.Tensor, bad_words: torch.LongTensor, filter_value: float = -float('inf'), inplace: bool = True)` to solve the following problem:
process bad words.
Here is the function:
def _process_bad_words(scores: torch.Tensor,
bad_words: torch.LongTensor,
filter_value: float = -float('inf'),
inplace: bool = True):
"""process bad words."""
batch_size = scores.size(0)
batch_idx = torch.arange(batch_size, device=scores.device)
filtered_scores = scores[batch_idx[:, None], bad_words]
filtered_scores[bad_words >= 0] = filter_value
if not inplace:
scores = scores.clone()
scores[batch_idx[:, None], bad_words] = filtered_scores
return scores | process bad words. |
8,197 | from dataclasses import asdict, dataclass
from typing import Dict, List
import torch
from transformers.generation.logits_process import LogitsWarper
from ..messages import SchedulerSequence
The provided code snippet includes necessary dependencies for implementing the `_process_repetition_penalty` function. Write a Python function `def _process_repetition_penalty(scores: torch.Tensor, input_ids: torch.LongTensor, penalty: torch.Tensor, inplace: bool = True)` to solve the following problem:
process repetition penalty.
Here is the function:
def _process_repetition_penalty(scores: torch.Tensor,
input_ids: torch.LongTensor,
penalty: torch.Tensor,
inplace: bool = True):
"""process repetition penalty."""
score = torch.gather(scores, 1, input_ids)
penalty = penalty.to(score.dtype)
score = torch.where(score < 0, score * penalty[:, None],
score / penalty[:, None])
if not inplace:
scores = scores.clone()
scores.scatter_(1, input_ids, score)
return scores | process repetition penalty. |
8,198 | from dataclasses import asdict, dataclass
from typing import Dict, List
import torch
from transformers.generation.logits_process import LogitsWarper
from ..messages import SchedulerSequence
The provided code snippet includes necessary dependencies for implementing the `_filter_topk_sorted` function. Write a Python function `def _filter_topk_sorted(scores: torch.Tensor, topk: torch.LongTensor, filter_value: float = -float('inf'), inplace: bool = True)` to solve the following problem:
filter topk on sorted scores.
Here is the function:
def _filter_topk_sorted(scores: torch.Tensor,
topk: torch.LongTensor,
filter_value: float = -float('inf'),
inplace: bool = True):
"""filter topk on sorted scores."""
filter_value = -float('inf')
num_tokens = scores.size(1)
token_idx = torch.arange(num_tokens, device=scores.device)
mask = token_idx[None, :] >= topk[:, None]
if inplace:
scores.masked_fill_(mask, filter_value)
else:
scores = scores.masked_fill(mask, filter_value)
return scores | filter topk on sorted scores. |
8,199 | from dataclasses import asdict, dataclass
from typing import Dict, List
import torch
from transformers.generation.logits_process import LogitsWarper
from ..messages import SchedulerSequence
The provided code snippet includes necessary dependencies for implementing the `_filter_topp_sorted` function. Write a Python function `def _filter_topp_sorted(scores: torch.Tensor, topp: torch.Tensor, filter_value: float = -float('inf'), inplace: bool = True)` to solve the following problem:
filter topp on sorted scores.
Here is the function:
def _filter_topp_sorted(scores: torch.Tensor,
topp: torch.Tensor,
filter_value: float = -float('inf'),
inplace: bool = True):
"""filter topp on sorted scores."""
softmax_scores = scores.softmax(-1)
cum_scores = softmax_scores.cumsum(1) - softmax_scores
mask = cum_scores > topp[:, None]
mask[:, 0] = False # keep at least one
if inplace:
scores.masked_fill_(mask, filter_value)
else:
scores = scores.masked_fill(mask, filter_value)
return scores | filter topp on sorted scores. |
8,200 | from dataclasses import asdict, dataclass
from typing import Dict, List
import torch
from transformers.generation.logits_process import LogitsWarper
from ..messages import SchedulerSequence
def multinomial_sampling(scores: torch.Tensor,
                         seeds: torch.LongTensor,
                         offsets: torch.LongTensor,
                         indices: torch.Tensor = None):
    """Sample one token id per batch row from ``scores`` via a triton kernel.

    Args:
        scores: (batch, num_tokens) per-candidate sampling weights.
        seeds: per-row RNG seeds forwarded to the kernel.
        offsets: per-row RNG offsets forwarded to the kernel.
        indices: (batch, num_tokens) token ids aligned with ``scores``;
            defaults to ``arange(num_tokens)`` broadcast over the batch.
    """
    def __kernel_meta():
        """Collect device/stream info forwarded to the triton launch."""
        device = scores.device
        device_idx = device.index
        device_type = device.type
        stream = get_cuda_stream(device_idx)
        return dict(device=device, device_type=device_type, stream=stream)
    assert scores.dim() == 2
    batch_size, num_tokens = scores.size()
    device = scores.device
    if num_tokens == 1:
        # Single candidate per row: everyone picks index 0, no kernel needed.
        # NOTE(review): this path returns a (batch, 1) tensor while the
        # kernel path returns a 1-D tensor -- looks inconsistent; confirm.
        return torch.zeros_like(scores, dtype=torch.long)
    if indices is None:
        indices = torch.arange(num_tokens, device=device)
        indices = indices.expand_as(scores)
    assert indices.dim() == 2
    assert indices.size() == scores.size()
    # Seed the output with each row's first candidate; presumably the kernel
    # overwrites it with the sampled id -- confirm against the kernel.
    outputs = indices[:, 0].clone()
    BLOCK = 32  # batch rows per program (grid is cdiv(batch, BLOCK))
    BLOCK_N = 64  # candidates per inner step -- presumably; verify in kernel
    grid = [triton.cdiv(batch_size, BLOCK)]
    kernel_meta = __kernel_meta()
    _multinomial_sampling_kernel[grid](scores,
                                       seeds,
                                       offsets,
                                       indices,
                                       outputs,
                                       stride_sb=scores.stride(0),
                                       stride_st=scores.stride(1),
                                       stride_ib=indices.stride(0),
                                       stride_it=indices.stride(1),
                                       num_batchs=batch_size,
                                       num_tokens=num_tokens,
                                       BLOCK=BLOCK,
                                       BLOCK_N=BLOCK_N,
                                       **kernel_meta)
    return outputs
The provided code snippet includes necessary dependencies for implementing the `_multinomial_sampling` function. Write a Python function `def _multinomial_sampling(scores: torch.Tensor, seeds: torch.LongTensor, offsets: torch.LongTensor, indices: torch.LongTensor = None)` to solve the following problem:
sampling.
Here is the function:
def _multinomial_sampling(scores: torch.Tensor,
                          seeds: torch.LongTensor,
                          offsets: torch.LongTensor,
                          indices: torch.LongTensor = None):
    """Thin wrapper dispatching to the fused multinomial sampling kernel."""
    # Imported lazily so the kernel module is only loaded when sampling runs.
    from lmdeploy.pytorch.kernels import multinomial_sampling
    return multinomial_sampling(scores=scores,
                                seeds=seeds,
                                offsets=offsets,
                                indices=indices)
8,201 | import asyncio
import os
from dataclasses import dataclass
from typing import Any, Dict, List
import torch
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
ResponseType)
from lmdeploy.tokenizer import Tokenizer
from lmdeploy.utils import get_logger, get_model, logging_timer
from ..adapter.adapter import ADAPTER_MANAGER, SchedulerAdapter
from ..check_env import check_env, check_model
from ..config import CacheConfig, SchedulerConfig
from ..messages import MessageStatus, SamplingParam, SchedulerSequence
from ..paging import Scheduler
from .logits_process import FusedLogitsProcessor, SamplingInputs
from .model_agent import AutoModelAgent, ModelInputs
from .request import (Request, RequestManager, RequestSender, RequestType,
Response)
The provided code snippet includes necessary dependencies for implementing the `_div_up` function. Write a Python function `def _div_up(x, n)` to solve the following problem:
perform div up.
Here is the function:
def _div_up(x, n):
"""perform div up."""
return (x + n - 1) // n | perform div up. |
8,202 | import asyncio
import os
from dataclasses import dataclass
from typing import Any, Dict, List
import torch
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
ResponseType)
from lmdeploy.tokenizer import Tokenizer
from lmdeploy.utils import get_logger, get_model, logging_timer
from ..adapter.adapter import ADAPTER_MANAGER, SchedulerAdapter
from ..check_env import check_env, check_model
from ..config import CacheConfig, SchedulerConfig
from ..messages import MessageStatus, SamplingParam, SchedulerSequence
from ..paging import Scheduler
from .logits_process import FusedLogitsProcessor, SamplingInputs
from .model_agent import AutoModelAgent, ModelInputs
from .request import (Request, RequestManager, RequestSender, RequestType,
Response)
class AutoModelAgent:
def __init__(self, model_config: ModelConfig, cache_config: CacheConfig):
def paging_adapters(self, weight_maps: List[AdapterWeightMap]):
async def async_forward(self, inputs: ModelInputs, swap_in_map: SwapMap,
swap_out_map: SwapMap):
def forward(self, inputs: ModelInputs, swap_in_map: SwapMap,
swap_out_map: SwapMap):
def from_pretrained(cls,
pretrained_model_name_or_path: str,
cache_config: CacheConfig,
trust_remote_code: bool,
adapters: Dict[str, str] = None,
tp: int = 1):
def _paging_adapters(adapters: dict, model_agent: AutoModelAgent,
                     scheduler: Scheduler):
    """Register every adapter with the scheduler and hand the resulting
    weight maps to the model agent for paging."""
    weight_maps = []
    for name, path in (adapters or {}).items():
        wmap = scheduler.add_adapter(path, name)
        # block tables come back as plain lists; the agent wants tensors.
        wmap.block_table = torch.tensor(wmap.block_table)
        weight_maps.append(wmap)
    model_agent.paging_adapters(weight_maps)
8,203 | import asyncio
import os
from dataclasses import dataclass
from typing import Any, Dict, List
import torch
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
ResponseType)
from lmdeploy.tokenizer import Tokenizer
from lmdeploy.utils import get_logger, get_model, logging_timer
from ..adapter.adapter import ADAPTER_MANAGER, SchedulerAdapter
from ..check_env import check_env, check_model
from ..config import CacheConfig, SchedulerConfig
from ..messages import MessageStatus, SamplingParam, SchedulerSequence
from ..paging import Scheduler
from .logits_process import FusedLogitsProcessor, SamplingInputs
from .model_agent import AutoModelAgent, ModelInputs
from .request import (Request, RequestManager, RequestSender, RequestType,
Response)
The provided code snippet includes necessary dependencies for implementing the `_tensorlize_block_offsets` function. Write a Python function `def _tensorlize_block_offsets(block_offsets)` to solve the following problem:
tensorlize block_offsets.
Here is the function:
def _tensorlize_block_offsets(block_offsets):
"""tensorlize block_offsets."""
from torch.nn.utils.rnn import pad_sequence
block_offsets = [torch.from_numpy(off) for off in block_offsets]
block_offsets = pad_sequence(block_offsets, batch_first=True)
return block_offsets | tensorlize block_offsets. |
8,204 | import asyncio
import os
from dataclasses import dataclass
from typing import Any, Dict, List
import torch
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
ResponseType)
from lmdeploy.tokenizer import Tokenizer
from lmdeploy.utils import get_logger, get_model, logging_timer
from ..adapter.adapter import ADAPTER_MANAGER, SchedulerAdapter
from ..check_env import check_env, check_model
from ..config import CacheConfig, SchedulerConfig
from ..messages import MessageStatus, SamplingParam, SchedulerSequence
from ..paging import Scheduler
from .logits_process import FusedLogitsProcessor, SamplingInputs
from .model_agent import AutoModelAgent, ModelInputs
from .request import (Request, RequestManager, RequestSender, RequestType,
Response)
SeqList = List[SchedulerSequence]
AdapterList = List[SchedulerAdapter]
The provided code snippet includes necessary dependencies for implementing the `_get_adapter_ids` function. Write a Python function `def _get_adapter_ids(seqs: SeqList, adapters: AdapterList)` to solve the following problem:
get adapter ids.
Here is the function:
def _get_adapter_ids(seqs: SeqList, adapters: AdapterList):
    """Map every sequence to the index of its adapter within ``adapters``."""
    name_to_idx = {ada.name: idx for idx, ada in enumerate(adapters)}
    return [name_to_idx[seq.adapter_name] for seq in seqs]
8,205 | import asyncio
import os
from dataclasses import dataclass
from typing import Any, Dict, List
import torch
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
ResponseType)
from lmdeploy.tokenizer import Tokenizer
from lmdeploy.utils import get_logger, get_model, logging_timer
from ..adapter.adapter import ADAPTER_MANAGER, SchedulerAdapter
from ..check_env import check_env, check_model
from ..config import CacheConfig, SchedulerConfig
from ..messages import MessageStatus, SamplingParam, SchedulerSequence
from ..paging import Scheduler
from .logits_process import FusedLogitsProcessor, SamplingInputs
from .model_agent import AutoModelAgent, ModelInputs
from .request import (Request, RequestManager, RequestSender, RequestType,
Response)
def _check_resp(resp: Response, state: ResponseType, warning_msg: str = None):
    """Return True when ``resp.type`` matches ``state``.

    ``state`` may be a single ResponseType or a list of acceptable ones.
    When the check fails and ``warning_msg`` is given, it is logged.
    """
    expected = [state] if isinstance(state, ResponseType) else state
    ok = resp.type in expected
    if not ok and warning_msg is not None:
        logger.warning(warning_msg)
    return ok
class ResponseType(enum.Enum):
    """Response type.

    Status codes carried by responses handed back to request senders;
    checked e.g. by ``_check_resp`` and ``async_try_add_session``.
    """
    SUCCESS = enum.auto()
    FINISH = enum.auto()
    ENGINE_STOP_ERROR = enum.auto()
    SESSION_REPEAT = enum.auto()  # session id already added (tolerated when adding)
    SESSION_NOT_EXIST = enum.auto()
    HANDLER_NOT_EXIST = enum.auto()
class RequestType(enum.Enum):
    """Request type.

    Kinds of requests a ``RequestSender`` can post to the engine's
    request queue.
    """
    ADD_SESSION = enum.auto()
    ADD_MESSAGE = enum.auto()
    STOP_SESSION = enum.auto()
    END_SESSION = enum.auto()
    STOP_ENGINE = enum.auto()
    RESUME_ENGINE = enum.auto()
class RequestSender:
"""Request sender.
Args:
sender_id (int): The id of the sender
"""
sender_id: int
manager: 'RequestManager'
resp_dict: Dict[int, List[Response]] = field(default_factory=dict)
_next_req_id: int = 0
_resp_que: asyncio.Queue = None
_resp_thread_que: Queue = None
def new(cls, sender_id: int, manager: 'RequestManager'):
"""new."""
return cls(sender_id=sender_id, manager=manager)
def resp_que(self):
"""response queue."""
if self.is_thread_safe():
return self.manager.responses
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if self._resp_que is None:
self._resp_que = asyncio.Queue()
return self._resp_que
def req_que(self):
"""request queue."""
return self.manager.requests
def resp_thread_que(self):
"""response threadsafe queue."""
if self._resp_thread_que is None:
self._resp_thread_que = Queue()
return self._resp_thread_que
def req_thread_que(self):
"""request threadsafe queue."""
return self.manager.thread_requests
def event_loop(self):
"""get event loop."""
return self.manager.event_loop
def is_thread_safe(self):
"""is thread safe."""
return self.manager.is_thread_safe()
def is_loop_alive(self):
"""is loop alive."""
return self.manager.is_loop_alive()
def run_until_complete(self, future: Awaitable):
"""run untile complete."""
return self.manager.run_until_complete(future)
def _resp_get(self):
"""resp_que.get."""
timeout = 1
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
ret = self.resp_thread_que.get(timeout=timeout)
return ret
except Empty:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
async def _async_resp_get(self):
"""get resp.
Different behavior in threadsafe mode.
"""
timeout = 1
async def __no_threadsafe_get():
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
return await asyncio.wait_for(self.resp_que.get(), timeout)
except asyncio.TimeoutError:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
if self.is_thread_safe():
ret = self._resp_get()
await asyncio.sleep(0)
return ret
else:
return await __no_threadsafe_get()
def _req_put(self, reqs: Any):
"""req put."""
self.req_thread_que.put(reqs)
async def _async_req_put(self, reqs: Any):
"""async rq_que put.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
self._req_put(reqs)
await asyncio.sleep(0)
else:
await self.req_que.put(reqs)
def _prefetch_resps(self):
"""prefetch from resp que.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
resp_que = self.resp_thread_que
else:
resp_que = self.resp_que
num_resps = resp_que.qsize()
for _ in range(num_resps):
resp: Response = resp_que.get_nowait()
req_id = resp.req_id
self._push_resp(req_id, resp)
def _push_resp(self, req_id: int, resp: Response):
"""push response."""
self.resp_dict.setdefault(req_id, [])
self.resp_dict[req_id].append(resp)
def _pop_resp(self, req_id: int, default: Any = None):
"""pop response."""
if req_id not in self.resp_dict:
return default
resps = self.resp_dict[req_id]
ret = resps.pop(0)
if len(resps) == 0:
self.resp_dict.pop(req_id)
return ret
def _gather_request(self, req_types: List[RequestType], data: List[Any]):
"""gather requests."""
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if not self.is_loop_alive():
logger.error('Engine main loop stopped.')
exit(1)
assert len(req_types) == len(data)
batch_size = len(req_types)
req_ids = list(range(self._next_req_id,
self._next_req_id + batch_size))
self._next_req_id += batch_size
reqs = [
Request(type=rtype,
sender_id=self.sender_id,
req_id=req_id,
data=rdata)
for req_id, rtype, rdata in zip(req_ids, req_types, data)
]
return req_ids, reqs
async def async_batched_send_async(self, req_types: List[RequestType],
data: List[Any]):
"""Batched send request asynchronize."""
req_ids, reqs = self._gather_request(req_types, data)
await self._async_req_put(reqs)
return req_ids
async def async_send_async(self, req_type: RequestType, data: Any):
"""send request asynchronize."""
return (await self.async_batched_send_async(req_types=[req_type],
data=[data]))[0]
def batched_send_async(self, req_types: List[RequestType],
data: List[Any]) -> List[int]:
"""Batched send request asynchronize.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_batched_send_async(req_types, data)
return self.run_until_complete(coro)
req_ids, reqs = self._gather_request(req_types, data)
self._req_put(reqs)
return req_ids
def send_async(self, req_type: RequestType, data: Any) -> int:
"""send request asynchronize."""
return self.batched_send_async(req_types=[req_type], data=[data])[0]
async def async_recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
self._prefetch_resps()
for req_id in self.resp_dict:
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
return await self._async_resp_get()
def recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
coro = self.async_recv_any(que_timeout)
return self.run_until_complete(coro)
def recv_all(self, req_id: int, block: bool = True):
"""revceive all response with req_id."""
self._prefetch_resps()
resps = self.resp_dict.pop(req_id, [])
return resps
async def async_recv(self,
req_id: int,
que_timeout: float = None) -> Response:
"""receive response of given request id async."""
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = await self._async_resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
def recv(self, req_id: int, que_timeout: float = None) -> Response:
"""receive response of given request id.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_recv(req_id, que_timeout)
return self.run_until_complete(coro)
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = self._resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
async def async_send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None):
"""send and receive synchronize."""
req_id = await self.async_send_async(req_type, data)
return await self.async_recv(req_id, que_timeout=que_timeout)
def send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None) -> Response:
"""send and receive synchronize."""
req_id = self.send_async(req_type, data)
return self.recv(req_id, que_timeout=que_timeout)
def response_callback(self, resp: Response):
"""response callback."""
self.resp_que.put_nowait(resp)
The provided code snippet includes necessary dependencies for implementing the `async_try_add_session` function. Write a Python function `async def async_try_add_session(req_sender: RequestSender, session_id: int)` to solve the following problem:
Add new session. Args: session_id (int): The session id to add.
Here is the function:
async def async_try_add_session(req_sender: RequestSender, session_id: int):
    """Create a session on the engine, tolerating duplicates.

    Args:
        req_sender (RequestSender): channel used to reach the engine.
        session_id (int): The session id to add.
    """
    resp = await req_sender.async_send(RequestType.ADD_SESSION,
                                       dict(session_id=session_id))
    # A repeated session id is acceptable here, anything else is warned.
    ok_states = [ResponseType.SUCCESS, ResponseType.SESSION_REPEAT]
    _check_resp(resp, ok_states,
                (f'Can not add session {session_id} '
                 f'with error: {resp.type}'))
8,206 | import asyncio
import os
from dataclasses import dataclass
from typing import Any, Dict, List
import torch
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
ResponseType)
from lmdeploy.tokenizer import Tokenizer
from lmdeploy.utils import get_logger, get_model, logging_timer
from ..adapter.adapter import ADAPTER_MANAGER, SchedulerAdapter
from ..check_env import check_env, check_model
from ..config import CacheConfig, SchedulerConfig
from ..messages import MessageStatus, SamplingParam, SchedulerSequence
from ..paging import Scheduler
from .logits_process import FusedLogitsProcessor, SamplingInputs
from .model_agent import AutoModelAgent, ModelInputs
from .request import (Request, RequestManager, RequestSender, RequestType,
Response)
def _check_resp_success(resp: Response, warning_msg: str = None):
    """Return True when ``resp`` carries ``ResponseType.SUCCESS``.

    Convenience wrapper over ``_check_resp``; logs ``warning_msg`` on
    failure when provided.
    """
    return _check_resp(resp, ResponseType.SUCCESS, warning_msg)
class RequestType(enum.Enum):
    """Request type.

    Kinds of requests a ``RequestSender`` can post to the engine's
    request queue.
    """
    ADD_SESSION = enum.auto()
    ADD_MESSAGE = enum.auto()
    STOP_SESSION = enum.auto()
    END_SESSION = enum.auto()
    STOP_ENGINE = enum.auto()
    RESUME_ENGINE = enum.auto()
class RequestSender:
"""Request sender.
Args:
sender_id (int): The id of the sender
"""
sender_id: int
manager: 'RequestManager'
resp_dict: Dict[int, List[Response]] = field(default_factory=dict)
_next_req_id: int = 0
_resp_que: asyncio.Queue = None
_resp_thread_que: Queue = None
def new(cls, sender_id: int, manager: 'RequestManager'):
"""new."""
return cls(sender_id=sender_id, manager=manager)
def resp_que(self):
"""response queue."""
if self.is_thread_safe():
return self.manager.responses
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if self._resp_que is None:
self._resp_que = asyncio.Queue()
return self._resp_que
def req_que(self):
"""request queue."""
return self.manager.requests
def resp_thread_que(self):
"""response threadsafe queue."""
if self._resp_thread_que is None:
self._resp_thread_que = Queue()
return self._resp_thread_que
def req_thread_que(self):
"""request threadsafe queue."""
return self.manager.thread_requests
def event_loop(self):
"""get event loop."""
return self.manager.event_loop
def is_thread_safe(self):
"""is thread safe."""
return self.manager.is_thread_safe()
def is_loop_alive(self):
"""is loop alive."""
return self.manager.is_loop_alive()
def run_until_complete(self, future: Awaitable):
"""run untile complete."""
return self.manager.run_until_complete(future)
def _resp_get(self):
"""resp_que.get."""
timeout = 1
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
ret = self.resp_thread_que.get(timeout=timeout)
return ret
except Empty:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
async def _async_resp_get(self):
"""get resp.
Different behavior in threadsafe mode.
"""
timeout = 1
async def __no_threadsafe_get():
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
return await asyncio.wait_for(self.resp_que.get(), timeout)
except asyncio.TimeoutError:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
if self.is_thread_safe():
ret = self._resp_get()
await asyncio.sleep(0)
return ret
else:
return await __no_threadsafe_get()
def _req_put(self, reqs: Any):
"""req put."""
self.req_thread_que.put(reqs)
async def _async_req_put(self, reqs: Any):
"""async rq_que put.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
self._req_put(reqs)
await asyncio.sleep(0)
else:
await self.req_que.put(reqs)
def _prefetch_resps(self):
"""prefetch from resp que.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
resp_que = self.resp_thread_que
else:
resp_que = self.resp_que
num_resps = resp_que.qsize()
for _ in range(num_resps):
resp: Response = resp_que.get_nowait()
req_id = resp.req_id
self._push_resp(req_id, resp)
def _push_resp(self, req_id: int, resp: Response):
"""push response."""
self.resp_dict.setdefault(req_id, [])
self.resp_dict[req_id].append(resp)
def _pop_resp(self, req_id: int, default: Any = None):
"""pop response."""
if req_id not in self.resp_dict:
return default
resps = self.resp_dict[req_id]
ret = resps.pop(0)
if len(resps) == 0:
self.resp_dict.pop(req_id)
return ret
def _gather_request(self, req_types: List[RequestType], data: List[Any]):
"""gather requests."""
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if not self.is_loop_alive():
logger.error('Engine main loop stopped.')
exit(1)
assert len(req_types) == len(data)
batch_size = len(req_types)
req_ids = list(range(self._next_req_id,
self._next_req_id + batch_size))
self._next_req_id += batch_size
reqs = [
Request(type=rtype,
sender_id=self.sender_id,
req_id=req_id,
data=rdata)
for req_id, rtype, rdata in zip(req_ids, req_types, data)
]
return req_ids, reqs
async def async_batched_send_async(self, req_types: List[RequestType],
data: List[Any]):
"""Batched send request asynchronize."""
req_ids, reqs = self._gather_request(req_types, data)
await self._async_req_put(reqs)
return req_ids
async def async_send_async(self, req_type: RequestType, data: Any):
"""send request asynchronize."""
return (await self.async_batched_send_async(req_types=[req_type],
data=[data]))[0]
def batched_send_async(self, req_types: List[RequestType],
data: List[Any]) -> List[int]:
"""Batched send request asynchronize.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_batched_send_async(req_types, data)
return self.run_until_complete(coro)
req_ids, reqs = self._gather_request(req_types, data)
self._req_put(reqs)
return req_ids
def send_async(self, req_type: RequestType, data: Any) -> int:
"""send request asynchronize."""
return self.batched_send_async(req_types=[req_type], data=[data])[0]
async def async_recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
self._prefetch_resps()
for req_id in self.resp_dict:
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
return await self._async_resp_get()
def recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
coro = self.async_recv_any(que_timeout)
return self.run_until_complete(coro)
def recv_all(self, req_id: int, block: bool = True):
"""revceive all response with req_id."""
self._prefetch_resps()
resps = self.resp_dict.pop(req_id, [])
return resps
async def async_recv(self,
req_id: int,
que_timeout: float = None) -> Response:
"""receive response of given request id async."""
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = await self._async_resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
def recv(self, req_id: int, que_timeout: float = None) -> Response:
"""receive response of given request id.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_recv(req_id, que_timeout)
return self.run_until_complete(coro)
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = self._resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
async def async_send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None):
"""send and receive synchronize."""
req_id = await self.async_send_async(req_type, data)
return await self.async_recv(req_id, que_timeout=que_timeout)
def send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None) -> Response:
"""send and receive synchronize."""
req_id = self.send_async(req_type, data)
return self.recv(req_id, que_timeout=que_timeout)
def response_callback(self, resp: Response):
"""response callback."""
self.resp_que.put_nowait(resp)
The provided code snippet includes necessary dependencies for implementing the `async_end` function. Write a Python function `async def async_end(req_sender: RequestSender, session_id: int)` to solve the following problem:
End the given session.
Here is the function:
async def async_end(req_sender: RequestSender, session_id: int):
"""End the given session."""
resp = await req_sender.async_send(RequestType.END_SESSION,
dict(session_id=session_id))
_check_resp_success(resp, (f'Failed to end session: {session_id}. '
f'Error: {resp.type}.')) | End the given session. |
8,207 | import asyncio
import os
from dataclasses import dataclass
from typing import Any, Dict, List
import torch
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
ResponseType)
from lmdeploy.tokenizer import Tokenizer
from lmdeploy.utils import get_logger, get_model, logging_timer
from ..adapter.adapter import ADAPTER_MANAGER, SchedulerAdapter
from ..check_env import check_env, check_model
from ..config import CacheConfig, SchedulerConfig
from ..messages import MessageStatus, SamplingParam, SchedulerSequence
from ..paging import Scheduler
from .logits_process import FusedLogitsProcessor, SamplingInputs
from .model_agent import AutoModelAgent, ModelInputs
from .request import (Request, RequestManager, RequestSender, RequestType,
Response)
def _check_resp_success(resp: Response, warning_msg: str = None):
"""check if response success."""
return _check_resp(resp, ResponseType.SUCCESS, warning_msg)
class RequestType(enum.Enum):
"""Request type."""
ADD_SESSION = enum.auto()
ADD_MESSAGE = enum.auto()
STOP_SESSION = enum.auto()
END_SESSION = enum.auto()
STOP_ENGINE = enum.auto()
RESUME_ENGINE = enum.auto()
class RequestSender:
"""Request sender.
Args:
sender_id (int): The id of the sender
"""
sender_id: int
manager: 'RequestManager'
resp_dict: Dict[int, List[Response]] = field(default_factory=dict)
_next_req_id: int = 0
_resp_que: asyncio.Queue = None
_resp_thread_que: Queue = None
def new(cls, sender_id: int, manager: 'RequestManager'):
"""new."""
return cls(sender_id=sender_id, manager=manager)
def resp_que(self):
"""response queue."""
if self.is_thread_safe():
return self.manager.responses
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if self._resp_que is None:
self._resp_que = asyncio.Queue()
return self._resp_que
def req_que(self):
"""request queue."""
return self.manager.requests
def resp_thread_que(self):
"""response threadsafe queue."""
if self._resp_thread_que is None:
self._resp_thread_que = Queue()
return self._resp_thread_que
def req_thread_que(self):
"""request threadsafe queue."""
return self.manager.thread_requests
def event_loop(self):
"""get event loop."""
return self.manager.event_loop
def is_thread_safe(self):
"""is thread safe."""
return self.manager.is_thread_safe()
def is_loop_alive(self):
"""is loop alive."""
return self.manager.is_loop_alive()
def run_until_complete(self, future: Awaitable):
"""run untile complete."""
return self.manager.run_until_complete(future)
def _resp_get(self):
"""resp_que.get."""
timeout = 1
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
ret = self.resp_thread_que.get(timeout=timeout)
return ret
except Empty:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
async def _async_resp_get(self):
"""get resp.
Different behavior in threadsafe mode.
"""
timeout = 1
async def __no_threadsafe_get():
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
return await asyncio.wait_for(self.resp_que.get(), timeout)
except asyncio.TimeoutError:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
if self.is_thread_safe():
ret = self._resp_get()
await asyncio.sleep(0)
return ret
else:
return await __no_threadsafe_get()
def _req_put(self, reqs: Any):
"""req put."""
self.req_thread_que.put(reqs)
async def _async_req_put(self, reqs: Any):
"""async rq_que put.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
self._req_put(reqs)
await asyncio.sleep(0)
else:
await self.req_que.put(reqs)
def _prefetch_resps(self):
"""prefetch from resp que.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
resp_que = self.resp_thread_que
else:
resp_que = self.resp_que
num_resps = resp_que.qsize()
for _ in range(num_resps):
resp: Response = resp_que.get_nowait()
req_id = resp.req_id
self._push_resp(req_id, resp)
def _push_resp(self, req_id: int, resp: Response):
"""push response."""
self.resp_dict.setdefault(req_id, [])
self.resp_dict[req_id].append(resp)
def _pop_resp(self, req_id: int, default: Any = None):
"""pop response."""
if req_id not in self.resp_dict:
return default
resps = self.resp_dict[req_id]
ret = resps.pop(0)
if len(resps) == 0:
self.resp_dict.pop(req_id)
return ret
def _gather_request(self, req_types: List[RequestType], data: List[Any]):
"""gather requests."""
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if not self.is_loop_alive():
logger.error('Engine main loop stopped.')
exit(1)
assert len(req_types) == len(data)
batch_size = len(req_types)
req_ids = list(range(self._next_req_id,
self._next_req_id + batch_size))
self._next_req_id += batch_size
reqs = [
Request(type=rtype,
sender_id=self.sender_id,
req_id=req_id,
data=rdata)
for req_id, rtype, rdata in zip(req_ids, req_types, data)
]
return req_ids, reqs
async def async_batched_send_async(self, req_types: List[RequestType],
data: List[Any]):
"""Batched send request asynchronize."""
req_ids, reqs = self._gather_request(req_types, data)
await self._async_req_put(reqs)
return req_ids
async def async_send_async(self, req_type: RequestType, data: Any):
"""send request asynchronize."""
return (await self.async_batched_send_async(req_types=[req_type],
data=[data]))[0]
def batched_send_async(self, req_types: List[RequestType],
data: List[Any]) -> List[int]:
"""Batched send request asynchronize.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_batched_send_async(req_types, data)
return self.run_until_complete(coro)
req_ids, reqs = self._gather_request(req_types, data)
self._req_put(reqs)
return req_ids
def send_async(self, req_type: RequestType, data: Any) -> int:
"""send request asynchronize."""
return self.batched_send_async(req_types=[req_type], data=[data])[0]
async def async_recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
self._prefetch_resps()
for req_id in self.resp_dict:
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
return await self._async_resp_get()
def recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
coro = self.async_recv_any(que_timeout)
return self.run_until_complete(coro)
def recv_all(self, req_id: int, block: bool = True):
"""revceive all response with req_id."""
self._prefetch_resps()
resps = self.resp_dict.pop(req_id, [])
return resps
async def async_recv(self,
req_id: int,
que_timeout: float = None) -> Response:
"""receive response of given request id async."""
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = await self._async_resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
def recv(self, req_id: int, que_timeout: float = None) -> Response:
"""receive response of given request id.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_recv(req_id, que_timeout)
return self.run_until_complete(coro)
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = self._resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
async def async_send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None):
"""send and receive synchronize."""
req_id = await self.async_send_async(req_type, data)
return await self.async_recv(req_id, que_timeout=que_timeout)
def send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None) -> Response:
"""send and receive synchronize."""
req_id = self.send_async(req_type, data)
return self.recv(req_id, que_timeout=que_timeout)
def response_callback(self, resp: Response):
"""response callback."""
self.resp_que.put_nowait(resp)
The provided code snippet includes necessary dependencies for implementing the `async_cancel` function. Write a Python function `async def async_cancel(req_sender: RequestSender, session_id: int)` to solve the following problem:
Stop current streaming inference.
Here is the function:
async def async_cancel(req_sender: RequestSender, session_id: int):
"""Stop current streaming inference."""
resp = await req_sender.async_send(RequestType.STOP_SESSION,
dict(session_id=session_id))
_check_resp_success(resp, (f'Failed to cancel session: {session_id}. '
f'Error: {resp.type}.')) | Stop current streaming inference. |
8,208 | import asyncio
import os
from dataclasses import dataclass
from typing import Any, Dict, List
import torch
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
ResponseType)
from lmdeploy.tokenizer import Tokenizer
from lmdeploy.utils import get_logger, get_model, logging_timer
from ..adapter.adapter import ADAPTER_MANAGER, SchedulerAdapter
from ..check_env import check_env, check_model
from ..config import CacheConfig, SchedulerConfig
from ..messages import MessageStatus, SamplingParam, SchedulerSequence
from ..paging import Scheduler
from .logits_process import FusedLogitsProcessor, SamplingInputs
from .model_agent import AutoModelAgent, ModelInputs
from .request import (Request, RequestManager, RequestSender, RequestType,
Response)
def _check_resp(resp: Response, state: ResponseType, warning_msg: str = None):
"""check if response has state."""
if isinstance(state, ResponseType):
state = [state]
ret = resp.type in state
if not ret and warning_msg is not None:
logger.warning(warning_msg)
return ret
class ResponseType(enum.Enum):
"""Response type."""
SUCCESS = enum.auto()
FINISH = enum.auto()
ENGINE_STOP_ERROR = enum.auto()
SESSION_REPEAT = enum.auto()
SESSION_NOT_EXIST = enum.auto()
HANDLER_NOT_EXIST = enum.auto()
class RequestType(enum.Enum):
"""Request type."""
ADD_SESSION = enum.auto()
ADD_MESSAGE = enum.auto()
STOP_SESSION = enum.auto()
END_SESSION = enum.auto()
STOP_ENGINE = enum.auto()
RESUME_ENGINE = enum.auto()
class RequestSender:
"""Request sender.
Args:
sender_id (int): The id of the sender
"""
sender_id: int
manager: 'RequestManager'
resp_dict: Dict[int, List[Response]] = field(default_factory=dict)
_next_req_id: int = 0
_resp_que: asyncio.Queue = None
_resp_thread_que: Queue = None
def new(cls, sender_id: int, manager: 'RequestManager'):
"""new."""
return cls(sender_id=sender_id, manager=manager)
def resp_que(self):
"""response queue."""
if self.is_thread_safe():
return self.manager.responses
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if self._resp_que is None:
self._resp_que = asyncio.Queue()
return self._resp_que
def req_que(self):
"""request queue."""
return self.manager.requests
def resp_thread_que(self):
"""response threadsafe queue."""
if self._resp_thread_que is None:
self._resp_thread_que = Queue()
return self._resp_thread_que
def req_thread_que(self):
"""request threadsafe queue."""
return self.manager.thread_requests
def event_loop(self):
"""get event loop."""
return self.manager.event_loop
def is_thread_safe(self):
"""is thread safe."""
return self.manager.is_thread_safe()
def is_loop_alive(self):
"""is loop alive."""
return self.manager.is_loop_alive()
def run_until_complete(self, future: Awaitable):
"""run untile complete."""
return self.manager.run_until_complete(future)
def _resp_get(self):
"""resp_que.get."""
timeout = 1
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
ret = self.resp_thread_que.get(timeout=timeout)
return ret
except Empty:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
async def _async_resp_get(self):
"""get resp.
Different behavior in threadsafe mode.
"""
timeout = 1
async def __no_threadsafe_get():
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
return await asyncio.wait_for(self.resp_que.get(), timeout)
except asyncio.TimeoutError:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
if self.is_thread_safe():
ret = self._resp_get()
await asyncio.sleep(0)
return ret
else:
return await __no_threadsafe_get()
def _req_put(self, reqs: Any):
"""req put."""
self.req_thread_que.put(reqs)
async def _async_req_put(self, reqs: Any):
"""async rq_que put.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
self._req_put(reqs)
await asyncio.sleep(0)
else:
await self.req_que.put(reqs)
def _prefetch_resps(self):
"""prefetch from resp que.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
resp_que = self.resp_thread_que
else:
resp_que = self.resp_que
num_resps = resp_que.qsize()
for _ in range(num_resps):
resp: Response = resp_que.get_nowait()
req_id = resp.req_id
self._push_resp(req_id, resp)
def _push_resp(self, req_id: int, resp: Response):
"""push response."""
self.resp_dict.setdefault(req_id, [])
self.resp_dict[req_id].append(resp)
def _pop_resp(self, req_id: int, default: Any = None):
"""pop response."""
if req_id not in self.resp_dict:
return default
resps = self.resp_dict[req_id]
ret = resps.pop(0)
if len(resps) == 0:
self.resp_dict.pop(req_id)
return ret
def _gather_request(self, req_types: List[RequestType], data: List[Any]):
"""gather requests."""
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if not self.is_loop_alive():
logger.error('Engine main loop stopped.')
exit(1)
assert len(req_types) == len(data)
batch_size = len(req_types)
req_ids = list(range(self._next_req_id,
self._next_req_id + batch_size))
self._next_req_id += batch_size
reqs = [
Request(type=rtype,
sender_id=self.sender_id,
req_id=req_id,
data=rdata)
for req_id, rtype, rdata in zip(req_ids, req_types, data)
]
return req_ids, reqs
async def async_batched_send_async(self, req_types: List[RequestType],
data: List[Any]):
"""Batched send request asynchronize."""
req_ids, reqs = self._gather_request(req_types, data)
await self._async_req_put(reqs)
return req_ids
async def async_send_async(self, req_type: RequestType, data: Any):
"""send request asynchronize."""
return (await self.async_batched_send_async(req_types=[req_type],
data=[data]))[0]
def batched_send_async(self, req_types: List[RequestType],
data: List[Any]) -> List[int]:
"""Batched send request asynchronize.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_batched_send_async(req_types, data)
return self.run_until_complete(coro)
req_ids, reqs = self._gather_request(req_types, data)
self._req_put(reqs)
return req_ids
def send_async(self, req_type: RequestType, data: Any) -> int:
"""send request asynchronize."""
return self.batched_send_async(req_types=[req_type], data=[data])[0]
async def async_recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
self._prefetch_resps()
for req_id in self.resp_dict:
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
return await self._async_resp_get()
def recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
coro = self.async_recv_any(que_timeout)
return self.run_until_complete(coro)
def recv_all(self, req_id: int, block: bool = True):
"""revceive all response with req_id."""
self._prefetch_resps()
resps = self.resp_dict.pop(req_id, [])
return resps
async def async_recv(self,
req_id: int,
que_timeout: float = None) -> Response:
"""receive response of given request id async."""
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = await self._async_resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
def recv(self, req_id: int, que_timeout: float = None) -> Response:
"""receive response of given request id.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_recv(req_id, que_timeout)
return self.run_until_complete(coro)
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = self._resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
async def async_send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None):
"""send and receive synchronize."""
req_id = await self.async_send_async(req_type, data)
return await self.async_recv(req_id, que_timeout=que_timeout)
def send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None) -> Response:
"""send and receive synchronize."""
req_id = self.send_async(req_type, data)
return self.recv(req_id, que_timeout=que_timeout)
def response_callback(self, resp: Response):
"""response callback."""
self.resp_que.put_nowait(resp)
The provided code snippet includes necessary dependencies for implementing the `try_add_session` function. Write a Python function `def try_add_session(req_sender: RequestSender, session_id: int)` to solve the following problem:
Add new session. Args: session_id (int): The session id to add.
Here is the function:
def try_add_session(req_sender: RequestSender, session_id: int):
"""Add new session.
Args:
session_id (int): The session id to add.
"""
resp = req_sender.send(RequestType.ADD_SESSION,
dict(session_id=session_id))
_check_resp(resp, [ResponseType.SUCCESS, ResponseType.SESSION_REPEAT],
(f'Can not add session {session_id} '
f'with error: {resp.type}')) | Add new session. Args: session_id (int): The session id to add. |
8,209 | import asyncio
import os
from dataclasses import dataclass
from typing import Any, Dict, List
import torch
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
ResponseType)
from lmdeploy.tokenizer import Tokenizer
from lmdeploy.utils import get_logger, get_model, logging_timer
from ..adapter.adapter import ADAPTER_MANAGER, SchedulerAdapter
from ..check_env import check_env, check_model
from ..config import CacheConfig, SchedulerConfig
from ..messages import MessageStatus, SamplingParam, SchedulerSequence
from ..paging import Scheduler
from .logits_process import FusedLogitsProcessor, SamplingInputs
from .model_agent import AutoModelAgent, ModelInputs
from .request import (Request, RequestManager, RequestSender, RequestType,
Response)
def _check_resp_success(resp: Response, warning_msg: str = None):
"""check if response success."""
return _check_resp(resp, ResponseType.SUCCESS, warning_msg)
class RequestType(enum.Enum):
"""Request type."""
ADD_SESSION = enum.auto()
ADD_MESSAGE = enum.auto()
STOP_SESSION = enum.auto()
END_SESSION = enum.auto()
STOP_ENGINE = enum.auto()
RESUME_ENGINE = enum.auto()
class RequestSender:
"""Request sender.
Args:
sender_id (int): The id of the sender
"""
sender_id: int
manager: 'RequestManager'
resp_dict: Dict[int, List[Response]] = field(default_factory=dict)
_next_req_id: int = 0
_resp_que: asyncio.Queue = None
_resp_thread_que: Queue = None
def new(cls, sender_id: int, manager: 'RequestManager'):
"""new."""
return cls(sender_id=sender_id, manager=manager)
def resp_que(self):
"""response queue."""
if self.is_thread_safe():
return self.manager.responses
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if self._resp_que is None:
self._resp_que = asyncio.Queue()
return self._resp_que
def req_que(self):
"""request queue."""
return self.manager.requests
def resp_thread_que(self):
"""response threadsafe queue."""
if self._resp_thread_que is None:
self._resp_thread_que = Queue()
return self._resp_thread_que
def req_thread_que(self):
"""request threadsafe queue."""
return self.manager.thread_requests
def event_loop(self):
"""get event loop."""
return self.manager.event_loop
def is_thread_safe(self):
"""is thread safe."""
return self.manager.is_thread_safe()
def is_loop_alive(self):
"""is loop alive."""
return self.manager.is_loop_alive()
def run_until_complete(self, future: Awaitable):
"""run untile complete."""
return self.manager.run_until_complete(future)
def _resp_get(self):
"""resp_que.get."""
timeout = 1
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
ret = self.resp_thread_que.get(timeout=timeout)
return ret
except Empty:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
async def _async_resp_get(self):
"""get resp.
Different behavior in threadsafe mode.
"""
timeout = 1
async def __no_threadsafe_get():
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
return await asyncio.wait_for(self.resp_que.get(), timeout)
except asyncio.TimeoutError:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
if self.is_thread_safe():
ret = self._resp_get()
await asyncio.sleep(0)
return ret
else:
return await __no_threadsafe_get()
def _req_put(self, reqs: Any):
"""req put."""
self.req_thread_que.put(reqs)
async def _async_req_put(self, reqs: Any):
"""async rq_que put.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
self._req_put(reqs)
await asyncio.sleep(0)
else:
await self.req_que.put(reqs)
def _prefetch_resps(self):
"""prefetch from resp que.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
resp_que = self.resp_thread_que
else:
resp_que = self.resp_que
num_resps = resp_que.qsize()
for _ in range(num_resps):
resp: Response = resp_que.get_nowait()
req_id = resp.req_id
self._push_resp(req_id, resp)
def _push_resp(self, req_id: int, resp: Response):
"""push response."""
self.resp_dict.setdefault(req_id, [])
self.resp_dict[req_id].append(resp)
def _pop_resp(self, req_id: int, default: Any = None):
"""pop response."""
if req_id not in self.resp_dict:
return default
resps = self.resp_dict[req_id]
ret = resps.pop(0)
if len(resps) == 0:
self.resp_dict.pop(req_id)
return ret
def _gather_request(self, req_types: List[RequestType], data: List[Any]):
"""gather requests."""
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if not self.is_loop_alive():
logger.error('Engine main loop stopped.')
exit(1)
assert len(req_types) == len(data)
batch_size = len(req_types)
req_ids = list(range(self._next_req_id,
self._next_req_id + batch_size))
self._next_req_id += batch_size
reqs = [
Request(type=rtype,
sender_id=self.sender_id,
req_id=req_id,
data=rdata)
for req_id, rtype, rdata in zip(req_ids, req_types, data)
]
return req_ids, reqs
async def async_batched_send_async(self, req_types: List[RequestType],
data: List[Any]):
"""Batched send request asynchronize."""
req_ids, reqs = self._gather_request(req_types, data)
await self._async_req_put(reqs)
return req_ids
async def async_send_async(self, req_type: RequestType, data: Any):
"""send request asynchronize."""
return (await self.async_batched_send_async(req_types=[req_type],
data=[data]))[0]
def batched_send_async(self, req_types: List[RequestType],
data: List[Any]) -> List[int]:
"""Batched send request asynchronize.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_batched_send_async(req_types, data)
return self.run_until_complete(coro)
req_ids, reqs = self._gather_request(req_types, data)
self._req_put(reqs)
return req_ids
def send_async(self, req_type: RequestType, data: Any) -> int:
"""send request asynchronize."""
return self.batched_send_async(req_types=[req_type], data=[data])[0]
async def async_recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
self._prefetch_resps()
for req_id in self.resp_dict:
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
return await self._async_resp_get()
def recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
coro = self.async_recv_any(que_timeout)
return self.run_until_complete(coro)
def recv_all(self, req_id: int, block: bool = True):
"""revceive all response with req_id."""
self._prefetch_resps()
resps = self.resp_dict.pop(req_id, [])
return resps
async def async_recv(self,
req_id: int,
que_timeout: float = None) -> Response:
"""receive response of given request id async."""
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = await self._async_resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
def recv(self, req_id: int, que_timeout: float = None) -> Response:
"""receive response of given request id.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_recv(req_id, que_timeout)
return self.run_until_complete(coro)
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = self._resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
async def async_send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None):
"""send and receive synchronize."""
req_id = await self.async_send_async(req_type, data)
return await self.async_recv(req_id, que_timeout=que_timeout)
def send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None) -> Response:
"""send and receive synchronize."""
req_id = self.send_async(req_type, data)
return self.recv(req_id, que_timeout=que_timeout)
def response_callback(self, resp: Response):
"""response callback."""
self.resp_que.put_nowait(resp)
The provided code snippet includes necessary dependencies for implementing the `end` function. Write a Python function `def end(req_sender: RequestSender, session_id: int)` to solve the following problem:
End the given session.
Here is the function:
def end(req_sender: RequestSender, session_id: int):
"""End the given session."""
resp = req_sender.send(RequestType.END_SESSION,
dict(session_id=session_id))
_check_resp_success(resp, (f'Failed to end session: {session_id}. '
f'Error: {resp.type}.')) | End the given session. |
8,210 | import asyncio
import os
from dataclasses import dataclass
from typing import Any, Dict, List
import torch
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
ResponseType)
from lmdeploy.tokenizer import Tokenizer
from lmdeploy.utils import get_logger, get_model, logging_timer
from ..adapter.adapter import ADAPTER_MANAGER, SchedulerAdapter
from ..check_env import check_env, check_model
from ..config import CacheConfig, SchedulerConfig
from ..messages import MessageStatus, SamplingParam, SchedulerSequence
from ..paging import Scheduler
from .logits_process import FusedLogitsProcessor, SamplingInputs
from .model_agent import AutoModelAgent, ModelInputs
from .request import (Request, RequestManager, RequestSender, RequestType,
Response)
def _check_resp_success(resp: Response, warning_msg: str = None):
"""check if response success."""
return _check_resp(resp, ResponseType.SUCCESS, warning_msg)
class RequestType(enum.Enum):
"""Request type."""
ADD_SESSION = enum.auto()
ADD_MESSAGE = enum.auto()
STOP_SESSION = enum.auto()
END_SESSION = enum.auto()
STOP_ENGINE = enum.auto()
RESUME_ENGINE = enum.auto()
class RequestSender:
"""Request sender.
Args:
sender_id (int): The id of the sender
"""
sender_id: int
manager: 'RequestManager'
resp_dict: Dict[int, List[Response]] = field(default_factory=dict)
_next_req_id: int = 0
_resp_que: asyncio.Queue = None
_resp_thread_que: Queue = None
def new(cls, sender_id: int, manager: 'RequestManager'):
"""new."""
return cls(sender_id=sender_id, manager=manager)
def resp_que(self):
"""response queue."""
if self.is_thread_safe():
return self.manager.responses
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if self._resp_que is None:
self._resp_que = asyncio.Queue()
return self._resp_que
def req_que(self):
"""request queue."""
return self.manager.requests
def resp_thread_que(self):
"""response threadsafe queue."""
if self._resp_thread_que is None:
self._resp_thread_que = Queue()
return self._resp_thread_que
def req_thread_que(self):
"""request threadsafe queue."""
return self.manager.thread_requests
def event_loop(self):
"""get event loop."""
return self.manager.event_loop
def is_thread_safe(self):
"""is thread safe."""
return self.manager.is_thread_safe()
def is_loop_alive(self):
"""is loop alive."""
return self.manager.is_loop_alive()
def run_until_complete(self, future: Awaitable):
"""run untile complete."""
return self.manager.run_until_complete(future)
def _resp_get(self):
"""resp_que.get."""
timeout = 1
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
ret = self.resp_thread_que.get(timeout=timeout)
return ret
except Empty:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
async def _async_resp_get(self):
"""get resp.
Different behavior in threadsafe mode.
"""
timeout = 1
async def __no_threadsafe_get():
while True:
if not self.manager.is_loop_alive():
logger.debug('Engine loop is not alive.')
exit(1)
try:
return await asyncio.wait_for(self.resp_que.get(), timeout)
except asyncio.TimeoutError:
continue
except Exception as e:
logger.exception(
f'sender[{self.sender_id}] get response failed: {e}')
raise e
if self.is_thread_safe():
ret = self._resp_get()
await asyncio.sleep(0)
return ret
else:
return await __no_threadsafe_get()
def _req_put(self, reqs: Any):
"""req put."""
self.req_thread_que.put(reqs)
async def _async_req_put(self, reqs: Any):
"""async rq_que put.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
self._req_put(reqs)
await asyncio.sleep(0)
else:
await self.req_que.put(reqs)
def _prefetch_resps(self):
"""prefetch from resp que.
Different behavior in threadsafe mode.
"""
if self.is_thread_safe():
resp_que = self.resp_thread_que
else:
resp_que = self.resp_que
num_resps = resp_que.qsize()
for _ in range(num_resps):
resp: Response = resp_que.get_nowait()
req_id = resp.req_id
self._push_resp(req_id, resp)
def _push_resp(self, req_id: int, resp: Response):
"""push response."""
self.resp_dict.setdefault(req_id, [])
self.resp_dict[req_id].append(resp)
def _pop_resp(self, req_id: int, default: Any = None):
"""pop response."""
if req_id not in self.resp_dict:
return default
resps = self.resp_dict[req_id]
ret = resps.pop(0)
if len(resps) == 0:
self.resp_dict.pop(req_id)
return ret
def _gather_request(self, req_types: List[RequestType], data: List[Any]):
"""gather requests."""
if self.manager._loop_task is None and not self.is_thread_safe():
self.manager.create_loop_task()
if not self.is_loop_alive():
logger.error('Engine main loop stopped.')
exit(1)
assert len(req_types) == len(data)
batch_size = len(req_types)
req_ids = list(range(self._next_req_id,
self._next_req_id + batch_size))
self._next_req_id += batch_size
reqs = [
Request(type=rtype,
sender_id=self.sender_id,
req_id=req_id,
data=rdata)
for req_id, rtype, rdata in zip(req_ids, req_types, data)
]
return req_ids, reqs
async def async_batched_send_async(self, req_types: List[RequestType],
data: List[Any]):
"""Batched send request asynchronize."""
req_ids, reqs = self._gather_request(req_types, data)
await self._async_req_put(reqs)
return req_ids
async def async_send_async(self, req_type: RequestType, data: Any):
"""send request asynchronize."""
return (await self.async_batched_send_async(req_types=[req_type],
data=[data]))[0]
def batched_send_async(self, req_types: List[RequestType],
data: List[Any]) -> List[int]:
"""Batched send request asynchronize.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_batched_send_async(req_types, data)
return self.run_until_complete(coro)
req_ids, reqs = self._gather_request(req_types, data)
self._req_put(reqs)
return req_ids
def send_async(self, req_type: RequestType, data: Any) -> int:
"""send request asynchronize."""
return self.batched_send_async(req_types=[req_type], data=[data])[0]
async def async_recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
self._prefetch_resps()
for req_id in self.resp_dict:
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
return await self._async_resp_get()
def recv_any(self, que_timeout: float = None) -> Response:
"""receive any response."""
coro = self.async_recv_any(que_timeout)
return self.run_until_complete(coro)
def recv_all(self, req_id: int, block: bool = True):
"""revceive all response with req_id."""
self._prefetch_resps()
resps = self.resp_dict.pop(req_id, [])
return resps
async def async_recv(self,
req_id: int,
que_timeout: float = None) -> Response:
"""receive response of given request id async."""
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = await self._async_resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
def recv(self, req_id: int, que_timeout: float = None) -> Response:
"""receive response of given request id.
Different behavior in threadsafe mode.
"""
if not self.is_thread_safe():
coro = self.async_recv(req_id, que_timeout)
return self.run_until_complete(coro)
ret = self._pop_resp(req_id, default=None)
if ret is not None:
return ret
# check resp que
while True:
resp: Response = self._resp_get()
if resp.req_id != req_id:
self._push_resp(req_id, resp)
else:
return resp
async def async_send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None):
"""send and receive synchronize."""
req_id = await self.async_send_async(req_type, data)
return await self.async_recv(req_id, que_timeout=que_timeout)
def send(self,
req_type: RequestType,
data: Any,
que_timeout: float = None) -> Response:
"""send and receive synchronize."""
req_id = self.send_async(req_type, data)
return self.recv(req_id, que_timeout=que_timeout)
def response_callback(self, resp: Response):
"""response callback."""
self.resp_que.put_nowait(resp)
The provided code snippet includes necessary dependencies for implementing the `cancel` function. Write a Python function `def cancel(req_sender: RequestSender, session_id: int)` to solve the following problem:
Stop current streaming inference.
Here is the function:
def cancel(req_sender: RequestSender, session_id: int):
"""Stop current streaming inference."""
resp = req_sender.send(RequestType.STOP_SESSION,
dict(session_id=session_id))
_check_resp_success(resp, (f'Failed to cancel session: {session_id}. '
f'Error: {resp.type}.')) | Stop current streaming inference. |
8,211 | from dataclasses import dataclass
import numpy as np
def _div_up(x, n):
"""perform div up."""
return (x + n - 1) // n
The provided code snippet includes necessary dependencies for implementing the `_round_up` function. Write a Python function `def _round_up(x, n)` to solve the following problem:
perform round up.
Here is the function:
def _round_up(x, n):
"""perform round up."""
return _div_up(x, n) * n | perform round up. |
8,212 | from typing import List, Optional, Tuple
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.utils.checkpoint
from torch.distributed._tensor import DeviceMesh, Shard, distribute_tensor
from transformers.modeling_outputs import BaseModelOutputWithPast
from ..dist_utils import (colwise_parallelize_linear,
rowwise_parallelize_linear_fn, try_to_local)
from ..kernels import paged_attention_fwd
from .functional import fill_kv_cache
The provided code snippet includes necessary dependencies for implementing the `split_tensor_along_last_dim` function. Write a Python function `def split_tensor_along_last_dim( tensor: torch.Tensor, num_partitions: int, contiguous_split_chunks: bool = False, ) -> List[torch.Tensor]` to solve the following problem:
Split a tensor along its last dimension. Arguments: tensor: input tensor. num_partitions: number of partitions to split the tensor contiguous_split_chunks: If True, make each chunk contiguous in memory. Returns: A list of Tensors
Here is the function:
def split_tensor_along_last_dim(
tensor: torch.Tensor,
num_partitions: int,
contiguous_split_chunks: bool = False,
) -> List[torch.Tensor]:
"""Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
Returns:
A list of Tensors
"""
tensor_list = tensor.chunk(num_partitions, dim=-1)
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list | Split a tensor along its last dimension. Arguments: tensor: input tensor. num_partitions: number of partitions to split the tensor contiguous_split_chunks: If True, make each chunk contiguous in memory. Returns: A list of Tensors |
8,213 | from typing import List, Optional, Tuple
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.utils.checkpoint
from torch.distributed._tensor import DeviceMesh, Shard, distribute_tensor
from transformers.modeling_outputs import BaseModelOutputWithPast
from ..dist_utils import (colwise_parallelize_linear,
rowwise_parallelize_linear_fn, try_to_local)
from ..kernels import paged_attention_fwd
from .functional import fill_kv_cache
def apply_rotary_pos_emb(x: torch.Tensor,
rope_cache: torch.Tensor) -> torch.Tensor:
# x: [sq, b, np, hn]
sq, hn = x.size(0), x.size(-1)
xslice = x[..., :hn // 2]
rope_cache = rope_cache[:sq]
xshaped = xslice.unflatten(-1, (-1, 2))
rope_cache = rope_cache.unsqueeze(2)
# inplace
torch.stack(
[
xshaped[..., 0] * rope_cache[..., 0] -
xshaped[..., 1] * rope_cache[..., 1],
xshaped[..., 1] * rope_cache[..., 0] +
xshaped[..., 0] * rope_cache[..., 1],
],
-1,
out=xshaped,
)
return x | null |
8,214 | from typing import List, Optional, Tuple, Union
import torch
import torch.distributed as dist
import transformers
from packaging import version
from torch import nn
from torch.distributed._tensor import DeviceMesh
from transformers.modeling_outputs import BaseModelOutputWithPast
from ..dist_utils import (colwise_parallelize_linear_fn,
rowwise_parallelize_linear_fn)
from ..kernels import apply_rotary_pos_emb as apply_rotary_pos_emb_old
from ..kernels import fill_kv_cache, fused_rotary_emb, paged_attention_fwd
from .functional import attention_forward_with_rerope, repeat_kv
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., :x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=-1)
The provided code snippet includes necessary dependencies for implementing the `apply_rotary_pos_emb` function. Write a Python function `def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1)` to solve the following problem:
Applies Rotary Position Embedding to the query and key tensors.
Here is the function:
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors."""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed | Applies Rotary Position Embedding to the query and key tensors. |
8,215 | from typing import List, Optional, Tuple, Union
import torch
import torch.distributed as dist
from torch import nn
from torch.distributed._tensor import DeviceMesh, Shard, distribute_tensor
from transformers.modeling_outputs import BaseModelOutputWithPast
from ..dist_utils import (colwise_parallelize_linear_fn,
rowwise_parallelize_linear_fn, try_to_local)
from ..kernels import apply_rotary_pos_emb
from ..kernels.alibi_pagedattention import alibi_paged_attention_fwd
from ..kernels.fill_kv_cache import fill_kv_cache
from ..kernels.pagedattention import paged_attention_fwd
def try_to_local(tensor: Union[Tensor, DTensor]):
"""Try to convert DTensor to Tensor.
Args:
tensor (Tensor|DTensor): Tensor to convert.
"""
if isinstance(tensor, DTensor):
tensor = tensor.to_local()
return tensor
def rowwise_parallelize_linear_fn(module: nn.Module,
device_mesh: DeviceMesh,
to_local: bool = False) -> None:
"""
This function parallelizes the input :Linear module in
:class:`RowwiseParallel` style.
Args:
module (:class:`nn.Module`):
The :class:`nn.Linear` module to be parallelized.
device_mesh (:class:`DeviceMesh`):
Object which describes the mesh topology of devices.
Returns:
None
"""
if isinstance(module, (torch.nn.Linear, QLinear)):
return rowwise_parallelize_linear(module,
device_mesh=device_mesh,
to_local=to_local)
elif isinstance(module, LoRALinear):
return rowwise_parallelize_loralinear(module,
device_mesh=device_mesh,
to_local=to_local)
else:
raise TypeError(f'Unsupported module: {type(module)}')
def colwise_parallelize_linear_fn(module: nn.Module,
device_mesh: DeviceMesh,
to_local: bool = False) -> None:
"""
This function parallelizes the input :Linear module in
:class:`ColwiseParallel` style.
Args:
module (:class:`nn.Module`):
The :class:`nn.Linear` module to be parallelized.
device_mesh (:class:`DeviceMesh`):
Object which describes the mesh topology of devices.
Returns:
None
"""
if isinstance(module, (torch.nn.Linear, QLinear)):
return colwise_parallelize_linear(module,
device_mesh=device_mesh,
to_local=to_local)
elif isinstance(module, LoRALinear):
return colwise_parallelize_loralinear(module,
device_mesh=device_mesh,
to_local=to_local)
else:
raise TypeError(f'Unsupported module: {type(module)}')
The provided code snippet includes necessary dependencies for implementing the `_attention_partition_fn` function. Write a Python function `def _attention_partition_fn(mod_name: str, mod: nn.Module, device_mesh: DeviceMesh)` to solve the following problem:
A function for attention partition.
Here is the function:
def _attention_partition_fn(mod_name: str, mod: nn.Module,
device_mesh: DeviceMesh):
"""A function for attention partition."""
def __w_pack_linear_fn(mod: nn.Module):
"""fn for w pack linear."""
for name, param in mod.named_parameters():
param = param.unflatten(0, (3, -1))
dist_tensor = distribute_tensor(param, device_mesh, [Shard(1)])
dist_tensor = try_to_local(dist_tensor)
dist_tensor = dist_tensor.flatten(0, 1)
dist_param = torch.nn.Parameter(dist_tensor)
mod.register_parameter(name, dist_param)
def __w_pack_lora_linear_fn(mod: nn.Module):
"""fn for w pack lora linear."""
mod._tp_mode = 'colwise'
base_layer = mod.base_layer
__w_pack_linear_fn(base_layer)
for lora_a_mod in mod.lora_A.values():
colwise_parallelize_linear_fn(lora_a_mod,
device_mesh=device_mesh,
to_local=True)
for lora_b_mod in mod.lora_B.values():
__w_pack_linear_fn(lora_b_mod)
if mod_name in ['W_pack']:
from peft.tuners.lora import Linear as LoraLinear
if isinstance(mod, LoraLinear):
__w_pack_lora_linear_fn(mod)
else:
__w_pack_linear_fn(mod)
elif mod_name in ['o_proj']:
rowwise_parallelize_linear_fn(mod,
device_mesh=device_mesh,
to_local=True) | A function for attention partition. |
8,216 | import math
from typing import Any, Callable, Optional, Sequence, Tuple
import numpy as np
import torch
from torch import Tensor
from ..kernels import apply_rotary_pos_emb, fill_kv_cache, rerope_attention_fwd
The provided code snippet includes necessary dependencies for implementing the `repeat_kv` function. Write a Python function `def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor` to solve the following problem:
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (num_key_value_heads, seqlen, head_dim) to (num_attention_heads, seqlen, head_dim)
Here is the function:
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""This is the equivalent of torch.repeat_interleave(x, dim=1,
repeats=n_rep).
The hidden states go from (num_key_value_heads, seqlen, head_dim) to
(num_attention_heads, seqlen, head_dim)
"""
if n_rep == 1:
return hidden_states
num_key_value_heads, slen, head_dim = hidden_states.shape
hidden_states = hidden_states[:,
None, :, :].expand(num_key_value_heads,
n_rep, slen, head_dim)
return hidden_states.reshape(num_key_value_heads * n_rep, slen, head_dim) | This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (num_key_value_heads, seqlen, head_dim) to (num_attention_heads, seqlen, head_dim) |
8,217 | import math
from typing import Any, Callable, Optional, Sequence, Tuple
import numpy as np
import torch
from torch import Tensor
from ..kernels import apply_rotary_pos_emb, fill_kv_cache, rerope_attention_fwd
The provided code snippet includes necessary dependencies for implementing the `generate_batched_mask` function. Write a Python function `def generate_batched_mask(q_lens, k_lens, max_q_len: int = None, max_k_len: int = None, device='cuda')` to solve the following problem:
Generate batched mask.
Here is the function:
def generate_batched_mask(q_lens,
k_lens,
max_q_len: int = None,
max_k_len: int = None,
device='cuda'):
"""Generate batched mask."""
if max_q_len is None:
max_q_len = max(q_lens)
if max_k_len is None:
max_k_len = max(k_lens)
q_range = torch.arange(max_q_len).to(device)
k_range = torch.arange(max_k_len).to(device)
cross = k_range.unsqueeze(0) - q_range.unsqueeze(1)
cross = cross.unsqueeze(0)
threshold = (k_lens - q_lens).view(-1, 1, 1)
mask = torch.where(cross <= threshold, 1, 0).to(device)
for idx, q_len in enumerate(q_lens):
mask[idx, q_len:, :] = 0
return mask | Generate batched mask. |
8,218 | import math
from typing import Any, Callable, Optional, Sequence, Tuple
import numpy as np
import torch
from torch import Tensor
from ..kernels import apply_rotary_pos_emb, fill_kv_cache, rerope_attention_fwd
def get_slopes(n: int):
"""Get alibi slopes."""
def _get_interleave_power_of_2(n):
start = 2**(-(2**-(math.log2(n) - 3)))
ratio = start
return [start * ratio**i for i in range(n)]
if math.log2(n).is_integer():
return _get_interleave_power_of_2(n)
else:
closest_power_of_2 = 2**math.floor(math.log2(n))
return (
_get_interleave_power_of_2(closest_power_of_2) +
get_slopes(2 * closest_power_of_2)[0::2][:n - closest_power_of_2])
The provided code snippet includes necessary dependencies for implementing the `get_alibi_biases` function. Write a Python function `def get_alibi_biases(n_heads: int, mask: torch.Tensor)` to solve the following problem:
Get alibi bias.
Here is the function:
def get_alibi_biases(n_heads: int, mask: torch.Tensor):
"""Get alibi bias."""
m = torch.tensor(get_slopes(n_heads)).to(mask.device)
distance = mask.cumsum(dim=-1) - 1
return distance * m[None, :, None, None] | Get alibi bias. |
8,219 | import math
from typing import Any, Callable, Optional, Sequence, Tuple
import numpy as np
import torch
from torch import Tensor
from ..kernels import apply_rotary_pos_emb, fill_kv_cache, rerope_attention_fwd
def quant_kv(key: torch.Tensor, value: torch.Tensor, out_type: torch.dtype):
"""Quantize key and value of attention to `out_type`.
Args:
key (torch.Tensor): Attention key.
value (torch.Tensor): Attention value.
out_type (torch.dtype): Output data type.
"""
assert out_type is torch.int8
# quantize key and value
_min = torch.min(key, axis=-1).values
_max = torch.max(key, axis=-1).values
key_zp = (_min + _max) / 2
key_scale = (_max - key_zp) / 127
key_int8 = torch.round(
(key - key_zp[:, :, None]) / key_scale[:, :, None]).to(out_type)
_min = torch.min(value, axis=-1).values
_max = torch.max(value, axis=-1).values
value_zp = (_min + _max) / 2
value_scale = (_max - value_zp) / 127
value_int8 = torch.round(
(value - value_zp[:, :, None]) / value_scale[:, :, None]).to(out_type)
# wrap zp and scale to qparams
qparams = {
'key_zp': key_zp,
'key_scale': key_scale,
'value_zp': value_zp,
'value_scale': value_scale,
}
return key_int8, value_int8, qparams
def dequant_kv(context: Any, layer_id: str, key_int8: torch.Tensor,
value_int8: torch.Tensor, out_type: torch.dtype):
"""Dequantize key and value of attention to `out_type`.
Args:
context (Any): StepContext during inference.
layer_id (str): Layer object id.
key (torch.Tensor): Quantized attention key.
value (torch.Tensor): Quantized attention value.
out_type (torch.dtype): output data type.
"""
qparams = context.get_output(layer_id)
key_scale = qparams['key_scale']
key_zp = qparams['key_zp']
key_float = (key_int8 * key_scale[:, :, None] +
key_zp[:, :, None]).to(out_type)
value_scale = qparams['value_scale']
value_zp = qparams['value_zp']
value_float = (value_int8 * value_scale[:, :, None] +
value_zp[:, :, None]).to(out_type)
return key_float, value_float
def sync_qparam_to_context(context: Any, layer_id: str, qparams: dict):
"""Merge quantization param to context.
Args:
context (Any): StepContext during inference.
layer_id (str): Layer object id.
qparams (dict): Quantization param of current step.
"""
if context.inputs.meta is not None:
last_qparam = context.inputs.meta[layer_id]
for _k in last_qparam.keys():
_v = torch.concat([last_qparam[_k], qparams[_k]], axis=0)
last_qparam[_k] = _v
context.set_output(layer_id, last_qparam)
else:
context.set_output(layer_id, qparams)
def fill_kv_cache(k_states: Tensor, v_states: Tensor, k_caches: Tensor,
v_caches: Tensor, q_start_loc: Tensor, q_seq_length: Tensor,
kv_seq_length: Tensor, max_q_seq_length: int,
block_offsets: Tensor):
"""fill key/value state to cache for paged attention."""
def _kernel_meta():
device = k_states.device
device_idx = device.index
device_type = device.type
stream = get_cuda_stream(device_idx)
return dict(device=device, device_type=device_type, stream=stream)
block_offsets = block_offsets.contiguous()
batch_size = block_offsets.size(0)
block_size, num_heads, head_dim = k_caches.size()[1:]
max_num_blocks = triton.cdiv(max_q_seq_length, block_size) + 1
BLOCK = block_size
BLOCK_H = triton.next_power_of_2(num_heads)
BLOCK_D = triton.next_power_of_2(head_dim)
grid = [batch_size, max_num_blocks]
kernel_meta = _kernel_meta()
_fill_kv_cache_kernel[grid](
k_states,
v_states,
k_caches,
v_caches,
q_start_loc,
q_seq_length,
kv_seq_length,
block_offsets,
num_heads=num_heads,
head_dim=head_dim,
stride_kss=k_states.stride(-3),
stride_ksh=k_states.stride(-2),
stride_ksd=k_states.stride(-1),
stride_vss=v_states.stride(-3),
stride_vsh=v_states.stride(-2),
stride_vsd=v_states.stride(-1),
stride_kcn=k_caches.stride(0),
stride_kcb=k_caches.stride(1),
stride_kch=k_caches.stride(2),
stride_kcd=k_caches.stride(3),
stride_vcn=v_caches.stride(0),
stride_vcb=v_caches.stride(1),
stride_vch=v_caches.stride(2),
stride_vcd=v_caches.stride(3),
stride_boff=block_offsets.stride(0),
BLOCK=BLOCK,
BLOCK_D=BLOCK_D,
BLOCK_H=BLOCK_H,
**kernel_meta,
)
The provided code snippet includes necessary dependencies for implementing the `attention_forward_with_rerope` function. Write a Python function `def attention_forward_with_rerope( hidden_states: Tensor, history_lengths: Sequence, block_offsets: Tensor, num_heads: int, num_kv_heads: int, head_dim: int, position_ids: torch.LongTensor, past_key_value: Tuple[Tensor], attention_mask: Tensor, context: Any = None, q_proj: Optional[Callable] = None, k_proj: Optional[Callable] = None, v_proj: Optional[Callable] = None, qkv_proj: Optional[Callable] = None, o_proj: Optional[Callable] = None, rotary_emb_context_fn: Optional[Callable] = None, rotary_emb_generate_fn: Optional[Callable] = None, bias_type: str = 'default', training_length=4096, window=512, layer_id: str = None) -> Tensor` to solve the following problem:
Attention module forward with ReRoPE. Args: hidden_states (Tensor): Input of attention layer. history_lengths (Sequence): Cache lengths of each data in batch. block_offsets (Tensor): Block table of the key/value caches, used by paged attention. num_heads (int): numbers of query heads. num_kv_heads (int): numbers of key/value heads. head_dim (int): Feature dimension of heads. position_ids (LongTensor): position ids of the input. past_key_value (Tuple[Tensor]): key value cache. q_proj (Callable): query project module/function. k_proj (Callable): key project module/function. v_proj (Callable): value project module/function. qkv_proj (Callable): query/key/value project module/function. o_proj (Callable): output project module/function. rotary_emb_context_fn (Callable): rotary embedding context callback. rotary_emb_generate_fn (Callable): rotary embedding generate callback. bias_type (str): type of attention bias. support ['default']. training_length (int): model sequence length during trainning. window (int): ReRoPE window size, default value is 512.
Here is the function:
def attention_forward_with_rerope(
hidden_states: Tensor,
history_lengths: Sequence,
block_offsets: Tensor,
num_heads: int,
num_kv_heads: int,
head_dim: int,
position_ids: torch.LongTensor,
past_key_value: Tuple[Tensor],
attention_mask: Tensor,
context: Any = None,
q_proj: Optional[Callable] = None,
k_proj: Optional[Callable] = None,
v_proj: Optional[Callable] = None,
qkv_proj: Optional[Callable] = None,
o_proj: Optional[Callable] = None,
rotary_emb_context_fn: Optional[Callable] = None,
rotary_emb_generate_fn: Optional[Callable] = None,
bias_type: str = 'default',
training_length=4096,
window=512,
layer_id: str = None) -> Tensor:
"""Attention module forward with ReRoPE.
Args:
hidden_states (Tensor): Input of attention layer.
history_lengths (Sequence): Cache lengths of each data in batch.
block_offsets (Tensor): Block table of the key/value caches,
used by paged attention.
num_heads (int): numbers of query heads.
num_kv_heads (int): numbers of key/value heads.
head_dim (int): Feature dimension of heads.
position_ids (LongTensor): position ids of the input.
past_key_value (Tuple[Tensor]): key value cache.
q_proj (Callable): query project module/function.
k_proj (Callable): key project module/function.
v_proj (Callable): value project module/function.
qkv_proj (Callable): query/key/value project module/function.
o_proj (Callable): output project module/function.
rotary_emb_context_fn (Callable): rotary embedding context callback.
rotary_emb_generate_fn (Callable): rotary embedding generate callback.
bias_type (str): type of attention bias. support ['default'].
training_length (int): model sequence length during training.
window (int): ReRoPE window size, default value is 512.
"""
hidden_size = -1
if qkv_proj is not None:
assert q_proj is None
assert k_proj is None
assert v_proj is None
query_states, key_states, value_states = qkv_proj(hidden_states)
else:
assert qkv_proj is None
assert q_proj is not None
assert k_proj is not None
assert v_proj is not None
query_states = q_proj(hidden_states)
key_states = k_proj(hidden_states)
value_states = v_proj(hidden_states)
hidden_size = num_heads * head_dim
query_states = query_states.view(-1, num_heads, head_dim)
key_states = key_states.view(-1, num_kv_heads, head_dim)
value_states = value_states.view(-1, num_kv_heads, head_dim)
query_states *= ((position_ids.flatten() + 1)[:, None, None].log() /
np.log(training_length)).clip(1).to(query_states.dtype)
kv_seq_length = (position_ids[..., -1] + 1).item()
q_seq_length = getattr(context, 'q_seq_length', None)
if q_seq_length is None:
q_seq_length = kv_seq_length - kv_seq_length.new_tensor(
history_lengths)
q_start_loc = getattr(context, 'q_start_loc', None)
if q_start_loc is None:
q_start_loc = q_seq_length.cumsum(0)
q_start_loc = torch.cat([q_start_loc.new_zeros(1), q_start_loc[:-1]])
if past_key_value[0].dtype != hidden_states.dtype:
# dynamic quantize hidden_states to kv_cache and save
quant = True
qkey, qvalue, qparams = quant_kv(key_states, value_states,
past_key_value[0].dtype)
sync_qparam_to_context(context=context,
layer_id=layer_id,
qparams=qparams)
fill_kv_cache(qkey,
qvalue,
past_key_value[0],
past_key_value[1],
q_start_loc,
q_seq_length,
block_offsets=block_offsets,
history_lengths=history_lengths,
context=context)
else:
fill_kv_cache(key_states,
value_states,
past_key_value[0],
past_key_value[1],
q_start_loc,
q_seq_length,
block_offsets=block_offsets,
history_lengths=history_lengths,
context=context)
bsz, q_len, _ = hidden_states.size()
if bias_type.lower() == 'default':
if q_len == 1:
key_states = past_key_value[0][block_offsets].view(
-1, num_heads, head_dim)[0:history_lengths[-1] + 1]
value_states = past_key_value[1][block_offsets].view(
-1, num_heads, head_dim)[0:history_lengths[-1] + 1]
if quant:
# dequant int8 tensor to hidden_states.dtype
key_states, value_states = dequant_kv(
context=context,
layer_id=layer_id,
key_int8=key_states,
value_int8=value_states,
out_type=hidden_states.dtype)
full_position_ids = torch.arange(
position_ids.item() + 1,
device=position_ids.device).unsqueeze(0)
key_states, value_states = rotary_emb_generate_fn(
key_states, value_states, full_position_ids, window)
attn_weights = torch.matmul(query_states.transpose(
0, 1), key_states.permute(1, 2, 0)) / math.sqrt(head_dim)
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
# upcast attention to fp32
attn_weights = torch.nn.functional.softmax(attn_weights,
dim=-1,
dtype=torch.float32).to(
query_states.dtype)
attn_output = torch.matmul(attn_weights,
value_states.transpose(0, 1))
else:
query_states1, query_states2, key_states1, key_states2, value_states = rotary_emb_context_fn( # noqa: E501
query_states, key_states, value_states, position_ids, window)
sm_scale = 1.0 / math.sqrt(head_dim)
PADDING_UNIT = past_key_value[0].shape[1]
assert PADDING_UNIT in {16, 32, 64, 128, 256}
# padding_len = -query_states1.shape[2] % PADDING_UNIT
# query_states1 = F.pad(query_states1,
# (0, 0, 0, padding_len)).contiguous()
# query_states2 = F.pad(query_states2,
# (0, 0, 0, padding_len)).contiguous()
# key_states1 = F.pad(key_states1,
# (0, 0, 0, padding_len)).contiguous()
# key_states2 = F.pad(key_states2,
# (0, 0, 0, padding_len)).contiguous()
# value_states = F.pad(value_states,
# (0, 0, 0, padding_len)).contiguous()
query_states1 = query_states1.contiguous()
query_states2 = query_states2.contiguous()
key_states1 = key_states1.contiguous()
key_states2 = key_states2.contiguous()
value_states = value_states.contiguous()
attn_output = rerope_attention_fwd(query_states1,
query_states2,
key_states1,
key_states2,
value_states,
True,
sm_scale,
window,
BLOCK_M=PADDING_UNIT).squeeze(0)
# attn_output = attn_output[:, 0:q_len]
if attn_output.size() != (num_heads, q_len, head_dim):
raise ValueError(
f'`attn_output` should be of size {(bsz, num_heads, q_len, head_dim)}, but is' # noqa: E501
f' {attn_output.size()}')
attn_output = attn_output.transpose(0, 1).reshape(
bsz, q_len, hidden_size).contiguous()
else:
raise ValueError(f'Unknown bias type: {bias_type}')
if o_proj is not None:
attn_output = o_proj(attn_output)
return attn_output | Attention module forward with ReRoPE. Args: hidden_states (Tensor): Input of attention layer. history_lengths (Sequence): Cache lengths of each data in batch. block_offsets (Tensor): Block table of the key/value caches, used by paged attention. num_heads (int): numbers of query heads. num_kv_heads (int): numbers of key/value heads. head_dim (int): Feature dimension of heads. position_ids (LongTensor): position ids of the input. past_key_value (Tuple[Tensor]): key value cache. q_proj (Callable): query project module/function. k_proj (Callable): key project module/function. v_proj (Callable): value project module/function. qkv_proj (Callable): query/key/value project module/function. o_proj (Callable): output project module/function. rotary_emb_context_fn (Callable): rotary embedding context callback. rotary_emb_generate_fn (Callable): rotary embedding generate callback. bias_type (str): type of attention bias. support ['default']. training_length (int): model sequence length during trainning. window (int): ReRoPE window size, default value is 512. |
8,220 | from typing import List, Optional, Tuple, Union
import torch
import torch.distributed as dist
from torch import nn
from torch.distributed._tensor import DeviceMesh
from transformers.modeling_outputs import BaseModelOutputWithPast
from ..dist_utils import (colwise_parallelize_linear_fn,
rowwise_parallelize_linear_fn)
from ..kernels import fill_kv_cache, fused_rotary_emb, paged_attention_fwd
def _make_inv_freq(self, device: torch.device):
if self.inv_freq is None:
self.inv_freq = 1.0 / (self.base**(torch.arange(
0, self.dim, 2, dtype=torch.int64, device=device).float() /
self.dim)) | null |
8,221 | import torch
The provided code snippet includes necessary dependencies for implementing the `batch_tensor` function. Write a Python function `def batch_tensor(inputs: torch.Tensor, seq_length: torch.LongTensor)` to solve the following problem:
Convert continuous tensor to batched tensor. Args: inputs (Tensor): continuous tensor. seq_length (Tensor): length of each sequence. Return: Tensor: batched tensor.
Here is the function:
def batch_tensor(inputs: torch.Tensor, seq_length: torch.LongTensor):
    """Split a packed (continuous) tensor into a zero-padded batch.

    Args:
        inputs (Tensor): packed tensor of shape (1, sum(seq_length), ...).
        seq_length (Tensor): length of each sequence.
    Return:
        Tensor: padded tensor of shape (batch, max(seq_length), ...).
    """
    from torch.nn.utils.rnn import pad_sequence
    ends = seq_length.cumsum(0)
    starts = ends - seq_length
    pieces = [inputs[0, s:e] for s, e in zip(starts, ends)]
    return pad_sequence(pieces, batch_first=True)
8,222 | from typing import Any, List, Tuple
import torch
from .layout_convert import continuous_tensor, page_cache
def make_model_inputs(input_ids: torch.Tensor,
                      block_offsets: torch.Tensor,
                      seq_length: torch.Tensor = None,
                      history_length: List[int] = None):
    """Package batched token ids into a ``ModelInputs`` struct for testing.

    Args:
        input_ids (Tensor): token ids of shape (batch, max_seq_len).
        block_offsets (Tensor): per-sequence block table for the paged
            KV cache.
        seq_length (Tensor): valid length of each row; defaults to full
            rows.
        history_length (List[int]): cached history length per sequence;
            defaults to zeros.
    """
    from lmdeploy.pytorch.engine.model_agent import ModelInputs
    batch_size = input_ids.size(0)
    max_seq_len = input_ids.size(1)
    if seq_length is None:
        max_seq_len = input_ids.size(1)
        seq_length = torch.full((batch_size, ), max_seq_len)
    # pack the padded batch into one continuous sequence
    input_ids = continuous_tensor(input_ids, seq_length)
    if history_length is None:
        history_length = [0] * batch_size
    else:
        assert len(history_length) == len(seq_length)
    # NOTE(review): after packing this compares total token count against
    # batch size, i.e. decoding iff every sequence contributes exactly one
    # token — presumably intentional; confirm against continuous_tensor's
    # output shape.
    is_decoding = input_ids.size(0) == batch_size
    q_start_loc = seq_length.cumsum(0) - seq_length
    mask_range = torch.arange(max_seq_len)[None, :]
    attention_mask = (mask_range < seq_length[:, None]).long()
    # positions restart at 0 per row, then shift by each row's history
    position_ids = attention_mask.long().cumsum(-1) - 1
    position_ids += position_ids.new_tensor(history_length).unsqueeze(-1)
    if isinstance(history_length, torch.Tensor):
        history_length = history_length.tolist()
    return ModelInputs(input_ids=input_ids,
                       seq_length=seq_length,
                       attention_mask=attention_mask,
                       block_offsets=block_offsets,
                       position_ids=position_ids,
                       q_start_loc=q_start_loc,
                       history_lengths=history_length,
                       is_decoding=is_decoding)
def page_cache(paged_cache: torch.Tensor,
               batched_cache: torch.Tensor,
               cache_length: torch.Tensor,
               block_offsets: torch.Tensor,
               permute_head: bool = True):
    """Scatter a batched KV cache into fixed-size pages.

    Args:
        paged_cache (Tensor): output pages, shape
            (num_blocks, block_size, heads, dim).
        batched_cache (Tensor): input cache; (batch, heads, seq, dim) when
            ``permute_head`` is True, otherwise (batch, seq, heads, dim).
        cache_length (Tensor): number of valid cache entries per sequence.
        block_offsets (Tensor): page indices assigned to each sequence.
        permute_head (bool): move the head axis behind the sequence axis
            before paging.
    """
    assert block_offsets.dim() == 2
    block_size = paged_cache.size(1)
    batch_size = batched_cache.size(0)
    if permute_head:
        # (batch, heads, seq, dim) -> (batch, seq, heads, dim)
        batched_cache = batched_cache.permute(0, 2, 1, 3)
    for seq_idx in range(batch_size):
        valid_len = cache_length[seq_idx]
        seq_cache = batched_cache[seq_idx]
        seq_blocks = block_offsets[seq_idx]
        # copy one block-sized slice per assigned page
        for blk_idx, start in enumerate(range(0, valid_len, block_size)):
            stop = min(start + block_size, valid_len)
            page = seq_blocks[blk_idx]
            paged_cache[page, :stop - start] = seq_cache[start:stop]
class StepContext:
    """Per-step inference context for patched models.

    Patched models may need extra information (paged-cache block tables,
    1-d position ids, adapter ids, ...) beyond the raw inputs; this
    dataclass bundles those values plus small helpers to build and query
    them.

    NOTE(review): the annotated fields and ``field(default_factory=dict)``
    imply an ``@dataclass`` decorator, and ``new`` /
    ``tensorlize_block_offsets`` / ``get_position_ids_1d`` take ``cls`` —
    presumably ``@classmethod``; the decorators appear to have been lost in
    extraction. Confirm against the original module.
    """

    inputs: ModelInputs
    block_offsets: torch.LongTensor
    position_ids: torch.LongTensor
    position_ids_1d: torch.LongTensor
    q_start_loc: torch.LongTensor
    history_lengths: torch.LongTensor
    q_seq_length: torch.LongTensor
    kv_seq_length: torch.LongTensor
    max_q_seq_length: int
    max_kv_seq_length: int
    kv_caches: List
    is_decoding: bool
    world_size: int = 1
    json_config: Dict = None
    # LoRA adapter bookkeeping (left as None when no adapters are active)
    local_adapter_ids: torch.LongTensor = None
    global_adapter_ids: torch.LongTensor = None
    adapter_offsets: torch.LongTensor = None
    max_rank: int = 0
    # scratch area written via set_output / read via get_output
    _outputs: Dict = field(default_factory=dict)

    def new(
        cls,
        inputs: ModelInputs,
        world_size: int = 1,
        device: str = 'cuda',
        json_config: dict = None,
        kv_caches: List = None,
    ):
        """build step context.

        Args:
            inputs (ModelInputs): packaged model inputs.
            world_size (int): The distribution world size.
            device (str): The device of the tensors.
            json_config (dict): extra model config forwarded to the context.
            kv_caches (List): per-layer key/value cache tensors.
        """
        position_ids = inputs.position_ids
        max_q_seq_length = position_ids.size(-1)

        # seq_len + history_length
        kv_seq_length = position_ids[..., -1] + 1

        # position ids 1d
        q_seq_length = inputs.seq_length
        position_ids_1d = cls.get_position_ids_1d(position_ids, q_seq_length,
                                                  device)

        max_kv_seq_length = max_q_seq_length + max(inputs.history_lengths)

        ret = StepContext(inputs=inputs,
                          block_offsets=inputs.block_offsets,
                          position_ids=inputs.position_ids,
                          position_ids_1d=position_ids_1d,
                          q_start_loc=inputs.q_start_loc,
                          history_lengths=inputs.history_lengths,
                          q_seq_length=inputs.seq_length,
                          kv_seq_length=kv_seq_length,
                          max_q_seq_length=max_q_seq_length,
                          max_kv_seq_length=max_kv_seq_length,
                          kv_caches=kv_caches,
                          is_decoding=inputs.is_decoding,
                          world_size=world_size,
                          json_config=json_config,
                          local_adapter_ids=inputs.local_adapter_ids,
                          global_adapter_ids=inputs.global_adapter_ids,
                          adapter_offsets=inputs.adapter_offsets,
                          max_rank=inputs.max_rank)
        return ret

    def tensorlize_block_offsets(cls, block_offsets, device):
        """Pad the ragged per-sequence block lists into one LongTensor."""
        import numpy as np
        offset_len = [len(offset) for offset in block_offsets]
        max_offsets_len = max(offset_len)
        batch_size = len(offset_len)
        # build on CPU with numpy, then move once to the target device
        pad_block_offsets = np.zeros((batch_size, max_offsets_len),
                                     dtype=np.int64)

        for pad_offset, offset, off_len in zip(pad_block_offsets,
                                               block_offsets, offset_len):
            pad_offset[:off_len] = offset

        block_offsets = torch.from_numpy(pad_block_offsets).to(device)
        return block_offsets

    def get_position_ids_1d(cls,
                            position_ids: torch.LongTensor,
                            seq_length: torch.LongTensor,
                            device: str = 'cuda'):
        """Flatten padded 2-d position ids into one packed 1-d tensor."""
        if position_ids.size(1) == 1:
            # decoding step: one token per sequence, no padding to strip
            position_ids_1d = position_ids.flatten()
        else:
            position_ids_1d = [
                ids[:l] for ids, l in zip(position_ids.cpu(), seq_length.cpu())
            ]
            position_ids_1d = torch.cat(position_ids_1d).to(device)
        return position_ids_1d

    def get_block_offsets(self):
        """return block offsets."""
        return self.block_offsets

    def set_output(self, key, value):
        """Stash an intermediate output under ``key``."""
        self._outputs[key] = value

    def get_output(self, key):
        """Fetch a stashed output, or None if ``key`` was never set."""
        if key in self._outputs:
            return self._outputs[key]
        return None
The provided code snippet includes necessary dependencies for implementing the `make_step_context` function. Write a Python function `def make_step_context( input_ids: torch.Tensor, seq_length: torch.Tensor = None, history_length: List[int] = None, past_key_values: List[Tuple] = None, world_size: int = 1, device: str = 'cuda', block_size: int = 64, num_key_value_heads: int = 32, head_size: int = 128, kv_cache_dtype: torch.dtype = torch.float16, json_config: Any = None, )` to solve the following problem:
make step context.
Here is the function:
def make_step_context(
    input_ids: torch.Tensor,
    seq_length: torch.Tensor = None,
    history_length: List[int] = None,
    past_key_values: List[Tuple] = None,
    world_size: int = 1,
    device: str = 'cuda',
    block_size: int = 64,
    num_key_value_heads: int = 32,
    head_size: int = 128,
    kv_cache_dtype: torch.dtype = torch.float16,
    json_config: Any = None,
):
    """Build a ``StepContext`` (with freshly paged KV caches) for one step.

    Allocates paged caches large enough for ``seq_length + history_length``
    tokens per sequence, pre-fills them from ``past_key_values`` when
    history exists, and packages everything into model inputs.

    Args:
        input_ids (Tensor): token ids of shape (batch, max_seq_len).
        seq_length (Tensor): per-sequence lengths; defaults to full rows.
        history_length (List[int]): cached tokens per sequence; default 0.
        past_key_values (List[Tuple]): per-layer (key, value) batched
            caches used to pre-fill the paged cache.
        world_size (int): distributed world size.
        device (str): device for tensors and caches.
        block_size (int): tokens per cache page.
        num_key_value_heads (int): KV head count of the cache.
        head_size (int): per-head feature size of the cache.
        kv_cache_dtype (torch.dtype): dtype of the allocated caches.
        json_config (Any): extra model config forwarded to the context.
    """
    from torch.nn.utils.rnn import pad_sequence

    from lmdeploy.pytorch.engine.model_agent import StepContext
    batch_size = input_ids.size(0)
    max_seq_len = input_ids.size(1)
    if seq_length is None:
        max_seq_len = input_ids.size(1)
        seq_length = torch.full((batch_size, ), max_seq_len)
    if history_length is None:
        history_length = [0] * batch_size
    else:
        assert len(history_length) == len(seq_length)
    history_length = torch.tensor(history_length)

    def __create_kv_caches(past_key_values):
        """Allocate empty paged caches plus per-sequence block tables."""
        total_length = seq_length + history_length
        # ceil-divide: pages needed per sequence
        num_blocks_per_seq = (total_length + block_size - 1) // block_size
        num_blocks = sum(num_blocks_per_seq)
        num_caches = 1 if past_key_values is None else len(past_key_values)
        cache_shape = [num_blocks, block_size, num_key_value_heads, head_size]

        # hand out consecutive page ids sequence by sequence, then pad the
        # ragged lists into a rectangular table
        block_offsets_1d = torch.arange(0, num_blocks)
        block_end_loc = num_blocks_per_seq.cumsum(0)
        block_start_loc = block_end_loc - num_blocks_per_seq
        block_offsets = [
            block_offsets_1d[sloc:eloc]
            for sloc, eloc in zip(block_start_loc, block_end_loc)
        ]
        block_offsets = pad_sequence(block_offsets, batch_first=True)

        kv_caches = []
        for _ in range(num_caches):
            k_cache = torch.empty(cache_shape,
                                  dtype=kv_cache_dtype,
                                  device=device)
            v_cache = torch.empty_like(k_cache)
            kv_caches.append((k_cache, v_cache))

        return kv_caches, block_offsets

    def __fill_kv_caches(kv_caches, past_key_values, block_offsets):
        """Copy historical keys/values into the freshly paged caches."""
        if past_key_values is None:
            return
        # nothing cached yet -> caches stay uninitialized (torch.empty)
        if all(hlen == 0 for hlen in history_length):
            return

        num_layers = len(past_key_values)
        for layer_idx in range(num_layers):
            k_cache, v_cache = kv_caches[layer_idx]
            past_k, past_v = past_key_values[layer_idx]
            page_cache(k_cache, past_k, history_length, block_offsets)
            page_cache(v_cache, past_v, history_length, block_offsets)

    kv_caches, block_offsets = __create_kv_caches(past_key_values)
    __fill_kv_caches(kv_caches, past_key_values, block_offsets)

    history_length = history_length.tolist()
    model_inputs = make_model_inputs(input_ids,
                                     block_offsets=block_offsets,
                                     seq_length=seq_length,
                                     history_length=history_length)
    model_inputs = model_inputs.to_device(device)

    return StepContext.new(
        inputs=model_inputs,
        world_size=world_size,
        device=device,
        json_config=json_config,
        kv_caches=kv_caches,
    )
8,223 | import argparse
import copy
import json
import os
import shutil
import torch
from mmengine.utils import mkdir_or_exist
def parse_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace: with ``src_dir`` and ``dst_dir`` attributes.
    """
    description = 'Convert a hugging face model to the smallest sharded one'
    parser = argparse.ArgumentParser(description=description)
    for name, help_text in (
        ('src_dir', 'the directory of the model'),
        ('dst_dir', 'the directory to save the new model'),
    ):
        parser.add_argument(name, help=help_text)
    return parser.parse_args()
8,224 | import torch
from torch import nn
from lmdeploy.lite.quantization.awq import (FC_FCS_MAP, NORM_FCS_MAP,
quant_weights, smooth_layers)
from lmdeploy.lite.utils import collect_target_modules
from .calibrate import calibrate
# Maps a HF model class name to the name of its transformer decoder-layer
# class; used to locate the layers to calibrate and quantize.
LAYER_TYPE_MAP = {
    'InternLMForCausalLM': 'InternLMDecoderLayer',
    'InternLM2ForCausalLM': 'InternLM2DecoderLayer',
    'QWenLMHeadModel': 'QWenBlock',
    'BaiChuanForCausalLM': 'DecoderLayer', # Baichuan 7B
    'BaichuanForCausalLM': 'DecoderLayer', # Baichuan2 7B
    'LlamaForCausalLM': 'LlamaDecoderLayer',
}
# CLI entry point: expose auto_awq via python-fire
if __name__ == '__main__':
    import fire

    fire.Fire(auto_awq)
NORM_FCS_MAP = {
'LlamaDecoderLayer': {
'input_layernorm':
['self_attn.k_proj', 'self_attn.q_proj', 'self_attn.v_proj'],
'post_attention_layernorm': ['mlp.gate_proj', 'mlp.up_proj']
},
'InternLMDecoderLayer': {
'input_layernorm':
['self_attn.k_proj', 'self_attn.q_proj', 'self_attn.v_proj'],
'post_attention_layernorm': ['mlp.gate_proj', 'mlp.up_proj']
},
'InternLM2DecoderLayer': {
'attention_norm': ['attention.wqkv'],
'ffn_norm': ['feed_forward.w1', 'feed_forward.w3']
},
'QWenBlock': {
'ln_1': ['attn.c_attn'],
'ln_2': ['mlp.w1', 'mlp.w2']
},
'DecoderLayer': {
'input_layernorm': ['self_attn.W_pack'],
'post_attention_layernorm': ['mlp.gate_proj', 'mlp.up_proj']
}
}
FC_FCS_MAP = {
'LlamaDecoderLayer': {
'self_attn.v_proj': ['self_attn.o_proj'],
'mlp.up_proj': ['mlp.down_proj']
},
'InternLMDecoderLayer': {
'self_attn.v_proj': ['self_attn.o_proj'],
'mlp.up_proj': ['mlp.down_proj']
},
'InternLM2DecoderLayer': {
'feed_forward.w3': ['feed_forward.w2']
},
'QWenBlock': {
'attn.c_attn': ['attn.c_proj'],
'mlp.w1': ['mlp.c_proj']
},
'DecoderLayer': {
'self_attn.W_pack': ['self_attn.o_proj'],
'mlp.up_proj': ['mlp.down_proj']
}
}
def quant_weights(model, fcs, bits, symmetry, group_size=-1, device='cuda'):
    """Replace the given linear layers with weight-only quantized ones.

    Args:
        model: root module; its submodules are swapped in place.
        fcs (dict): qualified name -> nn.Linear to quantize.
        bits (int): quantization bit width.
        symmetry (bool): symmetric vs. asymmetric quantization.
        group_size (int): per-group statistics size, -1 for per-channel.
        device (str): device used while packing each layer.
    """
    from lmdeploy.legacy.pytorch.modules import WeightOnlyQLinear
    from lmdeploy.lite.quantization import WeightQuantizer

    def _pack_one(name, fc):
        # quantize on `device`, then park the original layer back on CPU
        fc.to(device)
        quantizer = WeightQuantizer(bits, symmetry, 'per_group', group_size)
        packed = WeightOnlyQLinear.from_linear(fc, quantizer)
        parent_name, _, child_name = name.rpartition('.')
        fc.to('cpu')
        setattr(model.get_submodule(parent_name), child_name, packed)
        print(f'{name} weight packed.')

    for name, fc in fcs.items():
        _pack_one(name, fc)
def smooth_layers(layers,
                  fc2fcs,
                  norm2fcs,
                  a_scales,
                  group_size=-1,
                  device='cuda'):
    """Apply activation-aware weight smoothing layer by layer.

    Args:
        layers (dict): qualified layer name -> decoder layer module.
        fc2fcs (dict): source linear name -> downstream linear names.
        norm2fcs (dict): norm name -> downstream linear names.
        a_scales (dict): activation scales keyed by qualified module name.
        group_size (int): quantization group size, -1 for per-channel.
        device (str): device each layer is moved to while smoothing.
    """
    for l_name, layer in layers.items():
        layer.to(device)
        # balance each norm against the linears it feeds
        for ln_name, fc_names in norm2fcs.items():
            scale_key = f'{l_name}.{fc_names[0]}'
            norm = layer.get_submodule(ln_name)
            targets = [layer.get_submodule(n) for n in fc_names]
            smooth_ln_fcs(norm, targets, a_scales[scale_key], group_size)
        # balance each source linear against the linears it feeds
        for f_name, fc_names in fc2fcs.items():
            scale_key = f'{l_name}.{fc_names[0]}'
            src = layer.get_submodule(f_name)
            targets = [layer.get_submodule(n) for n in fc_names]
            smooth_fc_fcs(src, targets, a_scales[scale_key], group_size)
        layer.to('cpu')
        print(f'{l_name} smooth weight done.')
def calibrate(model: str,
              calib_dataset: str = 'ptb',
              calib_samples: int = 128,
              calib_seqlen: int = 2048,
              work_dir: str = './work_dir',
              device: str = 'cuda') -> tuple:
    """Load a HF model and collect activation stats on a calibration set.

    Args:
        model (str): The name or path of the model to be loaded.
        calib_dataset (str, optional): The calibration dataset name.
            Defaults to 'ptb'.
        calib_samples (int, optional): The number of samples for calibration.
            Defaults to 128.
        calib_seqlen (int, optional): The sequence length for calibration.
            Defaults to 2048.
        work_dir (str): The working directory for outputs.
            Defaults to './work_dir'.
        device (str, optional): The device to be used for calculation.
            Defaults to 'cuda'.

    Returns:
        model (nn.Module): The loaded huggingface model.
        tokenizer : The loaded huggingface tokenizer.
        work_dir (Path): The working directory holding the exported stats.
    """
    assert calib_dataset in ['c4', 'ptb', 'wikitext2', 'pileval'], \
        'Support only `c4`, `ptb`, `wikitext2` or `pileval`.'

    # Load tokenizer and configuration
    tokenizer = AutoTokenizer.from_pretrained(model,
                                              use_fast=False,
                                              trust_remote_code=True)
    model = load_hf_from_pretrained(model,
                                    torch_dtype=torch.float16,
                                    trust_remote_code=True)

    model_type = type(model).__name__
    if model_type not in LAYER_TYPE_MAP or model_type not in NORM_TYPE_MAP:
        raise RuntimeError(
            f'Currently, quantification and calibration of {model_type} are '
            f'not supported. The supported model types are '
            f"{', '.join(LAYER_TYPE_MAP.keys())}.")

    if model_type == 'QWenLMHeadModel':
        # Qwen's modeling code needs flash-attn at runtime; fail early with
        # an actionable hint instead of deep inside calibration.
        try:
            import flash_attn  # noqa: F401
        except ImportError:
            raise RuntimeError(
                'When using Qwen, you need to `pip install flash-attn` first, '
                'otherwise calibration and quantification will not work '
                'properly.')

    layer_type = LAYER_TYPE_MAP[type(model).__name__]
    norm_type = NORM_TYPE_MAP[type(model).__name__]

    # NOTE(review): helper not visible here — presumably stages the model
    # (and its output head) on `device` for calibration; confirm.
    _prepare_for_calibrate(model, layer_type,
                           HEAD_NAME_MAP[type(model).__name__], device)

    print('Loading calibrate dataset ...')
    calib_loader, _ = get_calib_loaders(calib_dataset,
                                        tokenizer,
                                        nsamples=calib_samples,
                                        seqlen=calib_seqlen)

    # Initialize calibration context
    calib_ctx = CalibrationContext(model,
                                   tokenizer,
                                   layer_type=layer_type,
                                   norm_type=norm_type,
                                   device=device)

    with calib_ctx:
        # some loaders yield tensors directly, others (input, target) pairs
        all_data = torch.cat([
            data if isinstance(data, torch.Tensor) else data[0]
            for data in calib_loader
        ]).to(device)
        calib_ctx.calibrate(all_data)

    # Create work directory if not exists
    work_dir = Path(work_dir)
    work_dir.mkdir(parents=True, exist_ok=True)
    calib_ctx.export(work_dir)

    return model, tokenizer, work_dir
The provided code snippet includes necessary dependencies for implementing the `auto_awq` function. Write a Python function `def auto_awq(model: str, work_dir: str = './work_dir', calib_dataset: str = 'ptb', calib_samples: int = 128, calib_seqlen: int = 2048, w_bits: int = 4, w_sym: bool = False, w_group_size: int = 128, device: str = 'cuda')` to solve the following problem:
Perform weight quantization using AWQ algorithm. Args: model (str): The path of model in hf format. work_dir (str): The working directory to save results. calib_dataset (str): The calibration dataset name. calib_samples (int): The number of samples for calibration. calib_seqlen (int): The sequence length for calibration. w_bits (int): Bit number for weight quantization. w_sym (bool): Whether to do symmetric quantization. w_group_size (int): Group size for weight quantization statistics. device (str): Device type of running.
Here is the function:
def auto_awq(model: str,
             work_dir: str = './work_dir',
             calib_dataset: str = 'ptb',
             calib_samples: int = 128,
             calib_seqlen: int = 2048,
             w_bits: int = 4,
             w_sym: bool = False,
             w_group_size: int = 128,
             device: str = 'cuda'):
    """Perform weight quantization using AWQ algorithm.

    Args:
        model (str): The path of model in hf format.
        work_dir (str): The working directory to save results.
        calib_dataset (str): The calibration dataset name.
        calib_samples (int): The number of samples for calibration.
        calib_seqlen (int): The sequence length for calibration.
        w_bits (int): Bit number for weight quantization.
        w_sym (bool): Whether to do symmetric quantization.
        w_group_size (int): Group size for weight quantization statistics.
        device (str): Device type of running.
    """
    # calibrate() loads the model and dumps activation stats into work_dir
    model, tokenizer, work_dir = calibrate(model, calib_dataset, calib_samples,
                                           calib_seqlen, work_dir, device)

    layer_type = LAYER_TYPE_MAP[type(model).__name__]
    fc2fcs = FC_FCS_MAP[layer_type]
    norm2fcs = NORM_FCS_MAP[layer_type]

    # per-channel absolute-max of activations collected during calibration
    act_scales = torch.load(work_dir / 'inputs_stats.pth')['absmax']
    layers = collect_target_modules(model, layer_type)
    fcs = {}
    for l_name, layer in layers.items():
        name2fc = collect_target_modules(layer, nn.Linear, prefix=l_name)
        fcs.update(name2fc)

    # AWQ: rebalance weights with activation scales, then quantize them
    smooth_layers(layers, fc2fcs, norm2fcs, act_scales, w_group_size, device)
    quant_weights(model, fcs, w_bits, w_sym, w_group_size, device)

    model.save_pretrained(work_dir,
                          max_shard_size='2GB',
                          safe_serialization=False)
    tokenizer.save_pretrained(work_dir)
8,225 | import os.path as osp
import shutil
import fire
import torch
from torch import nn
import lmdeploy
from lmdeploy.lite.apis.calibrate import calibrate
from lmdeploy.lite.quantization.awq import (FC_FCS_MAP, NORM_FCS_MAP,
smooth_layers)
from lmdeploy.lite.utils import collect_target_modules
from lmdeploy.pytorch.models import QLinear, QRMSNorm
LAYER_TYPE_MAP = {
'InternLMForCausalLM': 'InternLMDecoderLayer',
'InternLM2ForCausalLM': 'InternLM2DecoderLayer',
'QWenLMHeadModel': 'QWenBlock',
'BaiChuanForCausalLM': 'DecoderLayer',
'LlamaForCausalLM': 'LlamaDecoderLayer',
}
NORM_TYPE_MAP = {
'InternLMForCausalLM': 'InternLMRMSNorm',
'InternLM2ForCausalLM': 'InternLM2RMSNorm',
'QWenLMHeadModel': 'RMSNorm',
'BaiChuanForCausalLM': 'RMSNorm',
'LlamaForCausalLM': 'LlamaRMSNorm',
}
MODEL_PATH_MAP = {
'InternLMForCausalLM':
osp.join(LMDEPLOY_ROOT, 'pytorch/modeling/modeling_internlm.py'),
'InternLM2ForCausalLM':
osp.join(LMDEPLOY_ROOT, 'pytorch/modeling/modeling_internlm2.py'),
'LlamaForCausalLM':
osp.join(LMDEPLOY_ROOT, 'pytorch/modeling/modeling_llama.py'),
'BaiChuanForCausalLM':
osp.join(LMDEPLOY_ROOT, 'pytorch/modeling/modeling_baichuan.py')
}
AUTO_MAP = {
'InternLMForCausalLM': {
'AutoConfig': 'configuration_internlm.InternLMConfig',
'AutoModel': 'modeling_internlm.InternLMForCausalLM',
'AutoModelForCausalLM': 'modeling_internlm.InternLMForCausalLM'
},
'InternLM2ForCausalLM': {
'AutoConfig': 'configuration_internlm2.InternLMConfig',
'AutoModelForCausalLM': 'modeling_internlm2.InternLM2ForCausalLM',
'AutoModel': 'modeling_internlm2.InternLM2ForCausalLM'
},
'LlamaForCausalLM': {
'AutoModel': 'modeling_llama.LlamaForCausalLM',
'AutoModelForCausalLM': 'modeling_llama.LlamaForCausalLM'
},
'BaiChuanForCausalLM': {
'AutoConfig': 'configuration_baichuan.BaiChuanConfig',
'AutoModelForCausalLM': 'modeling_baichuan.BaiChuanForCausalLM'
}
}
# CLI entry point: expose smooth_quant via python-fire
if __name__ == '__main__':
    fire.Fire(smooth_quant)
def calibrate(model: str,
calib_dataset: str = 'ptb',
calib_samples: int = 128,
calib_seqlen: int = 2048,
work_dir: str = './work_dir',
device: str = 'cuda') -> None:
"""The main function for loading the model and performing calibration on a
given dataset.
Args:
model (str): The name or path of the model to be loaded.
calib_dataset (str, optional): The calibration dataset name.
Defaults to 'ptb'.
calib_samples (int, optional): The number of samples for calibration.
Defaults to 128.
calib_seqlen (int, optional): The sequence length for calibration.
Defaults to 2048.
work_dir (str): The working directory for outputs.
Defaults to './work_dir'.
device (str, optional): The device to be used for calculation.
Defaults to 'cuda'.
Returns:
model (nn.Module): The loaded huggingface model.
tokenizer : The loaded hugginface tokenizer.
work_dir (str): The working directory for outputs.
"""
assert calib_dataset in ['c4', 'ptb', 'wikitext2', 'pileval'], \
'Support only `c4`, `ptb`, `wikitext2` or `pileval`.'
# Load tokenizer and configuration
tokenizer = AutoTokenizer.from_pretrained(model,
use_fast=False,
trust_remote_code=True)
model = load_hf_from_pretrained(model,
torch_dtype=torch.float16,
trust_remote_code=True)
model_type = type(model).__name__
if model_type not in LAYER_TYPE_MAP or model_type not in NORM_TYPE_MAP:
raise RuntimeError(
f'Currently, quantification and calibration of {model_type} are '
f'not supported. The supported model types are '
f"{', '.join(LAYER_TYPE_MAP.keys())}.")
if model_type == 'QWenLMHeadModel':
try:
import flash_attn # noqa: F401
except ImportError:
raise RuntimeError(
'When using Qwen, you need to `pip install flash-attn` first, '
'otherwise calibration and quantification will not work '
'properly.')
layer_type = LAYER_TYPE_MAP[type(model).__name__]
norm_type = NORM_TYPE_MAP[type(model).__name__]
_prepare_for_calibrate(model, layer_type,
HEAD_NAME_MAP[type(model).__name__], device)
print('Loading calibrate dataset ...')
calib_loader, _ = get_calib_loaders(calib_dataset,
tokenizer,
nsamples=calib_samples,
seqlen=calib_seqlen)
# Initialize calibration context
calib_ctx = CalibrationContext(model,
tokenizer,
layer_type=layer_type,
norm_type=norm_type,
device=device)
with calib_ctx:
all_data = torch.cat([
data if isinstance(data, torch.Tensor) else data[0]
for data in calib_loader
]).to(device)
calib_ctx.calibrate(all_data)
# Create work directory if not exists
work_dir = Path(work_dir)
work_dir.mkdir(parents=True, exist_ok=True)
calib_ctx.export(work_dir)
return model, tokenizer, work_dir
NORM_FCS_MAP = {
'LlamaDecoderLayer': {
'input_layernorm':
['self_attn.k_proj', 'self_attn.q_proj', 'self_attn.v_proj'],
'post_attention_layernorm': ['mlp.gate_proj', 'mlp.up_proj']
},
'InternLMDecoderLayer': {
'input_layernorm':
['self_attn.k_proj', 'self_attn.q_proj', 'self_attn.v_proj'],
'post_attention_layernorm': ['mlp.gate_proj', 'mlp.up_proj']
},
'InternLM2DecoderLayer': {
'attention_norm': ['attention.wqkv'],
'ffn_norm': ['feed_forward.w1', 'feed_forward.w3']
},
'QWenBlock': {
'ln_1': ['attn.c_attn'],
'ln_2': ['mlp.w1', 'mlp.w2']
},
'DecoderLayer': {
'input_layernorm': ['self_attn.W_pack'],
'post_attention_layernorm': ['mlp.gate_proj', 'mlp.up_proj']
}
}
FC_FCS_MAP = {
'LlamaDecoderLayer': {
'self_attn.v_proj': ['self_attn.o_proj'],
'mlp.up_proj': ['mlp.down_proj']
},
'InternLMDecoderLayer': {
'self_attn.v_proj': ['self_attn.o_proj'],
'mlp.up_proj': ['mlp.down_proj']
},
'InternLM2DecoderLayer': {
'feed_forward.w3': ['feed_forward.w2']
},
'QWenBlock': {
'attn.c_attn': ['attn.c_proj'],
'mlp.w1': ['mlp.c_proj']
},
'DecoderLayer': {
'self_attn.W_pack': ['self_attn.o_proj'],
'mlp.up_proj': ['mlp.down_proj']
}
}
def smooth_layers(layers,
fc2fcs,
norm2fcs,
a_scales,
group_size=-1,
device='cuda'):
"""Apply weight smoothing based on input scales."""
for l_name, layer in layers.items():
layer.to(device)
for ln_name, fc_names in norm2fcs.items():
a_name = [f'{l_name}.{n}' for n in fc_names][0]
ln = layer.get_submodule(ln_name)
fcs = [layer.get_submodule(n) for n in fc_names]
smooth_ln_fcs(ln, fcs, a_scales[a_name], group_size)
for f_name, fc_names in fc2fcs.items():
a_name = [f'{l_name}.{n}' for n in fc_names][0]
fc = layer.get_submodule(f_name)
fcs = [layer.get_submodule(n) for n in fc_names]
smooth_fc_fcs(fc, fcs, a_scales[a_name], group_size)
layer.to('cpu')
print(f'{l_name} smooth weight done.')
def smooth_quant(model: str,
                 work_dir: str = './work_dir',
                 calib_dataset: str = 'ptb',
                 calib_samples: int = 128,
                 calib_seqlen: int = 2048,
                 device: str = 'cuda'):
    """Calibrate a HF model and export a smooth-quantized copy.

    Args:
        model (str): name or path of the model to quantize.
        work_dir (str): directory for calibration stats and the output
            model.
        calib_dataset (str): calibration dataset name.
        calib_samples (int): number of calibration samples.
        calib_seqlen (int): calibration sequence length.
        device (str): device used for calibration and quantization.
    """
    model, tokenizer, work_dir = calibrate(model, calib_dataset, calib_samples,
                                           calib_seqlen, work_dir, device)

    # calibrate function exports the calibration statistics
    # (inputs, outputs, keys and values) to `work_dir`.
    inp_stats = torch.load(work_dir / 'inputs_stats.pth')
    act_scales = inp_stats['absmax']

    model_type = type(model).__name__
    if model_type not in LAYER_TYPE_MAP or model_type not in NORM_TYPE_MAP:
        raise RuntimeError(
            f'Currently, quantification and calibration of {model_type} are '
            f'not supported. The supported model types are '
            f"{', '.join(LAYER_TYPE_MAP.keys())}.")

    if model_type == 'QWenLMHeadModel':
        # Qwen requires flash-attn; fail early with an actionable message
        try:
            import flash_attn  # noqa: F401
        except ImportError:
            raise RuntimeError(
                'When using Qwen, you need to `pip install flash-attn` first, '
                'otherwise calibration and quantification will not work '
                'properly.')

    layer_type = LAYER_TYPE_MAP[type(model).__name__]
    norm_type = NORM_TYPE_MAP[type(model).__name__]
    fc2fcs = FC_FCS_MAP[layer_type]
    norm2fcs = NORM_FCS_MAP[layer_type]

    layers = collect_target_modules(model, layer_type)
    fcs = {}
    for l_name, layer in layers.items():
        name2fc = collect_target_modules(layer, nn.Linear, prefix=l_name)
        fcs.update(name2fc)

    # fold activation scales into weights (group_size=-1: per-channel)
    smooth_layers(layers, fc2fcs, norm2fcs, act_scales, -1, device)

    rmsnorms = collect_target_modules(model, norm_type)

    # swap every linear for its quantized counterpart
    for name, linear in fcs.items():
        linear.to(device)
        q_linear = QLinear.from_float(linear)
        parent_name, _, child_name = name.rpartition('.')
        parent = model.get_submodule(parent_name)
        setattr(parent, child_name, q_linear)
        linear.to('cpu')

    # swap every RMSNorm for its quantization-aware counterpart
    for name, norm in rmsnorms.items():
        norm.to(device)
        q_norm = QRMSNorm.from_float(norm)
        parent_name, _, child_name = name.rpartition('.')
        parent = model.get_submodule(parent_name)
        setattr(parent, child_name, q_norm)
        norm.to('cpu')

    # register the bundled modeling files so AutoModel can load the result
    if hasattr(model.config, 'auto_map'):
        model.config.auto_map.update(AUTO_MAP[type(model).__name__])
    else:
        model.config.auto_map = AUTO_MAP[type(model).__name__]

    model.save_pretrained(work_dir,
                          max_shard_size='2GB',
                          safe_serialization=False)
    tokenizer.save_pretrained(work_dir)
    # ship the matching modeling_*.py alongside the weights
    shutil.copy(MODEL_PATH_MAP[type(model).__name__], work_dir)
8,226 | import os
from pathlib import Path
from typing import Union
import numpy as np
import torch
def _export_weight(into: str,
kv_qparams: np.array,
out_path: str,
tm_params: dict = None):
"""Save kv_qparams to disk or copy to tm_params."""
if tm_params is None:
print(into)
kv_qparams.tofile(out_path)
else:
name = os.path.basename(out_path)
src = torch.from_numpy(kv_qparams)
for tm_tensor in tm_params[name]:
tm_tensor.copy_from(src)
tm_params.pop(name)
The provided code snippet includes necessary dependencies for implementing the `_export_sym` function. Write a Python function `def _export_sym(key_stats: dict, value_stats: dict, bits: int, out_dir: Union[str, Path], tp: int = 1, tm_params: dict = None) -> None` to solve the following problem:
Export symmetric quantization parameters to specified directory.
Here is the function:
def _export_sym(key_stats: dict,
                value_stats: dict,
                bits: int,
                out_dir: Union[str, Path],
                tp: int = 1,
                tm_params: dict = None) -> None:
    """Export symmetric KV quant scales, one file per layer and TP rank.

    quant:   q = f / scale
    dequant: f = q * scale
    """
    qmax = 2**(bits - 1) - 1
    keys_absmax = key_stats['absmax']
    values_absmax = value_stats['absmax']
    for layer_idx, name in enumerate(keys_absmax.keys()):
        k_absmax = keys_absmax[name]
        v_absmax = values_absmax[name]
        heads, dims = k_absmax.shape
        assert heads % tp == 0

        # one independent scale per tensor-parallel shard
        k_chunks = torch.chunk(k_absmax, tp)
        v_chunks = torch.chunk(v_absmax, tp)
        for rank in range(tp):
            k_s = k_chunks[rank].max() / qmax
            v_s = v_chunks[rank].max() / qmax
            kv_qparams = np.array([k_s, v_s], dtype=np.float32)
            out_path = out_dir / f'layers.{layer_idx}.past_kv_scale.{rank}.weight'  # noqa: E501
            info = f'Layer {layer_idx} MP {rank} qparam: {k_s} \t{v_s}'
            _export_weight(info, kv_qparams, out_path, tm_params)
8,227 | import os
from pathlib import Path
from typing import Union
import numpy as np
import torch
def _export_weight(into: str,
kv_qparams: np.array,
out_path: str,
tm_params: dict = None):
"""Save kv_qparams to disk or copy to tm_params."""
if tm_params is None:
print(into)
kv_qparams.tofile(out_path)
else:
name = os.path.basename(out_path)
src = torch.from_numpy(kv_qparams)
for tm_tensor in tm_params[name]:
tm_tensor.copy_from(src)
tm_params.pop(name)
The provided code snippet includes necessary dependencies for implementing the `_export_asym` function. Write a Python function `def _export_asym(key_stats: dict, value_stats: dict, bits: int, out_dir: Union[str, Path], tp: int = 1, tm_params: dict = None) -> None` to solve the following problem:
Export asymmetric quantization parameters to specified directory.
Here is the function:
def _export_asym(key_stats: dict,
                 value_stats: dict,
                 bits: int,
                 out_dir: Union[str, Path],
                 tp: int = 1,
                 tm_params: dict = None) -> None:
    """Export asymmetric KV quant params (scale + zero point) per layer/rank.

    zp    = (min + max) / 2
    scale = (max - min) / (2**bits - 1)
    quant:   q = (f - zp) / scale
    dequant: f = q * scale + zp
    """
    levels = 2**bits - 1
    keys_min = key_stats['min']
    values_min = value_stats['min']
    keys_max = key_stats['max']
    values_max = value_stats['max']
    for layer_idx, name in enumerate(keys_min.keys()):
        heads, dims = keys_min[name].shape
        assert heads % tp == 0

        # one independent (scale, zp) pair per tensor-parallel shard
        tp_k_min = torch.chunk(keys_min[name], tp)
        tp_v_min = torch.chunk(values_min[name], tp)
        tp_k_max = torch.chunk(keys_max[name], tp)
        tp_v_max = torch.chunk(values_max[name], tp)
        for rank in range(tp):
            k_lo = tp_k_min[rank].min()
            v_lo = tp_v_min[rank].min()
            k_hi = tp_k_max[rank].max()
            v_hi = tp_v_max[rank].max()

            k_scale = (k_hi - k_lo) / levels
            v_scale = (v_hi - v_lo) / levels
            k_zp = (k_hi + k_lo) / 2
            v_zp = (v_hi + v_lo) / 2

            kv_qparams = np.array([k_scale, k_zp, v_scale, v_zp],
                                  dtype=np.float32)
            out_path = out_dir / f'layers.{layer_idx}.past_kv_scale.{rank}.weight'
            info = f'Layer {layer_idx} MP {rank} qparam: ' \
                   f'\t{k_scale} \t{k_zp} \t{v_scale} \t{v_zp}'
            _export_weight(info, kv_qparams, out_path, tm_params)
8,228 | from typing import List
import torch
# Maps a decoder-layer class name to {layernorm attr: [linear layers that
# consume the norm's output]}. Used by AWQ-style smoothing to migrate
# activation scales from a layernorm into the following fully-connected
# weights.
NORM_FCS_MAP = {
    'LlamaDecoderLayer': {
        'input_layernorm':
        ['self_attn.k_proj', 'self_attn.q_proj', 'self_attn.v_proj'],
        'post_attention_layernorm': ['mlp.gate_proj', 'mlp.up_proj']
    },
    'InternLMDecoderLayer': {
        'input_layernorm':
        ['self_attn.k_proj', 'self_attn.q_proj', 'self_attn.v_proj'],
        'post_attention_layernorm': ['mlp.gate_proj', 'mlp.up_proj']
    },
    'InternLM2DecoderLayer': {
        'attention_norm': ['attention.wqkv'],
        'ffn_norm': ['feed_forward.w1', 'feed_forward.w3']
    },
    'QWenBlock': {
        'ln_1': ['attn.c_attn'],
        'ln_2': ['mlp.w1', 'mlp.w2']
    },
    'DecoderLayer': {
        'input_layernorm': ['self_attn.W_pack'],
        'post_attention_layernorm': ['mlp.gate_proj', 'mlp.up_proj']
    }
}
# Maps a decoder-layer class name to {linear attr: [downstream linear
# layers]} for scale migration between consecutive fully-connected layers
# (e.g. v_proj -> o_proj, up_proj -> down_proj).
FC_FCS_MAP = {
    'LlamaDecoderLayer': {
        'self_attn.v_proj': ['self_attn.o_proj'],
        'mlp.up_proj': ['mlp.down_proj']
    },
    'InternLMDecoderLayer': {
        'self_attn.v_proj': ['self_attn.o_proj'],
        'mlp.up_proj': ['mlp.down_proj']
    },
    'InternLM2DecoderLayer': {
        'feed_forward.w3': ['feed_forward.w2']
    },
    'QWenBlock': {
        'attn.c_attn': ['attn.c_proj'],
        'mlp.w1': ['mlp.c_proj']
    },
    'DecoderLayer': {
        'self_attn.W_pack': ['self_attn.o_proj'],
        'mlp.up_proj': ['mlp.down_proj']
    }
}
The provided code snippet includes necessary dependencies for implementing the `check_awq_supported` function. Write a Python function `def check_awq_supported(layer_type)` to solve the following problem:
Check if the smooth function is supported by inspecting layer type.
Here is the function:
def check_awq_supported(layer_type):
    """Check whether AWQ smoothing supports the given decoder-layer type.

    Args:
        layer_type: The decoder-layer class, or its class name as a str.

    Raises:
        NotImplementedError: If ``layer_type`` is neither a str nor a type,
            or if it is missing from ``NORM_FCS_MAP`` / ``FC_FCS_MAP``.
            (The exception type is kept for backward compatibility even for
            the bad-argument case.)
    """
    if isinstance(layer_type, str):
        layer_name = layer_type
    elif isinstance(layer_type, type):
        layer_name = layer_type.__name__
    else:
        raise NotImplementedError(
            f'layer_type must be a str or a type, got {type(layer_type)!r}')
    if layer_name not in NORM_FCS_MAP:
        raise NotImplementedError(
            f'{layer_name} is not registered in NORM_FCS_MAP')
    if layer_name not in FC_FCS_MAP:
        raise NotImplementedError(
            f'{layer_name} is not registered in FC_FCS_MAP')
8,229 | import json
import os
import shutil
from huggingface_hub import snapshot_download
from lmdeploy.turbomind.utils import get_hf_config_content
def get_hf_config_content(pretrained_model_name_or_path, **kwargs) -> dict:
    """Load and parse the ``config.json`` of a HuggingFace model."""
    path = get_hf_config_path(pretrained_model_name_or_path, **kwargs)
    with open(path, 'r') as cfg_file:
        return json.load(cfg_file)
MODELS = Registry('model', locations=['lmdeploy.model'])
])
def get_model_format(model_name: str, model_format: str):
    """Infer the turbomind input-model format when unset or 'awq'."""
    # Only the prefix before the first '-' identifies the model family.
    dash = model_name.find('-')
    if dash != -1:
        model_name = model_name[:dash]
    # rules:
    # 1) llama -> match special -> hf (if not matched)
    # 2) append awq (if model_format is awq)
    if model_format in (None, 'hf'):
        return special_input_model_map.get(model_name, 'hf')
    if model_format == 'awq':
        return special_input_model_map.get(model_name, 'hf') + '-awq'
    return model_format
INPUT_MODELS = Registry(
'source model', locations=['lmdeploy.turbomind.deploy.source_model.base'])
OUTPUT_MODELS = Registry(
'target model', locations=['lmdeploy.turbomind.deploy.target_model.base'])
class TurbomindModelConfig:
    """Config for turbomind model.

    Holds every key written to the `[llama]` section of turbomind's
    ``config.ini``. Fields defaulting to ``None`` must be filled before the
    config is considered ``valid``.

    NOTE(review): fields are bare class-level annotations and ``from_dict``/
    ``from_engine_config`` take ``cls`` and call ``cls(**...)``, so this
    class is presumably ``@dataclass``-decorated and those two methods
    ``@classmethod``s in the original source — the decorators appear to have
    been stripped by extraction; confirm against upstream.
    """
    # -- model architecture (filled from the source checkpoint) --
    model_name: str = None
    tensor_para_size: int = None
    head_num: int = None
    kv_head_num: int = None
    vocab_size: int = None
    num_layer: int = None
    inter_size: int = None
    norm_eps: float = None
    attn_bias: int = None
    start_id: int = None
    end_id: int = None
    session_len: int = None
    # -- weight / rotary-embedding settings --
    weight_type: str = 'fp16'
    rotary_embedding: int = 128
    rope_theta: float = 10000.0
    size_per_head: int = 128
    group_size: int = 0
    # -- scheduling and kv-cache settings --
    max_batch_size: int = 64
    max_context_token_num: int = 1
    step_length: int = 1
    cache_max_entry_count: float = 0.8
    cache_block_seq_len: int = 128
    cache_chunk_size: int = -1
    num_tokens_per_iter: int = 0
    max_prefill_iters: int = 1
    extra_tokens_per_iter: int = 0
    use_context_fmha: int = 1
    quant_policy: int = 0
    max_position_embeddings: int = 0
    rope_scaling_factor: float = 0.0
    use_logn_attn: int = 0

    def from_dict(cls, env, allow_none=False):
        """Construct from dict.

        Keeps only keys matching constructor parameters whose value is not
        None. With ``allow_none=True``, required (no-default) parameters
        missing from ``env`` are pre-filled with ``None`` instead of raising.
        """
        params = inspect.signature(cls).parameters
        used = {k: v for k, v in env.items() if k in params and v is not None}
        if not allow_none:
            return cls(**used)
        else:
            # Seed every parameter that has no default with None first.
            default = {
                k: None
                for k in params.keys() if params[k].default is inspect._empty
            }
            default.update(used)
            return cls(**default)

    def from_engine_config(cls, config: TurbomindEngineConfig):
        """Build a model config from a ``TurbomindEngineConfig``."""
        env = copy.deepcopy(config.__dict__)
        env['tensor_para_size'] = env['tp']  # engine names this field 'tp'
        ret = TurbomindModelConfig.from_dict(env, allow_none=True)
        ret.rotary_embedding = ret.size_per_head
        # workround to support `max_prefill_token_num` in turbomind engine
        if config.max_prefill_token_num is not None and \
                config.session_len is not None:
            ret.num_tokens_per_iter = config.max_prefill_token_num
            ret.max_prefill_iters = (config.session_len +
                                     config.max_prefill_token_num -
                                     1) // config.max_prefill_token_num
        return ret

    def toini(self):
        """Serialize this config as an INI string under a `[llama]` section."""
        config = copy.deepcopy(self.__dict__)
        parser = ConfigParser()
        parser['llama'] = config
        with io.StringIO() as ss:
            parser.write(ss)
            ss.seek(0)
            ini = ss.read()
        return ini

    def __str__(self):
        # JSON dump of all fields, mainly for logging.
        return json.dumps(self.__dict__, indent=2)

    def valid(self):
        """Check if cfg is valid (no field left as None)."""
        for _, v in self.__dict__.items():
            if v is None:
                return False
        return True
__version__ = '0.2.5'
The provided code snippet includes necessary dependencies for implementing the `export_turbomind_config` function. Write a Python function `def export_turbomind_config(model_name: str, model_path: str, work_dir: str, model_format: str = 'awq', group_size: int = 128, tp: int = 1)` to solve the following problem:
Export hf lmdeploy model and config.json.
Here is the function:
def export_turbomind_config(model_name: str,
                            model_path: str,
                            work_dir: str,
                            model_format: str = 'awq',
                            group_size: int = 128,
                            tp: int = 1):
    """Export hf lmdeploy model and config.json.

    Copies the bundled hf_repo scaffolding into ``work_dir``, runs the
    quantized weights through the turbomind output-model pipeline in memory,
    and merges the turbomind runtime config into the exported
    ``config.json``.

    Args:
        model_name: Chat-template name; must be registered in ``MODELS``.
        model_path: HF model id or local path of the source model.
        work_dir: Destination directory of the exported model.
        model_format: Source weight format (default 'awq').
        group_size: Weight-quantization group size.
        tp: Tensor-parallel size recorded in the config.
    """
    import lmdeploy
    from lmdeploy.model import MODELS
    from lmdeploy.turbomind.deploy.converter import get_model_format
    from lmdeploy.turbomind.deploy.source_model.base import INPUT_MODELS
    from lmdeploy.turbomind.deploy.target_model.base import (
        OUTPUT_MODELS, TurbomindModelConfig)

    assert model_name in MODELS.module_dict.keys(), \
        f"'{model_name}' is not supported. " \
        f'The supported models are: {MODELS.module_dict.keys()}'

    # Resolve a hub model id to its local snapshot (no network access).
    if not os.path.exists(model_path):
        model_path = snapshot_download(model_path, local_files_only=True)

    # Copy the packaged hf_repo scaffolding files into the work dir.
    lmdeploy_dir = os.path.split(lmdeploy.__file__)[0]
    hf_repo = os.path.join(lmdeploy_dir, 'turbomind', 'hf_repo')
    files = os.listdir(hf_repo)
    for file in files:
        src = os.path.join(hf_repo, file)
        dst = os.path.join(work_dir, file)
        shutil.copy(src, dst)

    # Build a 4-bit ('w4') turbomind config and instantiate the converter
    # without writing weight files (to_file=False).
    cfg = TurbomindModelConfig.from_dict({}, allow_none=True)
    cfg.model_name = model_name
    cfg.tensor_para_size = tp
    cfg.rotary_embedding = cfg.size_per_head
    cfg.group_size = group_size
    cfg.weight_type = 'int4'
    output_format = 'w4'
    inferred_model_format = get_model_format(model_name, model_format)
    input_model = INPUT_MODELS.get(inferred_model_format)(
        model_path=model_path, tokenizer_path=work_dir, ckpt_path=work_dir)
    output_model = OUTPUT_MODELS.get(output_format)(input_model=input_model,
                                                    cfg=cfg,
                                                    to_file=False,
                                                    out_dir='')

    # Merge the original HF config into the exported config.json; keys that
    # clash with the new config are preserved under a '__' prefix.
    old_data = get_hf_config_content(model_path)
    config = output_model.cfg.__dict__
    config_file = os.path.join(work_dir, 'config.json')
    with open(config_file) as f:
        data = json.load(f)
    for k, v in old_data.items():
        if k in data:
            data[f'__{k}'] = v
        else:
            data[k] = v
    data['turbomind'] = config
    from lmdeploy.version import __version__
    data['lmdeploy_version'] = __version__
    with open(config_file, 'w') as f:
        f.write(json.dumps(data, indent=2) + '\n')
8,230 | import numpy as np
import torch
def set_seed(seed):
    """Seed the numpy and torch CPU RNGs so that runs are reproducible."""
    for seeder in (np.random.seed, torch.random.manual_seed):
        seeder(seed)
8,231 | import numpy as np
import torch
def get_wikitext2(tokenizer, nsamples, seed, seqlen):
    """Load Wikitext-2 train/test splits and build calibration samples.

    Args:
        tokenizer: Tokenizer to encode text.
        nsamples: Number of samples to take from train set.
        seed: Random seed for sampling.
        seqlen: Maximum sequence length.

    Returns:
        A list of ``(input_ids, labels)`` training pairs and the fully
        tokenized test split.
    """
    import random

    from datasets import load_dataset
    train_raw = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')
    test_raw = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test')
    trainenc = tokenizer('\n\n'.join(train_raw['text']), return_tensors='pt')
    testenc = tokenizer('\n\n'.join(test_raw['text']), return_tensors='pt')

    random.seed(seed)
    trainloader = []
    n_tokens = trainenc.input_ids.shape[1]
    for _ in range(nsamples):
        # One randint per sample keeps RNG consumption identical for a seed.
        start = random.randint(0, n_tokens - seqlen)
        inp = trainenc.input_ids[:, start:start + seqlen]
        tar = inp.clone()
        tar[:, :-1] = -100  # supervise only the final token
        trainloader.append((inp, tar))
    return trainloader, testenc
def get_ptb(tokenizer, nsamples, seed, seqlen):
    """Load PTB train/validation splits and build calibration samples.

    Args:
        tokenizer: Tokenizer to encode text.
        nsamples: Number of samples to take from train set.
        seed: Random seed for sampling.
        seqlen: Maximum sequence length.

    Returns:
        A list of ``(input_ids, labels)`` training pairs and the fully
        tokenized validation split.
    """
    import random

    from datasets import load_dataset
    train_raw = load_dataset('ptb_text_only', 'penn_treebank', split='train')
    val_raw = load_dataset('ptb_text_only',
                           'penn_treebank',
                           split='validation')
    trainenc = tokenizer('\n\n'.join(train_raw['sentence']),
                         return_tensors='pt')
    testenc = tokenizer('\n\n'.join(val_raw['sentence']), return_tensors='pt')

    random.seed(seed)
    trainloader = []
    n_tokens = trainenc.input_ids.shape[1]
    for _ in range(nsamples):
        # One randint per sample keeps RNG consumption identical for a seed.
        start = random.randint(0, n_tokens - seqlen)
        inp = trainenc.input_ids[:, start:start + seqlen]
        tar = inp.clone()
        tar[:, :-1] = -100  # supervise only the final token
        trainloader.append((inp, tar))
    return trainloader, testenc
def get_c4(tokenizer, nsamples, seed, seqlen):
    """Load C4 train and validation subsets and tokenize.

    Args:
        tokenizer: Tokenizer to encode text.
        nsamples: Number of samples to take from train set.
        seed: Random seed for sampling.
        seqlen: Maximum sequence length.

    Returns:
        train_loader: List of sampled and tokenized training examples.
        test_enc: 256 random validation windows, concatenated and wrapped in
            an object exposing ``input_ids`` like a tokenizer output.
    """
    from datasets import load_dataset
    traindata = load_dataset(
        'allenai/c4',
        'allenai--c4',
        data_files={'train': 'en/c4-train.00000-of-01024.json.gz'},
        split='train',
        use_auth_token=False)
    valdata = load_dataset(
        'allenai/c4',
        'allenai--c4',
        data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'},
        split='validation',
        use_auth_token=False)

    import random
    random.seed(seed)
    trainloader = []
    for _ in range(nsamples):
        # Rejection-sample documents until one is at least seqlen tokens.
        while True:
            i = random.randint(0, len(traindata) - 1)
            trainenc = tokenizer(traindata[i]['text'], return_tensors='pt')
            if trainenc.input_ids.shape[1] >= seqlen:
                break
        i = random.randint(0, trainenc.input_ids.shape[1] - seqlen)
        j = i + seqlen
        inp = trainenc.input_ids[:, i:j]
        tar = inp.clone()
        tar[:, :-1] = -100  # only the last position carries a label
        trainloader.append((inp, tar))

    import random
    # Fixed seed 0 so the validation subset is identical across runs,
    # independent of the sampling seed used for training data.
    random.seed(0)
    valenc = []
    for _ in range(256):
        while True:
            i = random.randint(0, len(valdata) - 1)
            tmp = tokenizer(valdata[i]['text'], return_tensors='pt')
            if tmp.input_ids.shape[1] >= seqlen:
                break
        i = random.randint(0, tmp.input_ids.shape[1] - seqlen)
        j = i + seqlen
        valenc.append(tmp.input_ids[:, i:j])
    valenc = torch.hstack(valenc)

    class TokenizerWrapper:
        # Minimal stand-in mimicking a tokenizer output with .input_ids.

        def __init__(self, input_ids):
            self.input_ids = input_ids

    valenc = TokenizerWrapper(valenc)

    return trainloader, valenc
def get_ptb_new(tokenizer, nsamples, seed, seqlen):
    """Load PTB train and test datasets and tokenize.

    Unlike :func:`get_ptb`, this variant evaluates on the *test* split and
    joins sentences with a single space instead of blank lines.

    Args:
        tokenizer: Tokenizer to encode text.
        nsamples: Number of samples to take from train set.
        seed: Random seed for sampling.
        seqlen: Maximum sequence length.

    Returns:
        train_loader: List of sampled and tokenized training examples.
        test_enc: Full tokenized PTB test set.
    """
    from datasets import load_dataset
    traindata = load_dataset('ptb_text_only', 'penn_treebank', split='train')
    testdata = load_dataset('ptb_text_only', 'penn_treebank', split='test')

    trainenc = tokenizer(' '.join(traindata['sentence']), return_tensors='pt')
    testenc = tokenizer(' '.join(testdata['sentence']), return_tensors='pt')

    import random
    random.seed(seed)
    trainloader = []
    for _ in range(nsamples):
        # Sample a random seqlen-token window from the token stream.
        i = random.randint(0, trainenc.input_ids.shape[1] - seqlen)
        j = i + seqlen
        inp = trainenc.input_ids[:, i:j]
        tar = inp.clone()
        tar[:, :-1] = -100  # only the last position carries a label
        trainloader.append((inp, tar))
    return trainloader, testenc
def get_c4_new(tokenizer, nsamples, seed, seqlen):
    """Load C4 train and validation subsets and tokenize.

    Unlike :func:`get_c4`, the validation data is a deterministic prefix of
    the first 1100 validation documents rather than random windows.

    Args:
        tokenizer: Tokenizer to encode text.
        nsamples: Number of samples to take from train set.
        seed: Random seed for sampling.
        seqlen: Maximum sequence length.

    Returns:
        train_loader: List of sampled and tokenized training examples.
        test_enc: The first ``256 * seqlen`` validation tokens wrapped in an
            object exposing ``input_ids`` like a tokenizer output.
    """
    from datasets import load_dataset
    traindata = load_dataset(
        'allenai/c4',
        'allenai--c4',
        data_files={'train': 'en/c4-train.00000-of-01024.json.gz'},
        split='train')
    valdata = load_dataset(
        'allenai/c4',
        'allenai--c4',
        data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'},
        split='validation')

    import random
    random.seed(seed)
    trainloader = []
    for _ in range(nsamples):
        # Rejection-sample documents until one is at least seqlen tokens.
        while True:
            i = random.randint(0, len(traindata) - 1)
            trainenc = tokenizer(traindata[i]['text'], return_tensors='pt')
            if trainenc.input_ids.shape[1] >= seqlen:
                break
        i = random.randint(0, trainenc.input_ids.shape[1] - seqlen)
        j = i + seqlen
        inp = trainenc.input_ids[:, i:j]
        tar = inp.clone()
        tar[:, :-1] = -100  # only the last position carries a label
        trainloader.append((inp, tar))

    # Deterministic validation set: first 1100 docs, truncated to 256*seqlen.
    valenc = tokenizer(' '.join(valdata[:1100]['text']), return_tensors='pt')
    valenc = valenc.input_ids[:, :(256 * seqlen)]

    class TokenizerWrapper:
        # Minimal stand-in mimicking a tokenizer output with .input_ids.

        def __init__(self, input_ids):
            self.input_ids = input_ids

    valenc = TokenizerWrapper(valenc)

    return trainloader, valenc
def get_pileval(tokenizer, nsamples, seed, seqlen=512):
    """Load the Pile validation set and build calibration blocks.

    Args:
        tokenizer: Tokenizer to encode text.
        nsamples: Number of lines to accumulate before splitting.
        seed: Random seed used to shuffle the dataset.
        seqlen: Block size the concatenated samples are split into.

    Returns:
        A list of ``seqlen``-token blocks, and ``None`` (this loader has no
        separate evaluation set).

    Raises:
        InterruptedError: If the remote dataset cannot be generated.
    """
    from datasets import load_dataset
    from datasets.builder import DatasetGenerationError
    try:
        dataset = load_dataset(
            'json',
            data_files='https://the-eye.eu/public/AI/pile/val.jsonl.zst',
            split='train')
    except DatasetGenerationError:
        raise InterruptedError('There have been some issues when generating '
                               'the dataset, you could try to download it '
                               'locally first, and replace the `data_files`'
                               'with local addresses or use other datasets '
                               '(c4, wiki, ptb).')
    dataset = dataset.shuffle(seed=seed)
    samples = []
    n_run = 0
    for data in dataset:
        line = data['text']
        line = line.strip()
        line_encoded = tokenizer.encode(line)
        # NOTE(review): this length filter is hard-coded to 512 even though
        # `seqlen` is a parameter — confirm whether it should be `seqlen`.
        if len(line_encoded) > 512:
            continue
        sample = torch.tensor([line_encoded])
        if sample.numel() == 0:
            continue
        samples.append(sample)
        n_run += 1
        if n_run == nsamples:
            break
    # now concatenate all samples and split according to block size
    cat_samples = torch.cat(samples, dim=1)
    n_split = cat_samples.shape[1] // seqlen
    print(f' * Split into {n_split} blocks')
    return [
        cat_samples[:, i * seqlen:(i + 1) * seqlen] for i in range(n_split)
    ], None
The provided code snippet includes necessary dependencies for implementing the `get_calib_loaders` function. Write a Python function `def get_calib_loaders(name, tokenizer, nsamples=128, seed=0, seqlen=2048)` to solve the following problem:
Get calibration data loaders for a dataset. Args: name: Dataset name ('wikitext2', 'ptb', 'c4', etc). tokenizer: Tokenizer to encode text. nsamples: Number of samples to take from train set. seed: Random seed for sampling. seqlen: Maximum sequence length. Returns: train_loader: List of sampled and tokenized training examples. test_data: Full tokenized validation set.
Here is the function:
def get_calib_loaders(name, tokenizer, nsamples=128, seed=0, seqlen=2048):
    """Get calibration data loaders for a dataset.

    Args:
        name: Dataset name; one of 'wikitext2', 'ptb', 'ptb_new', 'c4',
            'c4_new' or 'pileval'.
        tokenizer: Tokenizer to encode text.
        nsamples: Number of samples to take from train set.
        seed: Random seed for sampling.
        seqlen: Maximum sequence length.

    Returns:
        train_loader: List of sampled and tokenized training examples.
        test_data: Full tokenized validation set (``None`` for pileval).

    Raises:
        ValueError: If ``name`` matches no supported dataset.
    """
    if 'wikitext2' in name:
        return get_wikitext2(tokenizer, nsamples, seed, seqlen)
    if 'ptb' in name:
        loader = get_ptb_new if 'new' in name else get_ptb
        return loader(tokenizer, nsamples, seed, seqlen)
    if 'c4' in name:
        loader = get_c4_new if 'new' in name else get_c4
        return loader(tokenizer, nsamples, seed, seqlen)
    if 'pileval' in name:
        return get_pileval(tokenizer, nsamples, seed, seqlen)
    # Previously an unknown name silently returned None; fail loudly instead.
    raise ValueError(f'Unsupported calibration dataset: {name!r}')
8,232 | from typing import Dict, List, Tuple, Union
from torch import nn
def collect_target_modules(model: nn.Module,
                           target: Union[str, type],
                           skip_names: List[str] = None,
                           prefix: str = '') -> Dict[str, nn.Module]:
    """Collects the specific target modules from the model.

    Args:
        model : The PyTorch module from which to collect the target modules.
        target : The specific target to be collected. It can be a class of a
            module or the name of a module.
        skip_names : List of names of modules to be skipped during collection.
        prefix : A string to be added as a prefix to the module names.

    Returns:
        A dictionary mapping from module names to module instances.

    Raises:
        TypeError: If ``target`` is neither a str nor a type.
    """
    # The original default was a mutable list ([]), a classic Python
    # pitfall; use None as the sentinel and normalize here instead.
    if skip_names is None:
        skip_names = []
    if not isinstance(target, (type, str)):
        raise TypeError('Target must be a string (name of the module) '
                        'or a type (class of the module)')

    def _is_target(n, m):
        # Match by class name (str target) or by isinstance (type target).
        if isinstance(target, str):
            return target == type(m).__name__ and n not in skip_names
        return isinstance(m, target) and n not in skip_names

    name2mod = {}
    for name, mod in model.named_modules():
        m_name = f'{prefix}.{name}' if prefix else name
        if _is_target(name, mod):
            name2mod[m_name] = mod
    return name2mod
The provided code snippet includes necessary dependencies for implementing the `collect_target_weights` function. Write a Python function `def collect_target_weights(model: nn.Module, target: Union[str, type], skip_names: List[str]) -> Dict[str, nn.Module]` to solve the following problem:
Collects weights of the specific target modules from the model. Args: model : The PyTorch module from which to collect the weights of target modules. target : The specific target whose weights to be collected. It can be a class of a module or the name of a module. skip_names : Names of modules to be skipped during weight collection. Returns: A dictionary mapping from module instances to their corresponding weights.
Here is the function:
def collect_target_weights(model: nn.Module, target: Union[str, type],
                           skip_names: List[str]) -> Dict[str, nn.Module]:
    """Collect the ``weight`` tensors of all modules matching ``target``.

    Args:
        model : Module tree to search.
        target : Module class, or class name, to match.
        skip_names : Names of modules excluded from the search.

    Returns:
        A dictionary mapping each matched module instance to its weight.
    """
    matched = collect_target_modules(model, target, skip_names)
    mod2weight = {}
    for mod in matched.values():
        assert hasattr(
            mod, 'weight'), "The module does not have a 'weight' attribute"
        mod2weight[mod] = mod.weight
    return mod2weight
8,233 | from typing import Dict, List, Tuple, Union
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `bimap_name_mod` function. Write a Python function `def bimap_name_mod( name2mod_mappings: List[Dict[str, nn.Module]] ) -> Tuple[Dict[str, nn.Module], Dict[nn.Module, str]]` to solve the following problem:
Generates bidirectional maps from module names to module instances and vice versa. Args: name2mod_mappings : List of dictionaries each mapping from module names to module instances. Returns: Two dictionaries providing bidirectional mappings between module names and module instances.
Here is the function:
def bimap_name_mod(
    name2mod_mappings: List[Dict[str, nn.Module]]
) -> Tuple[Dict[str, nn.Module], Dict[nn.Module, str]]:
    """Build forward and reverse lookups between module names and modules.

    Args:
        name2mod_mappings : Dictionaries, each mapping module names to
            module instances.

    Returns:
        ``(name2mod, mod2name)``: the merged name->module mapping and its
        inverse. Later mappings win on duplicates, as with ``dict.update``.
    """
    name2mod: Dict[str, nn.Module] = {}
    mod2name: Dict[nn.Module, str] = {}
    for mapping in name2mod_mappings:
        mod2name.update({mod: name for name, mod in mapping.items()})
        name2mod.update(mapping)
    return name2mod, mod2name
8,234 | from typing import NamedTuple, Optional
import torch
class QParams(NamedTuple):
    """A class to hold the quantization parameters."""
    # Per-tensor/channel/group scale factors used for (de)quantization.
    scales: torch.Tensor
    # Integer zero-points; None for symmetric (absmax-based) schemes.
    zero_points: Optional[torch.Tensor]
The provided code snippet includes necessary dependencies for implementing the `cal_qparams_per_channel_absmax` function. Write a Python function `def cal_qparams_per_channel_absmax(w: torch.Tensor, n_bits: int, return_stats: bool = False) -> QParams` to solve the following problem:
Calculate quantization parameters for each channel using absolute max value.
Here is the function:
def cal_qparams_per_channel_absmax(w: torch.Tensor,
                                   n_bits: int,
                                   return_stats: bool = False) -> QParams:
    """Compute symmetric per-channel quantization scales from abs-max.

    Args:
        w: Weight tensor of shape (out_channels, in_channels).
        n_bits: Target bit width.
        return_stats: When True, also return the per-channel abs-max values.
    """
    absmax = w.float().abs().max(dim=-1, keepdim=True)[0]
    # Symmetric signed range: [-2^(n-1), 2^(n-1) - 1].
    scales = absmax / (2**(n_bits - 1) - 1)
    qparams = QParams(scales=scales, zero_points=None)
    return (qparams, absmax) if return_stats else qparams
8,235 | from typing import NamedTuple, Optional
import torch
class QParams(NamedTuple):
    """A class to hold the quantization parameters."""
    # Scale factors; see the identical QParams declaration earlier in file.
    scales: torch.Tensor
    # Integer zero-points; None for symmetric (absmax-based) schemes.
    zero_points: Optional[torch.Tensor]
def precise_round(x):
    """Round half away from zero (unlike torch.round's half-to-even)."""
    return x.abs().add(0.5).floor().mul(x.sign())
The provided code snippet includes necessary dependencies for implementing the `cal_qparams_per_channel_minmax` function. Write a Python function `def cal_qparams_per_channel_minmax(w: torch.Tensor, n_bits: int, return_stats: bool = False) -> QParams` to solve the following problem:
Calculate quantization parameters for each channel using min and max values.
Here is the function:
def cal_qparams_per_channel_minmax(w: torch.Tensor,
                                   n_bits: int,
                                   return_stats: bool = False) -> QParams:
    """Compute asymmetric per-channel quantization parameters from min/max.

    Args:
        w: Weight tensor of shape (out_channels, in_channels).
        n_bits: Target bit width.
        return_stats: When True, also return the per-channel (min, max).
    """
    float_w = w.float()
    w_min = float_w.min(dim=-1, keepdim=True)[0]
    w_max = float_w.max(dim=-1, keepdim=True)[0]
    q_max = 2**n_bits - 1
    scales = (w_max - w_min)
    # Clamp like the per-tensor variant so a constant channel (max == min)
    # cannot yield a zero scale and inf/nan zero-points.
    scales = scales.clamp_(min=1e-5).div_(q_max)
    zero_points = precise_round(-w_min / scales)
    if return_stats:
        return QParams(scales=scales, zero_points=zero_points), (w_min, w_max)
    else:
        return QParams(scales=scales, zero_points=zero_points)
8,236 | from typing import NamedTuple, Optional
import torch
class QParams(NamedTuple):
    """A class to hold the quantization parameters."""
    # Scale factors; see the identical QParams declaration earlier in file.
    scales: torch.Tensor
    # Integer zero-points; None for symmetric (absmax-based) schemes.
    zero_points: Optional[torch.Tensor]
The provided code snippet includes necessary dependencies for implementing the `cal_qparams_per_group_absmax` function. Write a Python function `def cal_qparams_per_group_absmax(w: torch.Tensor, n_bits: int, group_size: int, return_stats: bool = False) -> QParams` to solve the following problem:
Calculate quantization parameters for each group using absolute max value.
Here is the function:
def cal_qparams_per_group_absmax(w: torch.Tensor,
                                 n_bits: int,
                                 group_size: int,
                                 return_stats: bool = False) -> QParams:
    """Compute symmetric per-group quantization scales from abs-max.

    Args:
        w: Weight tensor of shape (out_channels, in_channels).
        n_bits: Target bit width.
        group_size: Number of input channels per quantization group.
        return_stats: When True, also return the per-group abs-max values.
    """
    outc, inc = w.shape
    assert inc >= group_size, \
        'Input channels should be greater than or equal to group_size.'
    assert inc % group_size == 0, \
        'Input channels should be divisible by group_size.'
    grouped = w.float().abs().reshape(outc, -1, group_size)
    absmax = grouped.max(dim=-1, keepdim=True)[0]
    # Symmetric signed range: [-2^(n-1), 2^(n-1) - 1].
    scales = absmax / (2**(n_bits - 1) - 1)
    qparams = QParams(scales=scales, zero_points=None)
    return (qparams, absmax) if return_stats else qparams
8,237 | from typing import NamedTuple, Optional
import torch
class QParams(NamedTuple):
    """A class to hold the quantization parameters."""
    # Scale factors; see the identical QParams declaration earlier in file.
    scales: torch.Tensor
    # Integer zero-points; None for symmetric (absmax-based) schemes.
    zero_points: Optional[torch.Tensor]
# Round half away from zero (torch.round rounds half to even).
def precise_round(x):
    return x.sign() * (x.abs() + 0.5).floor()
The provided code snippet includes necessary dependencies for implementing the `cal_qparams_per_group_minmax` function. Write a Python function `def cal_qparams_per_group_minmax(w: torch.Tensor, n_bits: int, group_size: int, return_stats: bool = False) -> QParams` to solve the following problem:
Calculate quantization parameters for each group using min and max values.
Here is the function:
def cal_qparams_per_group_minmax(w: torch.Tensor,
                                 n_bits: int,
                                 group_size: int,
                                 return_stats: bool = False) -> QParams:
    """Compute asymmetric per-group quantization parameters from min/max.

    Args:
        w: Weight tensor of shape (out_channels, in_channels).
        n_bits: Target bit width.
        group_size: Number of input channels per quantization group.
        return_stats: When True, also return the per-group (min, max).
    """
    outc, inc = w.shape
    assert inc >= group_size, \
        'Input channels should be greater than or equal to group_size.'
    assert inc % group_size == 0, \
        'Input channels should be divisible by group_size.'
    float_w = w.float()
    w_group_wise = float_w.reshape(outc, -1, group_size)
    w_min = w_group_wise.min(dim=-1, keepdim=True)[0]
    w_max = w_group_wise.max(dim=-1, keepdim=True)[0]
    q_max = 2**n_bits - 1
    scales = (w_max - w_min)
    # Clamp like the per-tensor variant so a constant group (max == min)
    # cannot yield a zero scale and inf/nan zero-points.
    scales = scales.clamp_(min=1e-5).div_(q_max)
    zero_points = precise_round(-w_min / scales)
    if return_stats:
        return QParams(scales=scales, zero_points=zero_points), (w_min, w_max)
    else:
        return QParams(scales=scales, zero_points=zero_points)
8,238 | from typing import NamedTuple, Optional
import torch
class QParams(NamedTuple):
    """A class to hold the quantization parameters."""
    # Scale factors; see the identical QParams declaration earlier in file.
    scales: torch.Tensor
    # Integer zero-points; None for symmetric (absmax-based) schemes.
    zero_points: Optional[torch.Tensor]
# Round half away from zero (torch.round rounds half to even).
def precise_round(x):
    return x.sign() * (x.abs() + 0.5).floor()
The provided code snippet includes necessary dependencies for implementing the `cal_qparams_per_tensor_minmax` function. Write a Python function `def cal_qparams_per_tensor_minmax(w: torch.Tensor, n_bits: int, return_stats: bool = False) -> QParams` to solve the following problem:
Calculate quantization parameters for the entire tensor using min and max values.
Here is the function:
def cal_qparams_per_tensor_minmax(w: torch.Tensor,
                                  n_bits: int,
                                  return_stats: bool = False) -> QParams:
    """Compute asymmetric whole-tensor quantization parameters from min/max.

    Args:
        w: Weight tensor.
        n_bits: Target bit width.
        return_stats: When True, also return the global (min, max).
    """
    float_w = w.float()
    w_min, w_max = float_w.min(), float_w.max()
    q_max = 2**n_bits - 1
    # Clamp protects against a zero range (constant tensor).
    scales = (w_max - w_min).clamp_(min=1e-5).div_(q_max)
    zero_points = precise_round(-w_min / scales)
    qparams = QParams(scales=scales, zero_points=zero_points)
    return (qparams, (w_min, w_max)) if return_stats else qparams
8,239 | from typing import NamedTuple, Optional
import torch
class QParams(NamedTuple):
    """A class to hold the quantization parameters."""
    # Scale factors; see the identical QParams declaration earlier in file.
    scales: torch.Tensor
    # Integer zero-points; None for symmetric (absmax-based) schemes.
    zero_points: Optional[torch.Tensor]
The provided code snippet includes necessary dependencies for implementing the `cal_qparams_per_tensor_absmax` function. Write a Python function `def cal_qparams_per_tensor_absmax(w: torch.Tensor, n_bits: int, return_stats: bool = False) -> QParams` to solve the following problem:
Calculate quantization parameters for the entire tensor using absolute max value.
Here is the function:
def cal_qparams_per_tensor_absmax(w: torch.Tensor,
                                  n_bits: int,
                                  return_stats: bool = False) -> QParams:
    """Compute one symmetric quantization scale from the tensor's abs-max.

    Args:
        w: Weight tensor.
        n_bits: Target bit width.
        return_stats: When True, also return the global abs-max value.
    """
    absmax = w.float().abs().max()
    # Symmetric signed range: [-2^(n-1), 2^(n-1) - 1].
    scales = absmax / (2**(n_bits - 1) - 1)
    qparams = QParams(scales=scales, zero_points=None)
    return (qparams, absmax) if return_stats else qparams
8,240 | import torch
from transformers import AutoConfig, AutoModelForCausalLM
from lmdeploy.pytorch.accel import LoadNoInit
class LoadNoInit:
    """Context manager that turns torch's parameter initializers into no-ops.

    Skipping random initialization speeds up model construction when the
    weights are immediately overwritten by a checkpoint load. The original
    initializer functions are captured in ``__init__`` and restored on exit.
    """

    # torch.nn.init functions that get patched while the context is active.
    _INIT_FN_NAMES = ('constant_', 'zeros_', 'ones_', 'uniform_', 'normal_',
                      'kaiming_uniform_', 'kaiming_normal_')

    def __init__(self):
        # Save the real initializers under the same attribute names the
        # patched functions had (constant_, zeros_, ...).
        for fn_name in self._INIT_FN_NAMES:
            setattr(self, fn_name, getattr(torch.nn.init, fn_name))
        self.tensor_normal_ = torch.Tensor.normal_

    def __enter__(self, *args, **kwargs):
        """Replace initializers with no-op."""
        noop = lambda *args, **kwargs: None  # noqa: E731
        for fn_name in self._INIT_FN_NAMES:
            setattr(torch.nn.init, fn_name, noop)
        torch.Tensor.normal_ = noop

    def __exit__(self, *args, **kwargs):
        """Restore the original initializer functions."""
        for fn_name in self._INIT_FN_NAMES:
            setattr(torch.nn.init, fn_name, getattr(self, fn_name))
        torch.Tensor.normal_ = self.tensor_normal_
def load_hf_from_pretrained(pretrained_model_name_or_path,
                            dtype=torch.float16,
                            **kwargs):
    """Load a HuggingFace causal LM while skipping weight initialization.

    Args:
        pretrained_model_name_or_path: Model id or local path.
        dtype: torch.float16 or torch.bfloat16.
        **kwargs: Extra arguments forwarded to ``from_pretrained``; any
            caller-supplied ``config`` is discarded and rebuilt here.

    Raises:
        RuntimeError: If bf16 is requested but the device lacks bf16 support.
    """
    if dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
        raise RuntimeError('Your device does not supports bf16(bfloat16), '
                           'please change to fp16(float16)')

    kwargs.pop('config', None)  # the config is rebuilt from the model path
    hf_config = AutoConfig.from_pretrained(pretrained_model_name_or_path,
                                           torch_dtype=dtype,
                                           trust_remote_code=True)
    # HACK hard code for qwen, other configs do not have the `fp16` attribute.
    if dtype == torch.float16:
        hf_config.fp16 = True
    elif dtype == torch.bfloat16:
        hf_config.bf16 = True

    with LoadNoInit():
        # Load model with initializers disabled; weights come from the
        # checkpoint anyway.
        model = AutoModelForCausalLM.from_pretrained(
            pretrained_model_name_or_path, config=hf_config, **kwargs)
        model.config.use_cache = False

    return model
8,241 | from typing import Any, Dict, List, Tuple, Union
import torch
The provided code snippet includes necessary dependencies for implementing the `split_decoder_layer_inputs` function. Write a Python function `def split_decoder_layer_inputs( *args: Union[torch.Tensor, Any], **kwargs: Union[torch.Tensor, Any] ) -> Tuple[List[List[Any]], List[Dict[str, Any]]]` to solve the following problem:
This function splits batched decoder layer inputs into individual elements. Args: *args (Union[torch.Tensor, Any]): Positional arguments which could be a mix of tensors and other types. **kwargs (Union[torch.Tensor, Any]): Keyword arguments which could be a mix of tensors and other types. Returns: Tuple[List[List[Any]], List[Dict[str, Any]]]: A tuple containing two lists, one for positional arguments, one for keyword arguments. Each list contains individual elements from the batch.
Here is the function:
def split_decoder_layer_inputs(
    *args: Union[torch.Tensor, Any], **kwargs: Union[torch.Tensor, Any]
) -> Tuple[List[List[Any]], List[Dict[str, Any]]]:
    """Split batched decoder-layer inputs into per-sample inputs.

    Tensors whose leading dimension equals the batch size are sliced into
    single-element batches; every other value is passed through unchanged.

    Args:
        *args (Union[torch.Tensor, Any]): Positional arguments, a mix of
            tensors and other types. The first one must be a tensor and
            defines the batch size.
        **kwargs (Union[torch.Tensor, Any]): Keyword arguments, a mix of
            tensors and other types.

    Returns:
        Tuple[List[List[Any]], List[Dict[str, Any]]]: Per-sample positional
        argument lists and per-sample keyword argument dicts.

    Raises:
        ValueError: If the first positional argument is not a tensor.
    """
    if not isinstance(args[0], torch.Tensor):
        raise ValueError('The first argument must be a Tensor')

    batch_size = args[0].size(0)

    def _select(value: Any, idx: int) -> Any:
        # Slice batched tensors down to a single-element batch; anything
        # else (non-tensors, tensors with a different leading dim) passes
        # through untouched.
        if isinstance(value, torch.Tensor) and value.size(0) == batch_size:
            return value[idx:idx + 1]
        return value

    batch_args = [[_select(value, i) for value in args]
                  for i in range(batch_size)]
    batch_kwargs = [{
        name: _select(value, i)
        for name, value in kwargs.items()
    } for i in range(batch_size)]

    return batch_args, batch_kwargs
8,242 | from typing import Any, Dict, List, Tuple, Union
import torch
The provided code snippet includes necessary dependencies for implementing the `concat_decoder_layer_outputs` function. Write a Python function `def concat_decoder_layer_outputs( batch_outputs: List[Tuple[Any]]) -> Tuple[Any]` to solve the following problem:
This function concatenates individual decoder layer outputs into a batched output. Args: batch_outputs (List[Tuple[Any]]): A list of tuples, where each tuple represents the output from an individual element in the batch. Returns: Tuple[Any]: A tuple representing the batched output.
Here is the function:
def concat_decoder_layer_outputs(
        batch_outputs: List[Tuple[Any]]) -> Tuple[Any]:
    """Concatenate per-sample decoder-layer outputs into one batched output.

    Past key-value pairs (2-tuples of tensors) are concatenated component
    wise; every other return value is concatenated directly.

    Args:
        batch_outputs (List[Tuple[Any]]): Output tuples, one per sample.

    Returns:
        Tuple[Any]: The batched output tuple.
    """

    def _is_kv_pair(item: Any) -> bool:
        # A past key-value entry is exactly a (key, value) pair of tensors.
        return (isinstance(item, tuple) and len(item) == 2
                and isinstance(item[0], torch.Tensor)
                and isinstance(item[1], torch.Tensor))

    merged = []
    for pos in range(len(batch_outputs[0])):
        items = [out[pos] for out in batch_outputs]
        if _is_kv_pair(items[0]):
            # Concatenate keys and values separately, keeping the pair shape.
            merged.append((torch.cat([kv[0] for kv in items]),
                           torch.cat([kv[1] for kv in items])))
        else:
            merged.append(torch.cat(items))
    return tuple(merged)
8,243 | import inspect
import re
import warnings
from contextlib import contextmanager
from functools import partial
from typing import List
import torch
from torch import nn
from lmdeploy.lite.defaults import KV_CACHE_SIGNATURE, OFFLOAD_MOD
@contextmanager
def offload_kv_cache(model: nn.Module, device: str = 'cuda') -> None:
    """Offloads kv cache to given device during forward pass.

    Wraps the forward of every module that returns a kv cache so the cache
    is moved to *device* before the call and back to CPU afterwards. The
    ``@contextmanager`` decorator is required: this function yields and is
    consumed via ``with offload_kv_cache(...)`` (see
    ``memory_efficient_inference``); a bare generator would fail there.

    Args:
        model (nn.Module): Model for inference
        device (str): Device to offload to

    Yields:
        None
    """
    modules = find_modules_by_return_value(model, KV_CACHE_SIGNATURE)
    original_forwards = {mod: mod.forward for mod in modules}
    # Position of the kv cache among each module's inputs / return values.
    input_idxs = {mod: find_kv_cache_idx(mod) for mod in modules}
    output_idxs = {
        mod: extract_return_values(mod).index(KV_CACHE_SIGNATURE)
        for mod in modules
    }

    def wrap_forward(module, *args, **kwargs):
        # Move the incoming kv cache (positional or keyword) to `device`.
        idx = input_idxs[module]
        if idx >= len(args):
            # kv cache in kwargs
            if KV_CACHE_SIGNATURE in kwargs:
                if kwargs[KV_CACHE_SIGNATURE]:
                    kwargs[KV_CACHE_SIGNATURE] = kwargs[KV_CACHE_SIGNATURE].to(
                        device)
            else:
                raise ValueError(f'No kv cache input found at index {idx}')
        else:
            # kv cache in args
            args = list(args)
            args[idx] = args[idx].to(device)
            args = tuple(args)
        result = original_forwards[module](*args, **kwargs)
        result = list(result)
        idx = output_idxs[module]
        # Move kv cache outputs back to CPU
        key = result[idx][0].to('cpu')
        value = result[idx][1].to('cpu')
        torch.cuda.empty_cache()
        result[idx] = (key, value)
        result = tuple(result)
        return result

    try:
        for module in modules:
            original_forwards[module] = module.forward
            module.forward = partial(wrap_forward, module)
        yield
    finally:
        # Always restore the original forwards, even if the body raised.
        for module in modules:
            module.forward = original_forwards[module]
            del original_forwards[module]
@contextmanager
def offload_weights(model: nn.Module, device: str = 'cuda') -> None:
    """Offloads specified modules to given device during forward pass.

    Modules whose type is listed in ``OFFLOAD_MOD`` stay on CPU and are moved
    to *device* only for the duration of their own forward call, via
    pre/post forward hooks. The ``@contextmanager`` decorator is required:
    this function yields and is consumed via ``with offload_weights(...)``
    (see ``memory_efficient_inference``).

    Args:
        model (nn.Module): Model for inference
        device (str): Device to offload to

    Yields:
        None
    """
    target_modules = OFFLOAD_MOD

    def before_forward(module: nn.Module, inp: torch.Tensor):
        module.to(device)

    def after_forward(module: nn.Module, inp: torch.Tensor, out: torch.Tensor):
        module.to('cpu')
        torch.cuda.empty_cache()

    def _to_device(m, spec_modules, dev):
        # Recursively move the model to `dev`, but keep any module of an
        # offloaded type on CPU; leaf modules are moved wholesale.
        if len(spec_modules) == 0 or len(list(m.children())) == 0:
            m.to(dev)
            return
        for child in m.children():
            if isinstance(child, spec_modules):
                child.to('cpu')
            else:
                _to_device(child, spec_modules, dev)

    warnings.warn('By default, offloading will be done on '
                  '`nn.Linear`. You can add modules which want offload to '
                  'the `lmdeploy.lite.defaults.OFFLOAD_MOD`.')
    _to_device(model, target_modules, device)

    handles = []
    for module in model.modules():
        if isinstance(module, target_modules):
            handle1 = module.register_forward_pre_hook(before_forward)
            handle2 = module.register_forward_hook(after_forward)
            handles.extend([handle1, handle2])
    try:
        yield
    finally:
        # Remove hooks and park everything back on CPU, even on error.
        for handle in handles:
            handle.remove()
        model.to('cpu')
        torch.cuda.empty_cache()
The provided code snippet includes necessary dependencies for implementing the `memory_efficient_inference` function. Write a Python function `def memory_efficient_inference(model: nn.Module, offload: bool = True, device: str = 'cuda') -> None` to solve the following problem:
Memory efficient inference context manager. Moves model to device for inference, with option to offload specific modules. Args: model (nn.Module): Model for inference offload (bool): Whether to offload modules device (str): Device for inference Yields: None
Here is the function:
@contextmanager
def memory_efficient_inference(model: nn.Module,
                               offload: bool = True,
                               device: str = 'cuda') -> None:
    """Memory efficient inference context manager.

    Moves model to device for inference, with option to offload
    specific modules. The ``@contextmanager`` decorator is required for the
    docstring's stated contract ("context manager") to hold — without it the
    function is a bare generator and cannot be used in a ``with`` statement.

    Args:
        model (nn.Module): Model for inference
        offload (bool): Whether to offload modules
        device (str): Device for inference

    Yields:
        None
    """
    if offload:
        warnings.warn('Using offload mode - modules defined in OFFLOAD_MOD '
                      'will be moved to GPU during forward pass only.')
        warnings.warn(
            'Using offload mode will incur performance penalty due to '
            'frequent CPU-GPU data transfers.')
        with torch.inference_mode():
            with offload_kv_cache(model, device):
                with offload_weights(model, device):
                    yield
    else:
        # No offloading: keep the whole model resident on the device.
        model.to(device)
        with torch.inference_mode():
            yield
8,244 | from .chat import SubCliChat
from .cli import CLI
from .lite import SubCliLite
from .serve import SubCliServe
class SubCliChat(object):
    """CLI group for chatting through the pytorch or turbomind engine."""
    _help = 'Chat with pytorch or turbomind engine.'
    _desc = _help
    # Registered under the root `lmdeploy` parser as the `chat` sub-command.
    parser = CLI.subparsers.add_parser('chat', help=_help, description=_desc)
    subparsers = parser.add_subparsers(
        title='Commands', description='This group has the following commands:')

    # NOTE(review): these helpers take no `self`/`cls` — presumably decorated
    # with @staticmethod in the original source; calling them on the class
    # still works without the decorator. Confirm against upstream.
    def add_parser_torch():
        """Add parser for torch command."""
        parser = SubCliChat.subparsers.add_parser(
            'torch',
            formatter_class=DefaultsAndTypesHelpFormatter,
            help=SubCliChat.torch.__doc__,
            description=SubCliChat.torch.__doc__,
        )
        parser.set_defaults(run=SubCliChat.torch)
        parser.add_argument('model_path',
                            type=str,
                            help='The huggingface model path')
        # engine args
        engine_group = parser.add_argument_group('Engine arguments')
        ArgumentHelper.model_name(engine_group)
        ArgumentHelper.tp(engine_group)
        ArgumentHelper.session_len(engine_group)
        ArgumentHelper.adapters(engine_group)
        ArgumentHelper.cache_max_entry_count(engine_group)
        # other args
        # NOTE(review): `store_false` with default=True means passing
        # --trust-remote-code DISABLES trusting remote code — confirm intended.
        parser.add_argument('--trust-remote-code',
                            action='store_false',
                            default=True,
                            help='Trust remote code')

    def add_parser_turbomind():
        """Add parser for turbomind command."""
        parser = SubCliChat.subparsers.add_parser(
            'turbomind',
            formatter_class=DefaultsAndTypesHelpFormatter,
            help=SubCliChat.turbomind.__doc__,
            description=SubCliChat.turbomind.__doc__,
        )
        parser.set_defaults(run=SubCliChat.turbomind)
        parser.add_argument(
            'model_path',
            type=str,
            help='The path of the deployed model. '
            'It can be in format of huggingface or turbomind. '
            'When it is turbomind model, all arguments for engine'
            'config would be ignored, so you need to change the `config.ini`')
        # engine arguments
        engine_group = parser.add_argument_group('Engine arguments')
        ArgumentHelper.tp(engine_group)
        ArgumentHelper.model_format(engine_group)
        ArgumentHelper.quant_policy(engine_group)
        ArgumentHelper.model_name(engine_group)
        ArgumentHelper.cache_max_entry_count(engine_group)
        ArgumentHelper.rope_scaling_factor(engine_group)
        ArgumentHelper.session_len(engine_group)
        # other arguments
        ArgumentHelper.cap(parser)
        ArgumentHelper.meta_instruction(parser)

    def torch(args):
        """Chat with PyTorch inference engine through terminal."""
        from lmdeploy.messages import PytorchEngineConfig
        from lmdeploy.pytorch.chat import run_chat
        adapters = get_lora_adapters(args.adapters)
        engine_config = PytorchEngineConfig(
            model_name=args.model_name,
            tp=args.tp,
            session_len=args.session_len,
            cache_max_entry_count=args.cache_max_entry_count,
            adapters=adapters)
        run_chat(args.model_path,
                 engine_config,
                 trust_remote_code=args.trust_remote_code)

    def turbomind(args):
        """Chat with TurboMind inference engine through terminal."""
        from lmdeploy.turbomind.chat import main
        kwargs = convert_args(args)
        main(**kwargs)

    def add_parsers():
        """Add all parsers."""
        SubCliChat.add_parser_torch()
        SubCliChat.add_parser_turbomind()
class CLI(object):
    """Root of the `lmdeploy` command line interface."""
    _desc = 'The CLI provides a unified API for converting, ' \
            'compressing and deploying large language models.'
    parser = argparse.ArgumentParser(prog='lmdeploy',
                                     description=_desc,
                                     add_help=True)
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version=__version__)
    # `dest='command'` makes the chosen sub-command name available as
    # `args.command` (used by `run()` for help fallback).
    subparsers = parser.add_subparsers(
        title='Commands',
        description='lmdeploy has following commands:',
        dest='command')

    def add_parser_convert():
        """Add parser for convert command."""
        parser = CLI.subparsers.add_parser(
            'convert',
            formatter_class=DefaultsAndTypesHelpFormatter,
            description=CLI.convert.__doc__,
            help=CLI.convert.__doc__)
        # define arguments
        parser.add_argument(
            'model_name',
            type=str,
            help='The name of the to-be-deployed model, such as llama-7b, '
            'llama-13b, vicuna-7b and etc. You can run `lmdeploy list` to '
            'get the supported model names')
        parser.add_argument('model_path',
                            type=str,
                            help='The directory path of the model')
        ArgumentHelper.model_format(parser)
        ArgumentHelper.tp(parser)
        # other args
        parser.add_argument('--tokenizer-path',
                            type=str,
                            default=None,
                            help='The path of tokenizer model')
        parser.add_argument('--dst-path',
                            type=str,
                            default='workspace',
                            help='The destination path that saves outputs')
        parser.add_argument(
            '--quant-path',
            type=str,
            default=None,
            help='Path of the quantized model, which can be none')
        parser.add_argument(
            '--group-size',
            type=int,
            default=0,
            help='A parameter used in awq to quantize fp16 weights '
            'to 4 bits')
        parser.set_defaults(run=CLI.convert)

    def add_parser_list():
        """Add parser for list command."""
        parser = CLI.subparsers.add_parser(
            'list',
            formatter_class=DefaultsAndTypesHelpFormatter,
            description=CLI.list.__doc__,
            help=CLI.list.__doc__)
        parser.set_defaults(run=CLI.list)
        # define arguments
        ArgumentHelper.engine(parser)

    def add_parser_checkenv():
        """Add parser for check_env command."""
        parser = CLI.subparsers.add_parser(
            'check_env',
            formatter_class=DefaultsAndTypesHelpFormatter,
            description=CLI.check_env.__doc__,
            help=CLI.check_env.__doc__)
        parser.set_defaults(run=CLI.check_env)
        parser.add_argument('--dump-file',
                            type=str,
                            default=None,
                            help='The file path to save env info. Only '
                            'support file format in `json`, `yml`,'
                            ' `pkl`')

    def convert(args):
        """Convert LLMs to turbomind format."""
        from lmdeploy.turbomind.deploy.converter import main
        kwargs = convert_args(args)
        main(**kwargs)

    def list(args):
        """List the supported model names."""
        from lmdeploy.model import MODELS
        # Inside the method body `list` resolves to the builtin, not this
        # method (class attributes are not in scope here), so this is safe.
        model_names = list(MODELS.module_dict.keys())
        deprecate_names = [
            'baichuan-7b', 'baichuan2-7b', 'chatglm2-6b', 'internlm-chat-20b',
            'internlm-chat-7b', 'internlm-chat-7b-8k', 'internlm2-1_8b',
            'internlm-20b', 'internlm2-20b', 'internlm2-7b', 'internlm2-chat',
            'internlm2-chat-1_8b', 'internlm2-chat-20b', 'internlm2-chat-7b',
            'llama-2-chat', 'llama-2', 'qwen-14b', 'qwen-7b', 'solar-70b',
            'yi-200k', 'yi-34b', 'yi-chat', 'Mistral-7B-Instruct',
            'Mixtral-8x7B-Instruct', 'baichuan-base', 'deepseek-chat',
            'internlm-chat'
        ]
        model_names = [
            n for n in model_names if n not in deprecate_names + ['base']
        ]
        deprecate_names.sort()
        model_names.sort()
        print('The older chat template name like "internlm2-7b", "qwen-7b"'
              ' and so on are deprecated and will be removed in the future.'
              ' The supported chat template names are:')
        print('\n'.join(model_names))

    def check_env(args):
        """Check the environmental information."""
        import importlib

        import mmengine
        from mmengine.utils import get_git_hash
        from mmengine.utils.dl_utils import collect_env

        from lmdeploy.version import __version__
        env_info = collect_env()
        env_info['LMDeploy'] = __version__ + '+' + get_git_hash()[:7]

        # remove some unnecessary info
        remove_reqs = ['MMEngine', 'OpenCV']
        for req in remove_reqs:
            if req in env_info:
                env_info.pop(req)

        # extra important dependencies
        extra_reqs = ['transformers', 'gradio', 'fastapi', 'pydantic']

        for req in extra_reqs:
            try:
                env_info[req] = importlib.import_module(req).__version__
            except Exception:
                # Missing optional dependency is reported, not fatal.
                env_info[req] = 'Not Found'

        # print env info
        for k, v in env_info.items():
            print(f'{k}: {v}')

        # dump to local file
        dump_file = args.dump_file
        if dump_file is not None:
            work_dir, _ = os.path.split(dump_file)
            if work_dir:
                os.makedirs(work_dir, exist_ok=True)
            mmengine.dump(env_info, dump_file)

    def add_parsers():
        """Add all parsers."""
        CLI.add_parser_convert()
        CLI.add_parser_list()
        CLI.add_parser_checkenv()
class SubCliLite(object):
    """CLI for compressing LLMs."""
    _help = 'Compressing and accelerating LLMs with lmdeploy.lite module'
    _desc = _help
    # Registered under the root `lmdeploy` parser as the `lite` sub-command.
    parser = CLI.subparsers.add_parser(
        'lite',
        help=_help,
        description=_desc,
    )
    subparsers = parser.add_subparsers(
        title='Commands', description='This group has the following commands:')

    def add_parser_auto_awq():
        """Add parser for auto_awq command."""
        parser = SubCliLite.subparsers.add_parser(
            'auto_awq',
            formatter_class=DefaultsAndTypesHelpFormatter,
            description=SubCliLite.auto_awq.__doc__,
            help=SubCliLite.auto_awq.__doc__)
        parser.set_defaults(run=SubCliLite.auto_awq)
        parser.add_argument('model',
                            type=str,
                            help='The path of model in hf format')
        ArgumentHelper.work_dir(parser)
        ArgumentHelper.calib_dataset(parser)
        ArgumentHelper.calib_samples(parser)
        ArgumentHelper.calib_seqlen(parser)
        ArgumentHelper.device(parser)
        parser.add_argument('--w-bits',
                            type=int,
                            default=4,
                            help='Bit number for weight quantization')
        parser.add_argument('--w-sym',
                            action='store_true',
                            help='Whether to do symmetric quantization')
        parser.add_argument(
            '--w-group-size',
            type=int,
            default=128,
            help='Group size for weight quantization statistics')

    def add_parser_calibrate():
        """Add parser for calibrate command."""
        parser = SubCliLite.subparsers.add_parser(
            'calibrate',
            formatter_class=DefaultsAndTypesHelpFormatter,
            description=SubCliLite.calibrate.__doc__,
            help=SubCliLite.calibrate.__doc__)
        parser.set_defaults(run=SubCliLite.calibrate)
        parser.add_argument('model',
                            type=str,
                            help='The name or path of the model to be loaded')
        ArgumentHelper.work_dir(parser)
        ArgumentHelper.calib_dataset(parser)
        ArgumentHelper.calib_samples(parser)
        ArgumentHelper.calib_seqlen(parser)
        ArgumentHelper.device(parser)

    def add_parser_smooth_quant():
        """Add parser for smooth_quant command."""
        parser = SubCliLite.subparsers.add_parser(
            'smooth_quant',
            formatter_class=DefaultsAndTypesHelpFormatter,
            description=SubCliLite.smooth_quant.__doc__,
            help=SubCliLite.smooth_quant.__doc__)
        parser.set_defaults(run=SubCliLite.smooth_quant)
        parser.add_argument('model',
                            type=str,
                            help='The name or path of the model to be loaded')
        parser.add_argument(
            '--work-dir',
            type=str,
            default='./work_dir',
            help='The working directory for outputs. defaults to "./work_dir"')
        ArgumentHelper.calib_dataset(parser)
        ArgumentHelper.calib_samples(parser)
        ArgumentHelper.calib_seqlen(parser)
        ArgumentHelper.device(parser)

    def add_parser_kv_qparams():
        """Add parser for kv_qparams command."""
        parser = SubCliLite.subparsers.add_parser(
            'kv_qparams',
            formatter_class=DefaultsAndTypesHelpFormatter,
            description=SubCliLite.kv_qparams.__doc__,
            help=SubCliLite.kv_qparams.__doc__)
        parser.set_defaults(run=SubCliLite.kv_qparams)
        parser.add_argument('work_dir',
                            type=str,
                            help='Directory path where the stats are saved')
        parser.add_argument('turbomind_dir',
                            type=str,
                            help='Directory path where to save the results')
        parser.add_argument('--kv-bits',
                            type=int,
                            default=8,
                            help='Number of bits for quantization')
        # fix: typo 'quantizaiton' -> 'quantization' in user-facing help text
        parser.add_argument('--kv-sym',
                            action='store_true',
                            help='Whether to use symmetric quantization')
        parser.add_argument(
            '--num-tp',
            type=int,
            default=None,
            help='GPU number used in tensor parallelism. Should be 2^n')
        parser.add_argument('--tm-params',
                            nargs='*',
                            default=None,
                            action=DictAction,
                            help='Used key-values pairs in xxx=yyy format'
                            ' to update the turbomind model weights'
                            ' config')

    def auto_awq(args):
        """Perform weight quantization using AWQ algorithm."""
        from lmdeploy.lite.apis.auto_awq import auto_awq
        kwargs = convert_args(args)
        auto_awq(**kwargs)

    def calibrate(args):
        """Perform calibration on a given dataset."""
        from lmdeploy.lite.apis.calibrate import calibrate
        kwargs = convert_args(args)
        calibrate(**kwargs)

    def kv_qparams(args):
        """Export key and value stats."""
        from lmdeploy.lite.apis.kv_qparams import main as run_kv_qparams
        kwargs = convert_args(args)
        run_kv_qparams(**kwargs)

    def smooth_quant(args):
        """Perform w8a8 quantization using SmoothQuant."""
        from lmdeploy.lite.apis.smooth_quant import smooth_quant
        kwargs = convert_args(args)
        smooth_quant(**kwargs)

    def add_parsers():
        """Add all parsers."""
        SubCliLite.add_parser_auto_awq()
        SubCliLite.add_parser_calibrate()
        SubCliLite.add_parser_kv_qparams()
        SubCliLite.add_parser_smooth_quant()
class SubCliServe:
    """Serve LLMs and interact on terminal or web UI."""
    _help = 'Serve LLMs with gradio, openai API or triton server.'
    _desc = _help
    # Registered under the root `lmdeploy` parser as the `serve` sub-command.
    parser = CLI.subparsers.add_parser(
        'serve',
        help=_help,
        description=_desc,
    )
    subparsers = parser.add_subparsers(
        title='Commands', description='This group has the following commands:')

    def add_parser_gradio():
        """Add parser for gradio command."""
        parser = SubCliServe.subparsers.add_parser(
            'gradio',
            formatter_class=DefaultsAndTypesHelpFormatter,
            description=SubCliServe.gradio.__doc__,
            help=SubCliServe.gradio.__doc__)
        parser.set_defaults(run=SubCliServe.gradio)
        parser.add_argument(
            'model_path_or_server',
            type=str,
            help='The path of the deployed model or the tritonserver url or '
            'restful api url. for example: - ./workspace - 0.0.0.0:23333'
            ' - http://0.0.0.0:23333')
        parser.add_argument('--server-name',
                            type=str,
                            default='0.0.0.0',
                            help='The ip address of gradio server')
        parser.add_argument('--server-port',
                            type=int,
                            default=6006,
                            help='The port of gradio server')
        # common args
        ArgumentHelper.backend(parser)
        # chat template args
        ArgumentHelper.meta_instruction(parser)
        ArgumentHelper.cap(parser)
        # pytorch engine args
        pt_group = parser.add_argument_group('PyTorch engine arguments')

        # common engine args: actions are created once in the pytorch group
        # and appended to the turbomind group so they appear in both help
        # sections without being registered twice.
        tp_act = ArgumentHelper.tp(pt_group)
        model_name_act = ArgumentHelper.model_name(pt_group)
        session_len_act = ArgumentHelper.session_len(pt_group)
        max_batch_size_act = ArgumentHelper.max_batch_size(pt_group)
        cache_max_entry_act = ArgumentHelper.cache_max_entry_count(pt_group)

        # turbomind args
        tb_group = parser.add_argument_group('TurboMind engine arguments')
        # common engine args
        tb_group._group_actions.append(tp_act)
        tb_group._group_actions.append(model_name_act)
        tb_group._group_actions.append(session_len_act)
        tb_group._group_actions.append(max_batch_size_act)
        tb_group._group_actions.append(cache_max_entry_act)
        ArgumentHelper.model_format(tb_group)
        ArgumentHelper.quant_policy(tb_group)
        ArgumentHelper.rope_scaling_factor(tb_group)

    def add_parser_api_server():
        """Add parser for api_server command."""
        parser = SubCliServe.subparsers.add_parser(
            'api_server',
            formatter_class=DefaultsAndTypesHelpFormatter,
            description=SubCliServe.api_server.__doc__,
            help=SubCliServe.api_server.__doc__)
        parser.set_defaults(run=SubCliServe.api_server)
        parser.add_argument(
            'model_path',
            type=str,
            help='The path of a model. it could be one of the following '
            'options: - i) a local directory path of a turbomind model'
            ' which is converted by `lmdeploy convert` command or '
            'download from ii) and iii). - ii) the model_id of a '
            'lmdeploy-quantized model hosted inside a model repo on '
            'huggingface.co, such as "internlm/internlm-chat-20b-4bit",'
            ' "lmdeploy/llama2-chat-70b-4bit", etc. - iii) the model_id'
            ' of a model hosted inside a model repo on huggingface.co,'
            ' such as "internlm/internlm-chat-7b", "qwen/qwen-7b-chat "'
            ', "baichuan-inc/baichuan2-7b-chat" and so on')
        parser.add_argument('--server-name',
                            type=str,
                            default='0.0.0.0',
                            help='Host ip for serving')
        parser.add_argument('--server-port',
                            type=int,
                            default=23333,
                            help='Server port')
        parser.add_argument('--allow-origins',
                            nargs='+',
                            type=str,
                            default=['*'],
                            help='A list of allowed origins for cors')
        parser.add_argument('--allow-credentials',
                            action='store_true',
                            help='Whether to allow credentials for cors')
        parser.add_argument('--allow-methods',
                            nargs='+',
                            type=str,
                            default=['*'],
                            help='A list of allowed http methods for cors')
        parser.add_argument('--allow-headers',
                            nargs='+',
                            type=str,
                            default=['*'],
                            help='A list of allowed http headers for cors')
        parser.add_argument('--qos-config-path',
                            type=str,
                            default='',
                            help='Qos policy config path')
        # common args
        ArgumentHelper.backend(parser)
        ArgumentHelper.log_level(parser)
        ArgumentHelper.api_keys(parser)
        ArgumentHelper.ssl(parser)

        # chat template args
        ArgumentHelper.meta_instruction(parser)
        ArgumentHelper.cap(parser)

        # pytorch engine args
        pt_group = parser.add_argument_group('PyTorch engine arguments')

        # common engine args (shared between both engine help groups,
        # same technique as in add_parser_gradio)
        tp_act = ArgumentHelper.tp(pt_group)
        model_name_act = ArgumentHelper.model_name(pt_group)
        session_len_act = ArgumentHelper.session_len(pt_group)
        max_batch_size_act = ArgumentHelper.max_batch_size(pt_group)
        cache_max_entry_act = ArgumentHelper.cache_max_entry_count(pt_group)

        # turbomind args
        tb_group = parser.add_argument_group('TurboMind engine arguments')
        # common engine args
        tb_group._group_actions.append(tp_act)
        tb_group._group_actions.append(model_name_act)
        tb_group._group_actions.append(session_len_act)
        tb_group._group_actions.append(max_batch_size_act)
        tb_group._group_actions.append(cache_max_entry_act)
        ArgumentHelper.model_format(tb_group)
        ArgumentHelper.quant_policy(tb_group)
        ArgumentHelper.rope_scaling_factor(tb_group)

    def add_parser_api_client():
        """Add parser for api_client command."""
        parser = SubCliServe.subparsers.add_parser(
            'api_client',
            formatter_class=DefaultsAndTypesHelpFormatter,
            description=SubCliServe.api_client.__doc__,
            help=SubCliServe.api_client.__doc__)
        parser.set_defaults(run=SubCliServe.api_client)
        parser.add_argument('api_server_url',
                            type=str,
                            help='The URL of api server')
        parser.add_argument('--api-key',
                            type=str,
                            default=None,
                            help='api key. Default to None, which means no '
                            'api key will be used')
        ArgumentHelper.session_id(parser)

    def add_parser_triton_client():
        """Add parser for triton_client command."""
        parser = SubCliServe.subparsers.add_parser(
            'triton_client',
            formatter_class=DefaultsAndTypesHelpFormatter,
            description=SubCliServe.triton_client.__doc__,
            help=SubCliServe.triton_client.__doc__)
        parser.set_defaults(run=SubCliServe.triton_client)
        parser.add_argument(
            'tritonserver_addr',
            type=str,
            help='The address in format "ip:port" of triton inference server')
        ArgumentHelper.session_id(parser)
        ArgumentHelper.cap(parser)
        ArgumentHelper.stream_output(parser)

    def gradio(args):
        """Serve LLMs with web UI using gradio."""
        from lmdeploy.archs import autoget_backend
        from lmdeploy.messages import (PytorchEngineConfig,
                                       TurbomindEngineConfig)
        from lmdeploy.model import ChatTemplateConfig
        from lmdeploy.serve.gradio.app import run
        backend = args.backend
        # A ':' in the target means it is a server address, not a model path,
        # so backend auto-detection only runs for local/hub model paths.
        if backend != 'pytorch' and ':' not in args.model_path_or_server:
            # set auto backend mode
            backend = autoget_backend(args.model_path_or_server)
        if backend == 'pytorch':
            backend_config = PytorchEngineConfig(
                tp=args.tp,
                model_name=args.model_name,
                max_batch_size=args.max_batch_size,
                cache_max_entry_count=args.cache_max_entry_count,
                session_len=args.session_len)
        else:
            backend_config = TurbomindEngineConfig(
                model_name=args.model_name,
                tp=args.tp,
                max_batch_size=args.max_batch_size,
                session_len=args.session_len,
                model_format=args.model_format,
                quant_policy=args.quant_policy,
                rope_scaling_factor=args.rope_scaling_factor,
                cache_max_entry_count=args.cache_max_entry_count)
        chat_template_config = ChatTemplateConfig(
            model_name=args.model_name,
            meta_instruction=args.meta_instruction,
            capability=args.cap)
        run(args.model_path_or_server,
            server_name=args.server_name,
            server_port=args.server_port,
            backend=backend,
            backend_config=backend_config,
            chat_template_config=chat_template_config)

    def api_server(args):
        """Serve LLMs with restful api using fastapi."""
        from lmdeploy.archs import autoget_backend
        from lmdeploy.model import ChatTemplateConfig
        from lmdeploy.serve.openai.api_server import serve as run_api_server
        backend = args.backend
        if backend != 'pytorch':
            # set auto backend mode
            backend = autoget_backend(args.model_path)
        if backend == 'pytorch':
            from lmdeploy.messages import PytorchEngineConfig
            backend_config = PytorchEngineConfig(
                tp=args.tp,
                model_name=args.model_name,
                max_batch_size=args.max_batch_size,
                cache_max_entry_count=args.cache_max_entry_count,
                session_len=args.session_len)
        else:
            from lmdeploy.messages import TurbomindEngineConfig
            backend_config = TurbomindEngineConfig(
                model_name=args.model_name,
                tp=args.tp,
                max_batch_size=args.max_batch_size,
                session_len=args.session_len,
                model_format=args.model_format,
                quant_policy=args.quant_policy,
                rope_scaling_factor=args.rope_scaling_factor,
                cache_max_entry_count=args.cache_max_entry_count)
        chat_template_config = ChatTemplateConfig(
            model_name=args.model_name,
            meta_instruction=args.meta_instruction,
            capability=args.cap)
        run_api_server(args.model_path,
                       backend=backend,
                       backend_config=backend_config,
                       chat_template_config=chat_template_config,
                       server_name=args.server_name,
                       server_port=args.server_port,
                       allow_origins=args.allow_origins,
                       allow_credentials=args.allow_credentials,
                       allow_methods=args.allow_methods,
                       allow_headers=args.allow_headers,
                       log_level=args.log_level.upper(),
                       api_keys=args.api_keys,
                       ssl=args.ssl,
                       qos_config_path=args.qos_config_path)

    def api_client(args):
        """Interact with restful api server in terminal."""
        from lmdeploy.serve.openai.api_client import main as run_api_client
        kwargs = convert_args(args)
        run_api_client(**kwargs)

    def triton_client(args):
        """Interact with Triton Server using gRPC protocol."""
        from lmdeploy.serve.client import main as run_triton_client
        kwargs = convert_args(args)
        run_triton_client(**kwargs)

    def add_parsers():
        """Add all parsers."""
        SubCliServe.add_parser_gradio()
        SubCliServe.add_parser_api_server()
        SubCliServe.add_parser_api_client()
        SubCliServe.add_parser_triton_client()
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run()` to solve the following problem:
The entry point of running LMDeploy CLI.
Here is the function:
def run():
    """The entry point of running LMDeploy CLI.

    Registers every sub-command group, parses the command line, and either
    dispatches to the selected command's `run` callback or prints the most
    specific help text available.
    """
    for group in (CLI, SubCliChat, SubCliServe, SubCliLite):
        group.add_parsers()

    args = CLI.parser.parse_args()

    if hasattr(args, 'run'):
        args.run(args)
        return

    # No command callback was bound: fall back to the deepest help we can.
    try:
        args.print_help()
    except AttributeError:
        fallback = {
            'serve': SubCliServe.parser,
            'lite': SubCliLite.parser,
            'chat': SubCliChat.parser,
        }.get(args.command, CLI.parser)
        fallback.print_help()
8,245 | import argparse
from typing import List
The provided code snippet includes necessary dependencies for implementing the `convert_args` function. Write a Python function `def convert_args(args)` to solve the following problem:
Convert args to dict format.
Here is the function:
def convert_args(args):
    """Convert parsed CLI args to a kwargs dict.

    Drops the bookkeeping attributes (`run`, `command`) that argparse uses
    for dispatch, leaving only the arguments the target callable expects.
    """
    excluded = ('run', 'command')
    return {
        name: value
        for name, value in args._get_kwargs() if name not in excluded
    }
8,246 | import argparse
from typing import List
The provided code snippet includes necessary dependencies for implementing the `get_lora_adapters` function. Write a Python function `def get_lora_adapters(adapters: List[str])` to solve the following problem:
Parse lora adapers from cli input. Args: adapters (List[str]): CLI input string of lora adapter path(s). Returns: Dict[str,str] or None: Parsed lora adapter path(s).
Here is the function:
def get_lora_adapters(adapters: List[str]):
    """Parse lora adapters from CLI input.

    Args:
        adapters (List[str]): CLI input string of lora adapter path(s),
            either a single path or one or more ``name=path`` pairs.
    Returns:
        Dict[str,str] or None: Parsed lora adapter path(s).
    """
    if not adapters:
        return None
    parsed = {}
    if len(adapters) == 1:
        # a single entry may omit the name; it then becomes 'default'
        entry = adapters[0].strip()
        if '=' in entry:
            name, path = entry.split('=', 1)
        else:
            name, path = 'default', entry
        parsed[name] = path
        return parsed
    for pair in adapters:
        assert '=' in pair, f'Multiple lora paths must in format of ' \
            f'xxx=yyy. But given: {pair}'
        name, path = pair.strip().split('=', 1)
        assert name not in parsed, f'Multiple lora paths with ' \
            f'repeated lora name: {name}'
        parsed[name] = path
    return parsed
8,247 | import os
import sys
import pytorch_sphinx_theme
from m2r import MdInclude
from recommonmark.transform import AutoStructify
from sphinx.builders.html import StandaloneHTMLBuilder
def setup(app):
    """Sphinx hook: register m2r options, `mdinclude` and recommonmark."""
    # m2r switches, all disabled by default, rebuilt on env change
    for option in ('no_underscore_emphasis', 'm2r_parse_relative_links',
                   'm2r_anonymous_references', 'm2r_disable_inline_math'):
        app.add_config_value(option, False, 'env')
    app.add_directive('mdinclude', MdInclude)
    app.add_config_value('recommonmark_config', {
        'auto_toc_tree_section': 'Contents',
        'enable_eval_rst': True,
    }, True)
    app.add_transform(AutoStructify)
8,249 | import argparse
import os
import random
from contextlib import contextmanager
from dataclasses import dataclass, field
from itertools import count
from pathlib import Path
from threading import Lock
from typing import List, Tuple
import gradio as gr
from packaging.version import Version, parse
from qwen_model import QwenVLChat
from xcomposer_model import InternLMXComposer
from lmdeploy.serve.gradio.constants import CSS, THEME, disable_btn, enable_btn
from lmdeploy.turbomind import TurboMind
from lmdeploy.turbomind.chat import valid_str
# Defaults for the demo's command line options. When --llm-ckpt is left as
# None, load_preprocessor_model falls back to the HF checkpoint.
DEFAULT_MODEL_NAME = 'internlm-xcomposer-7b'
DEFAULT_HF_CKPT = 'internlm/internlm-xcomposer-7b'
DEFAULT_LLM_CKPT = None
def parse_args():
    """Parse the command line arguments of the VL playground demo."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--model-name',
                     type=str,
                     default=DEFAULT_MODEL_NAME,
                     help='Model name, default to %(default)s')
    cli.add_argument('--hf-ckpt',
                     type=str,
                     default=DEFAULT_HF_CKPT,
                     help='hf checkpoint name or path, default to %(default)s')
    cli.add_argument('--llm-ckpt',
                     type=str,
                     default=DEFAULT_LLM_CKPT,
                     help='LLM checkpoint name or path, default to %(default)s')
    cli.add_argument('--server-port',
                     type=int,
                     default=9006,
                     help='Server port, default %(default)s')
    cli.add_argument('--server-name',
                     type=str,
                     default='0.0.0.0',
                     help='Server name, default %(default)s')
    return cli.parse_args()
8,250 | import argparse
import os
import random
from contextlib import contextmanager
from dataclasses import dataclass, field
from itertools import count
from pathlib import Path
from threading import Lock
from typing import List, Tuple
import gradio as gr
from packaging.version import Version, parse
from qwen_model import QwenVLChat
from xcomposer_model import InternLMXComposer
from lmdeploy.serve.gradio.constants import CSS, THEME, disable_btn, enable_btn
from lmdeploy.turbomind import TurboMind
from lmdeploy.turbomind.chat import valid_str
# Maps a supported --model-name to the preprocessor class that prepares its
# multi-modal inputs (token ids, visual features, embedding ranges).
SUPPORTED_MODELS = {
    'internlm-xcomposer-7b': InternLMXComposer,
    'qwen-vl-chat': QwenVLChat
}
@contextmanager
def get_stop_words():
    """Temporarily patch `Tokenizer.indexes_containing_token`.

    The stock implementation rejects stop words that encode to more than one
    token (it warns and returns []); inside this context the raw encoding
    (without BOS) is returned instead, so multi-token stop words work.

    Fixes: the function was used as ``with get_stop_words():`` but was a
    plain generator function — without ``@contextmanager`` entering the
    ``with`` block raises AttributeError. The patch is now also reverted in
    a ``finally`` so an exception inside the block cannot leak it.
    """
    from lmdeploy.tokenizer import Tokenizer
    old_func = Tokenizer.indexes_containing_token

    def new_func(self, token):
        indexes = self.encode(token, add_bos=False)
        return indexes

    Tokenizer.indexes_containing_token = new_func
    try:
        yield
    finally:
        # always restore the original method, even on error
        Tokenizer.indexes_containing_token = old_func
The provided code snippet includes necessary dependencies for implementing the `load_preprocessor_model` function. Write a Python function `def load_preprocessor_model(args)` to solve the following problem:
Load preprocessor and llm inference engine.
Here is the function:
def load_preprocessor_model(args):
    """Load preprocessor and llm inference engine."""
    assert args.model_name in SUPPORTED_MODELS
    # the HF checkpoint doubles as the LLM checkpoint unless one is given
    if args.llm_ckpt is None:
        llm_ckpt = args.hf_ckpt
    else:
        llm_ckpt = args.llm_ckpt
    preprocessor = SUPPORTED_MODELS[args.model_name](args.hf_ckpt)
    with get_stop_words():
        engine = TurboMind.from_pretrained(llm_ckpt,
                                           model_name=args.model_name)
    return preprocessor, engine
8,251 | import argparse
import os
import random
from contextlib import contextmanager
from dataclasses import dataclass, field
from itertools import count
from pathlib import Path
from threading import Lock
from typing import List, Tuple
import gradio as gr
from packaging.version import Version, parse
from qwen_model import QwenVLChat
from xcomposer_model import InternLMXComposer
from lmdeploy.serve.gradio.constants import CSS, THEME, disable_btn, enable_btn
from lmdeploy.turbomind import TurboMind
from lmdeploy.turbomind.chat import valid_str
class Session:
    """Per-user chat session with a process-unique id.

    Fixes: the original used `dataclasses.field(...)` at class level on a
    plain (non-dataclass) class, which just leaves a `Field` object as a
    class attribute, and exposed `session_id`/`message`/`step` as plain
    methods although callers read them as values (e.g.
    `session_id=session.session_id`, `step + ... ` arithmetic) — they are
    now read-only properties.
    """

    # class-wide id allocation, guarded so concurrent UI callbacks
    # never hand out the same id twice
    _lock = Lock()
    _count = count()

    def __init__(self):
        with Session._lock:
            self._session_id = next(Session._count)
        self._message = []  # [([text, img_path, ...], assistant_reply), ...]
        self._step = 0  # token offset of the engine's k/v cache

    @property
    def session_id(self) -> int:
        """Unique id of this session."""
        return self._session_id

    @property
    def message(self) -> List[Tuple[str, str]]:
        """Chat history of this session."""
        return self._message

    @property
    def step(self) -> int:
        """Current decoding step (token offset) of this session."""
        return self._step
CSS = """
#container {
width: 95%;
margin-left: auto;
margin-right: auto;
}
#chatbot {
height: 500px;
overflow: auto;
}
.chat_wrap_space {
margin-left: 0.5em
}
"""
THEME = gr.themes.Soft(
primary_hue=gr.themes.colors.blue,
secondary_hue=gr.themes.colors.sky,
font=[gr.themes.GoogleFont('Inconsolata'), 'Arial', 'sans-serif'])
enable_btn = gr.update(interactive=True)
disable_btn = gr.update(interactive=False)
def valid_str(string, coding='utf-8'):
    """decode text according to its encoding type."""
    # U+FFFD (replacement char) encoded in utf-8; strip any occurrence
    invalid_markers = (b'\xef\xbf\xbd', )
    data = string.encode(coding)
    for marker in invalid_markers:
        data = data.replace(marker, b'')
    return data.decode(encoding=coding, errors='ignore')
def launch_demo(args, preprocessor, model):
    """Build the gradio UI and serve the VL chat demo.

    Args:
        args: parsed CLI namespace; `server_name`/`server_port` are used.
        preprocessor: model-specific preprocessor whose `prepare_query`
            yields token ids, visual features and embedding ranges.
        model: TurboMind engine whose instances stream the completion.

    Fixes: `que_kwargs` was referenced in `demo.queue(...)` without ever
    being defined, raising NameError at startup; it is now built according
    to the installed gradio version.
    """

    def add_image(chatbot, session, file):
        """Append image to query."""
        chatbot = chatbot + [((file.name, ), None)]
        history = session._message
        # [([user, url, url], assistant), ...]
        if len(history) == 0 or history[-1][-1] is not None:
            history.append([[file.name], None])
        else:
            history[-1][0].append(file.name)
        return chatbot, session

    def add_text(chatbot, session, text):
        """User query."""
        chatbot = chatbot + [(text, None)]
        history = session._message
        if len(history) == 0 or history[-1][-1] is not None:
            history.append([text, None])
        else:
            # text goes in front of any images already queued for this turn
            history[-1][0].insert(0, text)
        return chatbot, session, disable_btn, enable_btn

    def chat(
        chatbot,
        session,
        request_output_len=512,
    ):
        """Chat with AI assistant."""
        generator = model.create_instance()
        history = session._message
        sequence_start = len(history) == 1
        seed = random.getrandbits(64) if sequence_start else None
        input_ids, features, ranges = preprocessor.prepare_query(
            history[-1][0], sequence_start)
        if len(input_ids
               ) + session.step + request_output_len > model.model.session_len:
            gr.Warning('WARNING: exceed session max length.'
                       ' Please restart the session by reset button.')
            yield chatbot, session, enable_btn, disable_btn, enable_btn
        else:
            response_size = 0
            step = session.step
            for outputs in generator.stream_infer(
                    session_id=session.session_id,
                    input_ids=input_ids,
                    input_embeddings=features,
                    input_embedding_ranges=ranges,
                    request_output_len=request_output_len,
                    stream_output=True,
                    sequence_start=sequence_start,
                    random_seed=seed,
                    step=step):
                res, tokens = outputs[0]
                # decode res
                response = model.tokenizer.decode(res.tolist(),
                                                  offset=response_size)
                # wait until a complete utf-8 sequence has been produced
                if response.endswith('�'):
                    continue
                response = valid_str(response)
                response_size = tokens
                if chatbot[-1][1] is None:
                    chatbot[-1][1] = ''
                    history[-1][1] = ''
                chatbot[-1][1] += response
                history[-1][1] += response
                session._step = step + len(input_ids) + tokens
                yield chatbot, session, disable_btn, enable_btn, disable_btn
            yield chatbot, session, enable_btn, disable_btn, enable_btn

    def stop(session):
        """Stop the session."""
        generator = model.create_instance()
        for _ in generator.stream_infer(session_id=session.session_id,
                                        input_ids=[0],
                                        request_output_len=0,
                                        sequence_start=False,
                                        sequence_end=False,
                                        stop=True):
            pass

    def cancel(chatbot, session):
        """Stop the session and keep chat history."""
        stop(session)
        return chatbot, session, disable_btn, enable_btn, enable_btn

    def reset(session):
        """Reset a new session."""
        stop(session)
        session._step = 0
        session._message = []
        return [], session, enable_btn

    with gr.Blocks(css=CSS, theme=THEME) as demo:
        with gr.Column(elem_id='container'):
            gr.Markdown('## LMDeploy VL Playground')
            chatbot = gr.Chatbot(elem_id='chatbot', label=model.model_name)
            query = gr.Textbox(placeholder='Please input the instruction',
                               label='Instruction')
            session = gr.State()
            with gr.Row():
                addimg_btn = gr.UploadButton('Upload Image',
                                             file_types=['image'])
                cancel_btn = gr.Button(value='Cancel', interactive=False)
                reset_btn = gr.Button(value='Reset')
            addimg_btn.upload(add_image, [chatbot, session, addimg_btn],
                              [chatbot, session],
                              show_progress=True,
                              queue=True)
            send_event = query.submit(
                add_text, [chatbot, session, query], [chatbot, session]).then(
                    chat, [chatbot, session],
                    [chatbot, session, query, cancel_btn, reset_btn])
            query.submit(lambda: gr.update(value=''), None, [query])
            cancel_btn.click(cancel, [chatbot, session],
                             [chatbot, session, cancel_btn, reset_btn, query],
                             cancels=[send_event])
            reset_btn.click(reset, [session], [chatbot, session, query],
                            cancels=[send_event])
        demo.load(lambda: Session(), inputs=None, outputs=[session])

    # gradio 4.x renamed the queue concurrency kwarg; build the kwargs for
    # whichever version is installed (this was previously an undefined name).
    # NOTE(review): the limit value of 4 is a guess — confirm the intended
    # queue concurrency for this demo.
    if parse(gr.__version__) >= Version('4.0.0'):
        que_kwargs = {'default_concurrency_limit': 4}
    else:
        que_kwargs = {'concurrency_count': 4}
    demo.queue(api_open=True, **que_kwargs, max_size=100)
    demo.launch(
        share=True,
        server_port=args.server_port,
        server_name=args.server_name,
    )
8,252 | import os
from pathlib import Path
import torch
from transformers import AutoModel, AutoTokenizer
from xcomposer_model import InternLMXComposerTemplate
def get_attr(m, key):
    """Resolve a dotted attribute path, e.g. ``get_attr(obj, 'a.b.c')``."""
    target = m
    for part in key.split('.'):
        target = getattr(target, part)
    return target
8,253 | import argparse
import csv
import json
import os
import random
import time
from queue import Queue
from threading import Thread
from typing import List, Tuple, Union
import numpy as np
from tqdm import tqdm
from lmdeploy.cli.utils import ArgumentHelper, DefaultsAndTypesHelpFormatter
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
TurbomindEngineConfig)
from lmdeploy.pytorch.engine.engine import EngineInstance
from lmdeploy.tokenizer import DetokenizeState, Tokenizer
class Tokenizer:
    """Tokenize prompts or de-tokenize tokens into texts.

    Thin façade that picks a sentencepiece or HuggingFace backend at
    construction time and delegates every operation to it.

    Args:
        model_file (str): the path of the tokenizer model
    """
    # NOTE(review): `osp` (presumably os.path), `get_logger`,
    # `SentencePieceTokenizer` and `HuggingFaceTokenizer` are module-level
    # imports not visible in this excerpt — confirm.
    def __init__(self, model_file: str):
        if model_file.endswith('.model'):
            model_folder = osp.split(model_file)[0]
        else:
            model_folder = model_file
            model_file = osp.join(model_folder, 'tokenizer.model')
        tokenizer_config_file = osp.join(model_folder, 'tokenizer_config.json')
        model_file_exists = osp.exists(model_file)
        config_exists = osp.exists(tokenizer_config_file)
        # use the HF backend whenever a tokenizer_config.json exists or the
        # sentencepiece model file is absent
        use_hf_model = config_exists or not model_file_exists
        self.logger = get_logger('lmdeploy')
        if not use_hf_model:
            self.model = SentencePieceTokenizer(model_file)
        else:
            self.model = HuggingFaceTokenizer(model_folder)
    # NOTE(review): the three accessors below read like @property members;
    # decorators are not visible in this excerpt — confirm before calling.
    def vocab_size(self):
        """vocabulary size."""
        return self.model.vocab_size
    def bos_token_id(self):
        """begin-of-the-sentence token id."""
        return self.model.bos_token_id
    def eos_token_id(self):
        """end of the sentence token id."""
        return self.model.eos_token_id
    def encode(self, s: str, add_bos: bool = True, **kwargs):
        """Tokenize a prompt.

        Args:
            s (str): a prompt
        Returns:
            list[int]: token ids
        """
        return self.model.encode(s, add_bos, **kwargs)
    def decode(
        self,
        t: Sequence[int],
        offset: Optional[int] = None,
        skip_special_tokens: bool = True,
    ):
        """De-tokenize.

        Args:
            t (List[int]): a list of token ids
            offset (int): for incrementally decoding. Default to None, which
                means not applied.
        Returns:
            str: text of decoding tokens
        """
        return self.model.decode(t, offset, skip_special_tokens)
    def detokenize_incrementally(self,
                                 all_input_ids: Sequence[int],
                                 state: DetokenizeState,
                                 skip_special_tokens: bool = True,
                                 spaces_between_special_tokens: bool = True):
        """Incrementally detokenize the input indexes.

        Args:
            all_input_ids (List[int]): a list of token ids. Expected to be
                different sections of a long sequence.
            state (DetokenizeState): an instance of DetokenizeState. Consists
                of incrementally decoding states.
            skip_special_tokens (bool): Whether or not to remove special
                tokens in the decoding. Default to be True.
            spaces_between_special_tokens (bool): Whether or not to add
                spaces between special tokens. Default to be True.
        Returns:
            str: decoding output string of the current round.
            state (DetokenizeState): an instance of DetokenizeState. Consists
                of incrementally decoding states.
        """
        return self.model.detokenize_incrementally(
            all_input_ids,
            state=state,
            skip_special_tokens=skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens)
    def __call__(self, s: Union[str, Sequence[str]]):
        """Tokenize prompts.

        Args:
            s (str): prompts
        Returns:
            list[int]: token ids
        """
        return self.model(s)
    def indexes_containing_token(self, token):
        """Return all the possible indexes, whose decoding output may contain
        the input token."""
        # multi-token words cannot serve as stop words: warn and return []
        encoded = self.encode(token, add_bos=False)
        if len(encoded) > 1:
            self.logger.warning(
                f'The token {token}, its length of indexes {encoded} is over '
                'than 1. Currently, it can not be used as stop words')
            return []
        return self.model.indexes_containing_token(token)
def sample_requests(
    dataset_path: str,
    num_requests: int,
    tokenizer: Tokenizer,
) -> List[Tuple[str, int, int]]:
    """Sample `num_requests` (prompt, prompt_len, output_len) tuples.

    Loads a ShareGPT-style JSON dataset, keeps the first user/assistant turn
    of each conversation, tokenizes prompts and completions, prunes entries
    that are too short (<4 tokens) or too long (prompt>1024 or
    prompt+completion>2048), and randomly samples the requested amount.

    Raises:
        ValueError: if fewer than `num_requests` conversations survive the
            length filters.
    """
    # Load the dataset.
    with open(dataset_path) as f:
        dataset = json.load(f)
    # Filter out the conversations with less than 2 turns.
    dataset = [data for data in dataset if len(data['conversations']) >= 2]
    # Only keep the first two turns of each conversation.
    dataset = [(data['conversations'][0]['value'],
                data['conversations'][1]['value']) for data in dataset]
    # Pre-sample to avoid tokenizing the whole dataset. Cap the sample size
    # at the population size: `random.sample` raises ValueError otherwise,
    # which made any dataset smaller than 1000 conversations unusable.
    pre_sample = min(len(dataset), max(int(num_requests * 1.2), 1000))
    dataset = random.sample(dataset, pre_sample)
    # Tokenize the prompts and completions.
    prompts = [prompt for prompt, _ in dataset]
    prompt_token_ids = tokenizer(prompts).input_ids
    completions = [completion for _, completion in dataset]
    completion_token_ids = tokenizer(completions).input_ids
    tokenized_dataset = []
    for i in range(len(dataset)):
        output_len = len(completion_token_ids[i])
        tokenized_dataset.append((prompts[i], prompt_token_ids[i], output_len))
    # Filter out too short/long sequences. (Loop variable renamed: it used
    # to shadow the `prompt_token_ids` list above.)
    filtered_dataset: List[Tuple[str, int, int]] = []
    for prompt, token_ids, output_len in tokenized_dataset:
        prompt_len = len(token_ids)
        if prompt_len < 4 or output_len < 4:
            # Prune too short sequences.
            continue
        if prompt_len > 1024 or prompt_len + output_len > 2048:
            # Prune too long sequences.
            continue
        filtered_dataset.append((prompt, prompt_len, output_len))
    # Sample the requests.
    return random.sample(filtered_dataset, num_requests)
8,254 | import argparse
import csv
import json
import os
import random
import time
from queue import Queue
from threading import Thread
from typing import List, Tuple, Union
import numpy as np
from tqdm import tqdm
from lmdeploy.cli.utils import ArgumentHelper, DefaultsAndTypesHelpFormatter
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
TurbomindEngineConfig)
from lmdeploy.pytorch.engine.engine import EngineInstance
from lmdeploy.tokenizer import DetokenizeState, Tokenizer
class DefaultsAndTypesHelpFormatter(argparse.HelpFormatter):
def _get_help_string(self, action):
class ArgumentHelper:
def model_name(parser):
def model_format(parser, default: str = None):
def tp(parser):
def session_id(parser):
def session_len(parser, default: int = None):
def max_batch_size(parser):
def quant_policy(parser):
def rope_scaling_factor(parser):
def use_logn_attn(parser):
def block_size(parser):
def top_p(parser):
def top_k(parser):
def temperature(parser, default: float = 0.8):
def repetition_penalty(parser):
def cap(parser):
def log_level(parser):
def api_keys(parser):
def ssl(parser):
def backend(parser):
def engine(parser):
def stream_output(parser):
def calib_dataset(parser):
def calib_samples(parser):
def calib_seqlen(parser):
def device(parser):
def meta_instruction(parser):
def cache_max_entry_count(parser):
def adapters(parser):
def work_dir(parser):
def parse_args():
    """Build and parse CLI arguments of the localhost throughput benchmark."""
    arg_parser = argparse.ArgumentParser(
        description='Benchmark the request throughput of lmdeploy '
        'in localhost',
        formatter_class=DefaultsAndTypesHelpFormatter)
    arg_parser.add_argument('dataset', type=str, help='the path dataset')
    arg_parser.add_argument('model_path',
                            type=str,
                            help='the path of the model in localhost or '
                            'the repo_id of the model in huggingface.co')
    arg_parser.add_argument(
        '-c',
        '--concurrency',
        type=int,
        help='Number of working threads to process the sampled prompts',
        default=256)
    arg_parser.add_argument('-n',
                            '--num-prompts',
                            type=int,
                            help='Number of prompts to process',
                            default=5000)
    arg_parser.add_argument('--csv',
                            type=str,
                            help='Where to save the result.',
                            default='./profile_throughput.csv')
    arg_parser.add_argument('--seed',
                            type=int,
                            default=0,
                            help='Seed used in sampling prompts from dataset')
    # shared sampling / runtime options
    ArgumentHelper.top_p(arg_parser)
    ArgumentHelper.temperature(arg_parser)
    ArgumentHelper.top_k(arg_parser)
    ArgumentHelper.log_level(arg_parser)
    ArgumentHelper.backend(arg_parser)
    # pytorch engine args
    pt_group = arg_parser.add_argument_group('PyTorch engine arguments')
    tp_act = ArgumentHelper.tp(pt_group)
    session_len_act = ArgumentHelper.session_len(pt_group, default=4096)
    cache_count_act = ArgumentHelper.cache_max_entry_count(pt_group)
    # turbomind engine args; the three actions above are shared verbatim
    tb_group = arg_parser.add_argument_group('TurboMind engine argument')
    for shared_action in (tp_act, session_len_act, cache_count_act):
        tb_group._group_actions.append(shared_action)
    ArgumentHelper.model_format(tb_group, default='hf')
    return arg_parser.parse_args()
8,255 | import csv
import json
import random
import time
from queue import Queue
from threading import Thread
from typing import List, Tuple
import fire
import numpy as np
from tqdm import tqdm
from lmdeploy.serve.turbomind.chatbot import Chatbot
from lmdeploy.tokenizer import Tokenizer
class Tokenizer:
    """Tokenize prompts or de-tokenize tokens into texts.

    Thin façade that picks a sentencepiece or HuggingFace backend at
    construction time and delegates every operation to it.

    Args:
        model_file (str): the path of the tokenizer model
    """
    # NOTE(review): `osp` (presumably os.path), `get_logger`,
    # `SentencePieceTokenizer` and `HuggingFaceTokenizer` are module-level
    # imports not visible in this excerpt — confirm.
    def __init__(self, model_file: str):
        if model_file.endswith('.model'):
            model_folder = osp.split(model_file)[0]
        else:
            model_folder = model_file
            model_file = osp.join(model_folder, 'tokenizer.model')
        tokenizer_config_file = osp.join(model_folder, 'tokenizer_config.json')
        model_file_exists = osp.exists(model_file)
        config_exists = osp.exists(tokenizer_config_file)
        # use the HF backend whenever a tokenizer_config.json exists or the
        # sentencepiece model file is absent
        use_hf_model = config_exists or not model_file_exists
        self.logger = get_logger('lmdeploy')
        if not use_hf_model:
            self.model = SentencePieceTokenizer(model_file)
        else:
            self.model = HuggingFaceTokenizer(model_folder)
    # NOTE(review): the three accessors below read like @property members;
    # decorators are not visible in this excerpt — confirm before calling.
    def vocab_size(self):
        """vocabulary size."""
        return self.model.vocab_size
    def bos_token_id(self):
        """begin-of-the-sentence token id."""
        return self.model.bos_token_id
    def eos_token_id(self):
        """end of the sentence token id."""
        return self.model.eos_token_id
    def encode(self, s: str, add_bos: bool = True, **kwargs):
        """Tokenize a prompt.

        Args:
            s (str): a prompt
        Returns:
            list[int]: token ids
        """
        return self.model.encode(s, add_bos, **kwargs)
    def decode(
        self,
        t: Sequence[int],
        offset: Optional[int] = None,
        skip_special_tokens: bool = True,
    ):
        """De-tokenize.

        Args:
            t (List[int]): a list of token ids
            offset (int): for incrementally decoding. Default to None, which
                means not applied.
        Returns:
            str: text of decoding tokens
        """
        return self.model.decode(t, offset, skip_special_tokens)
    def detokenize_incrementally(self,
                                 all_input_ids: Sequence[int],
                                 state: DetokenizeState,
                                 skip_special_tokens: bool = True,
                                 spaces_between_special_tokens: bool = True):
        """Incrementally detokenize the input indexes.

        Args:
            all_input_ids (List[int]): a list of token ids. Expected to be
                different sections of a long sequence.
            state (DetokenizeState): an instance of DetokenizeState. Consists
                of incrementally decoding states.
            skip_special_tokens (bool): Whether or not to remove special
                tokens in the decoding. Default to be True.
            spaces_between_special_tokens (bool): Whether or not to add
                spaces between special tokens. Default to be True.
        Returns:
            str: decoding output string of the current round.
            state (DetokenizeState): an instance of DetokenizeState. Consists
                of incrementally decoding states.
        """
        return self.model.detokenize_incrementally(
            all_input_ids,
            state=state,
            skip_special_tokens=skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens)
    def __call__(self, s: Union[str, Sequence[str]]):
        """Tokenize prompts.

        Args:
            s (str): prompts
        Returns:
            list[int]: token ids
        """
        return self.model(s)
    def indexes_containing_token(self, token):
        """Return all the possible indexes, whose decoding output may contain
        the input token."""
        # multi-token words cannot serve as stop words: warn and return []
        encoded = self.encode(token, add_bos=False)
        if len(encoded) > 1:
            self.logger.warning(
                f'The token {token}, its length of indexes {encoded} is over '
                'than 1. Currently, it can not be used as stop words')
            return []
        return self.model.indexes_containing_token(token)
def sample_requests(
    dataset_path: str,
    num_requests: int,
    tokenizer: Tokenizer,
) -> List[Tuple[str, int, int]]:
    """Sample `num_requests` (prompt, prompt_len, output_len) tuples.

    Loads a ShareGPT-style JSON dataset, keeps the first user/assistant turn
    of each conversation, tokenizes prompts and completions, prunes entries
    that are too short (<4 tokens) or too long (prompt>1024 or
    prompt+completion>2048), and randomly samples the requested amount.

    Raises:
        ValueError: if fewer than `num_requests` conversations survive the
            length filters.
    """
    # Load the dataset.
    with open(dataset_path) as f:
        dataset = json.load(f)
    # Filter out the conversations with less than 2 turns.
    dataset = [data for data in dataset if len(data['conversations']) >= 2]
    # Only keep the first two turns of each conversation.
    dataset = [(data['conversations'][0]['value'],
                data['conversations'][1]['value']) for data in dataset]
    # Pre-sample to avoid tokenizing the whole dataset. Cap the sample size
    # at the population size: `random.sample` raises ValueError otherwise,
    # which made any dataset smaller than 1000 conversations unusable.
    pre_sample = min(len(dataset), max(int(num_requests * 1.2), 1000))
    dataset = random.sample(dataset, pre_sample)
    # Tokenize the prompts and completions.
    prompts = [prompt for prompt, _ in dataset]
    prompt_token_ids = tokenizer(prompts).input_ids
    completions = [completion for _, completion in dataset]
    completion_token_ids = tokenizer(completions).input_ids
    tokenized_dataset = []
    for i in range(len(dataset)):
        output_len = len(completion_token_ids[i])
        tokenized_dataset.append((prompts[i], prompt_token_ids[i], output_len))
    # Filter out too short/long sequences. (Loop variable renamed: it used
    # to shadow the `prompt_token_ids` list above.)
    filtered_dataset: List[Tuple[str, int, int]] = []
    for prompt, token_ids, output_len in tokenized_dataset:
        prompt_len = len(token_ids)
        if prompt_len < 4 or output_len < 4:
            # Prune too short sequences.
            continue
        if prompt_len > 1024 or prompt_len + output_len > 2048:
            # Prune too long sequences.
            continue
        filtered_dataset.append((prompt, prompt_len, output_len))
    # Sample the requests.
    return random.sample(filtered_dataset, num_requests)
8,256 | import argparse
import csv
import os
import time
from dataclasses import dataclass
from queue import Queue
from threading import Thread
from typing import List, Union
import numpy as np
from pynvml import (NVMLError, nvmlDeviceGetCount, nvmlDeviceGetHandleByIndex,
nvmlDeviceGetMemoryInfo, nvmlDeviceGetName,
nvmlDeviceGetPowerState, nvmlDeviceGetTemperature,
nvmlInit, nvmlShutdown, nvmlSystemGetDriverVersion)
from tqdm import tqdm
from lmdeploy.cli.utils import ArgumentHelper, DefaultsAndTypesHelpFormatter
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
TurbomindEngineConfig)
def infer(model, session_id: int, input_ids: List,
          gen_config: EngineGenerationConfig, test_round: int, que: Queue):
    """Benchmark one session: run `test_round` generations and push
    (session_id, per-token latency rows) onto `que`."""
    # only the first session drives the shared progress bar
    if session_id == 1:
        pbar = tqdm(total=test_round)
    chatbot = model.create_instance()
    output_seqlen = gen_config.max_new_tokens
    stats = []
    for _ in range(test_round):
        # one latency slot per expected token (+1 for a possible extra token)
        token_latency_stats = [0] * (output_seqlen + 1)
        prev = time.perf_counter()
        n_prev_token = 0
        """
        The iterator provided by `stream_infer` denotes the number of generated tokens so far,
        which is represented by the variable `n_token`.
        Please note that `n_token` is not a continuous value. In other words, during the iteration,
        its value might be 5, 7, 8, 16, and so on, rather than 1, 2, 3, 4, etc.
        So, it is quite difficult to get the latency of each generated token.
        As a work-around, we set the latency `now-prev` of each iteration to the first token of
        the new generated tokens, and leave the latency of the rest tokens being 0.
        For example, in the first iteration, 5 tokens are generated.
        The time elapsing in this iteration `now-prev` is set to the latency of first token of
        the 5 tokens, i.e. `token_latency_stats[0]`, and `token_latency_stats[1:4]` is set 0`
        """ # noqa: E501
        for outputs in chatbot.stream_infer(session_id,
                                            input_ids,
                                            gen_config=gen_config,
                                            sequence_start=True,
                                            sequence_end=True,
                                            stream_output=True):
            _, res, n_token = outputs
            now = time.perf_counter()
            # attribute this iteration's elapsed time to the first newly
            # generated token; the rest of the batch keeps latency 0
            if n_prev_token != n_token:
                token_latency_stats[n_prev_token] = np.round(now - prev, 3)
                n_prev_token = n_token
            prev = now
        # for pytorch engine to restart a session
        if hasattr(chatbot, 'end'):
            chatbot.end(session_id)
        if session_id == 1:
            pbar.update(1)
        assert output_seqlen <= n_token <= output_seqlen + 1, \
            f'Error. session_id({session_id}) request {output_seqlen} ' \
            f'tokens, but generate {n_token} tokens'
        # drop the spare (+1) slot so every row has exactly output_seqlen
        stats.append(token_latency_stats[:output_seqlen])
    que.put((session_id, stats))
def warmup(model, concurrency: int, input_ids: List[int], warmup_round: int,
           gen_config: EngineGenerationConfig):
    """Run `warmup_round` throwaway generations on `concurrency` threads."""
    if not warmup_round:
        return
    print('start to warmup ...')
    output_seqlen = gen_config.max_new_tokens

    def _worker(engine, session_id):
        instance = engine.create_instance()
        for _ in range(warmup_round):
            stream = instance.stream_infer(session_id,
                                           input_ids=input_ids,
                                           request_output_len=output_seqlen,
                                           sequence_start=True,
                                           sequence_end=True,
                                           ignore_eos=True,
                                           gen_config=gen_config)
            for _ in stream:
                continue
            # pytorch engine needs an explicit end to restart a session
            if hasattr(instance, 'end'):
                instance.end(session_id)

    tic = time.perf_counter()
    workers = [
        Thread(target=_worker, args=(model, idx + 1), daemon=True)
        for idx in range(concurrency)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    toc = time.perf_counter()
    print(f'end warmup, elapsed time: {round(toc - tic, 2)}s')
class EngineGenerationConfig(GenerationConfig):
    """generation parameter used by the inference engines."""
    # stop/bad words as token-id lists (the engine-level encoding of the
    # string lists carried by `GenerationConfig`)
    stop_words: List[int] = None
    bad_words: List[int] = None
    # NOTE(review): upstream declares this class as a @dataclass and `From`
    # as a @staticmethod (it takes no self/cls); the decorators are not
    # visible in this excerpt — confirm before instantiating or calling.
    def From(gen_config: GenerationConfig, tokenizer: Tokenizer):
        """convert `GenerationConfig` to `EngineGenerationConfig`
        Args:
            gen_config (GenerationConfig): an instance of class `GenerationConfig`
            tokenizer (Tokenizer): a tokenizer to encode the `stop_words` and `bad_words` in `gen_config`
        Returns:
            EngineGenerationConfig: the generation config used by inference engines
        Examples:
            >>> from lmdeploy import Tokenizer, GenerationConfig, EngineGenerationConfig
            >>> tokenizer = Tokenizer('internlm/internlm-chat-7b')
            >>> gen_config = GenerationConfig(stop_words=['<eoa>'])
            >>> gen_config = EngineGenerationConfig.From(gen_config, tokenizer)
        """ # noqa E501
        # encode a list of special words into token-id indexes;
        # returns None when no words are given
        def special_word_token_ids(words):
            if words is not None:
                # NOTE(review): the message says 'stop_words' even when this
                # helper validates bad_words
                assert isinstance(words, List) and \
                    all(isinstance(elem, str) for elem in words), \
                    f'stop_words must be a list of str but got {type(words)}'
                indexes = []
                for word in words:
                    indexes += tokenizer.indexes_containing_token(word)
                return indexes
            return None
        return EngineGenerationConfig(
            n=gen_config.n,
            max_new_tokens=gen_config.max_new_tokens,
            min_new_tokens=gen_config.min_new_tokens,
            top_p=gen_config.top_p,
            top_k=gen_config.top_k,
            temperature=gen_config.temperature,
            repetition_penalty=gen_config.repetition_penalty,
            ignore_eos=gen_config.ignore_eos,
            random_seed=gen_config.random_seed,
            skip_special_tokens=gen_config.skip_special_tokens,
            stop_words=special_word_token_ids(gen_config.stop_words),
            bad_words=special_word_token_ids(gen_config.bad_words))
class TurbomindEngineConfig:
    """TurboMind Engine config.
    Args:
        model_name (str): the name of the deployed model, deprecated and has no effect when version > 0.2.1
        model_format (str): the layout of the deployed model. It can be one of the following values [hf, llama, awq], `hf` meaning `hf_llama`, `llama` meaning `meta_llama`, `awq` meaning the quantized model by AWQ.
        tp (int): the number of GPU cards used in tensor parallelism, default to 1
        session_len (int): the max session length of a sequence, default to None
        max_batch_size (int): the max batch size during inference, default to 128
        cache_max_entry_count (float): the percentage of gpu memory occupied by the k/v cache.
            For versions of lmdeploy between `v0.2.0` and `v0.2.1`, it defaults to 0.5, depicting the percentage of TOTAL GPU memory to be allocated to the k/v cache.
            For lmdeploy versions greater than `v0.2.1`, it defaults to 0.8, signifying the percentage of FREE GPU memory to be reserved for the k/v cache
        quant_policy (int): , default to 0. When k/v is quantized into 8 bit, set it to 4
        rope_scaling_factor (int): scaling factor used for dynamic ntk, default to 0. TurboMind follows the implementation of transformer LlamaAttention
        use_logn_attn (bool): whether or not to use log attn: default to False
        download_dir (str): Directory to download and load the weights, default to the default cache directory of huggingface.
        revision (str): The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.
        max_prefill_token_num(int): the number of tokens each iteration during prefill, default to 8192
    """ # noqa: E501
    # NOTE(review): upstream declares this as a @dataclass; the decorator is
    # not visible in this excerpt — confirm before instantiating.
    model_name: Optional[str] = None
    model_format: Optional[str] = None
    tp: int = 1
    session_len: Optional[int] = None
    max_batch_size: int = 128
    cache_max_entry_count: float = 0.8
    quant_policy: int = 0
    rope_scaling_factor: float = 0.0
    use_logn_attn: bool = False
    download_dir: Optional[str] = None
    revision: Optional[str] = None
    max_prefill_token_num: int = 8192
class PytorchEngineConfig:
    """PyTorch Engine Config.
    Args:
        model_name (str): name of the given model.
        tp (int): Tensor Parallelism. default 1.
        session_len (int): Max session length. Default None.
        max_batch_size (int): Max batch size. Default 128.
        cache_max_entry_count (float): the percentage of gpu memory occupied
            by the k/v cache. For lmdeploy versions greater than `v0.2.1`,
            it defaults to 0.8, signifying the percentage of FREE GPU memory
            to be reserved for the k/v cache
        eviction_type (str): What action to perform when kv cache
            is full, ['recompute', 'copy'], Default 'recompute'.
        prefill_interval (int): Interval to perform prefill,
            Default 16.
        block_size (int): paging cache block size, default 64.
        num_cpu_blocks (int): Num cpu blocks. If num is 0, cache
            would be allocate according to current environment.
        num_gpu_blocks (int): Num gpu blocks. If num is 0, cache
            would be allocate according to current environment.
        adapters (dict): The path configs to lora adapters.
        max_prefill_token_num (int): tokens per iteration.
        thread_safe (bool): thread safe engine instance.
        download_dir (str): Directory to download and load the weights,
            default to the default cache directory of huggingface.
        revision (str): The specific model version to use.
            It can be a branch name, a tag name, or a commit id.
            If unspecified, will use the default version.
    """
    # NOTE(review): upstream declares this as a @dataclass; the decorator is
    # not visible in this excerpt — confirm before instantiating.
    model_name: str = ''
    tp: int = 1
    session_len: int = None
    max_batch_size: int = 128
    cache_max_entry_count: float = 0.8
    eviction_type: str = 'recompute'
    prefill_interval: int = 16
    block_size: int = 64
    num_cpu_blocks: int = 0
    num_gpu_blocks: int = 0
    adapters: Dict[str, str] = None
    max_prefill_token_num: int = 8192
    thread_safe: bool = False
    download_dir: str = None
    revision: str = None
def profile_throughput(model_path: str, concurrency: int, input_seqlen: int,
engine_config: Union[PytorchEngineConfig,
TurbomindEngineConfig],
gen_config: EngineGenerationConfig, test_round: int,
warmup_round: int):
output_seqlen = gen_config.max_new_tokens
print(f'profiling ... concurrency: {concurrency}, '
f'n_prompt_token: {input_seqlen}, '
f'n_completion_token: {output_seqlen}, '
f'test_round: {test_round}, warmup_round: {warmup_round}')
if isinstance(engine_config, TurbomindEngineConfig):
from lmdeploy.turbomind import TurboMind
tm_model = TurboMind.from_pretrained(model_path,
engine_config=engine_config)
elif isinstance(engine_config, PytorchEngineConfig):
from lmdeploy.pytorch.engine import Engine
tm_model = Engine(model_path, engine_config)
# make up a dummy `input_ids` with the length of `input_seqlen` exactly
assert input_seqlen > 0, 'input_seqlen should > 0'
input_ids = np.random.randint(low=0, high=101, size=input_seqlen).tolist()
warmup(tm_model, concurrency, input_ids, warmup_round, gen_config)
que = Queue()
procs = []
_start = time.perf_counter()
for i in range(concurrency):
proc = Thread(target=infer,
args=(tm_model, i + 1, input_ids, gen_config, test_round,
que))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
_end = time.perf_counter()
elapsed_time = _end - _start
token_latency_stats = []
while not que.empty():
_, _stats = que.get()
token_latency_stats += _stats
# The shape is [concurrency*test_round, output_seqlen]
token_latency_stats = np.stack(token_latency_stats, axis=0)
first_token_latency_min = np.round(
np.min(token_latency_stats[:, 0], axis=0), 3)
first_token_latency_max = np.round(
np.max(token_latency_stats[:, 0], axis=0), 3)
first_token_latency_ave = np.round(
np.mean(token_latency_stats[:, 0], axis=0), 3)
token_latency_max = np.round(np.max(np.sum(token_latency_stats, axis=1)),
3)
token_latency_min = np.round(np.min(np.sum(token_latency_stats, axis=1)),
3)
token_latency_ave = np.round(np.mean(np.sum(token_latency_stats, axis=1)),
3)
# sort token_latency without the first token's latency
sorted_token_latency = np.sort(token_latency_stats[:, 1:].flatten())
percentiles = [
np.round(
sorted_token_latency[int(percent * len(sorted_token_latency))], 3)
for percent in [0.5, 0.75, 0.95, 0.99]
]
throughput = np.round(token_latency_stats.size / elapsed_time, 2)
print(f'\n{"-" * 50}\ntotal time: {elapsed_time:.2f}s\n'
f'concurrency: {concurrency}, test_round: {test_round}\n'
f'input_tokens: {input_seqlen}, output_tokens: {output_seqlen}\n'
f'first_token latency(min, max, ave): '
f'{first_token_latency_min}s, {first_token_latency_max}s, '
f'{first_token_latency_ave}s\ntotal_token latency(min, max, ave): '
f'{token_latency_min}s, {token_latency_max}s, '
f'{token_latency_ave}s\n'
f'token_latency percentiles(50%,75%,95%,99%)(s): {percentiles}\n'
f'throughput: {throughput} token/s\n{"-" * 50}')
return tm_model.model_name, \
[first_token_latency_min, first_token_latency_max,
first_token_latency_ave], \
percentiles, throughput, tm_model.gpu_count | null |
8,257 | import argparse
import csv
import os
import time
from dataclasses import dataclass
from queue import Queue
from threading import Thread
from typing import List, Union
import numpy as np
from pynvml import (NVMLError, nvmlDeviceGetCount, nvmlDeviceGetHandleByIndex,
nvmlDeviceGetMemoryInfo, nvmlDeviceGetName,
nvmlDeviceGetPowerState, nvmlDeviceGetTemperature,
nvmlInit, nvmlShutdown, nvmlSystemGetDriverVersion)
from tqdm import tqdm
from lmdeploy.cli.utils import ArgumentHelper, DefaultsAndTypesHelpFormatter
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
TurbomindEngineConfig)
class DefaultsAndTypesHelpFormatter(argparse.HelpFormatter):
    """Formatter to output default value and type in help information."""

    def _get_help_string(self, action):
        """Append default-value and type hints to an action's help text.

        The literal ``%(default)s`` / ``%(type)s`` placeholders are added
        here; argparse expands them later via ``_expand_help``.

        Args:
            action: the ``argparse.Action`` whose help text is being built.

        Returns:
            str: the help text, possibly extended with default/type info.
        """
        # Use a local name instead of shadowing the builtin `help`.
        help_text = action.help
        if '%(default)' not in help_text:
            if action.default is not argparse.SUPPRESS:
                # Optionals, and positionals whose nargs make a default
                # meaningful, get the default appended -- unless the text
                # already mentions one.
                defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
                if (action.option_strings or action.nargs
                        in defaulting_nargs) and 'default' not in help_text.lower():
                    help_text += '. Default: %(default)s'
                if action.type:
                    help_text += '. Type: %(type)s'
        return help_text
class ArgumentHelper:
"""Helper class to add unified argument."""
def model_name(parser):
"""Add argument model_name to parser."""
return parser.add_argument(
'--model-name',
type=str,
default=None,
help='The name of the to-be-deployed model, such as'
' llama-7b, llama-13b, vicuna-7b and etc. You '
'can run `lmdeploy list` to get the supported '
'model names')
def model_format(parser, default: str = None):
return parser.add_argument(
'--model-format',
type=str,
default=default,
choices=['hf', 'llama', 'awq'],
help='The format of input model. `hf` meaning `hf_llama`, `llama` '
'meaning `meta_llama`, `awq` meaning the quantized model by awq')
def tp(parser):
"""Add argument tp to parser."""
return parser.add_argument(
'--tp',
type=int,
default=1,
help='GPU number used in tensor parallelism. Should be 2^n')
def session_id(parser):
"""Add argument session_id to parser."""
return parser.add_argument('--session-id',
type=int,
default=1,
help='The identical id of a session')
def session_len(parser, default: int = None):
return parser.add_argument('--session-len',
type=int,
default=default,
help='The max session length of a sequence')
def max_batch_size(parser):
"""Add argument max_batch_size to parser."""
return parser.add_argument('--max-batch-size',
type=int,
default=128,
help='Maximum batch size')
def quant_policy(parser):
"""Add argument quant_policy to parser."""
return parser.add_argument('--quant-policy',
type=int,
default=0,
help='Whether to use kv int8')
def rope_scaling_factor(parser):
"""Add argument rope_scaling_factor to parser."""
return parser.add_argument('--rope-scaling-factor',
type=float,
default=0.0,
help='Rope scaling factor')
def use_logn_attn(parser):
"""Add argument use_logn_attn to parser."""
return parser.add_argument(
'--use-logn-attn',
action='store_true',
default=False,
help='Whether to use logn attention scaling')
def block_size(parser):
"""Add argument block_size to parser."""
return parser.add_argument('--block-size',
type=int,
default=64,
help='The block size for paging cache')
def top_p(parser):
"""Add argument top_p to parser."""
return parser.add_argument(
'--top-p',
type=float,
default=0.8,
help='An alternative to sampling with temperature,'
' called nucleus sampling, where the model '
'considers the results of the tokens with '
'top_p probability mass')
def top_k(parser):
"""Add argument top_k to parser."""
return parser.add_argument(
'--top-k',
type=int,
default=1,
help='An alternative to sampling with temperature, '
'where the model considers the top_k tokens '
'with the highest probability')
def temperature(parser, default: float = 0.8):
return parser.add_argument('-temp',
'--temperature',
type=float,
default=default,
help='Sampling temperature')
def repetition_penalty(parser):
"""Add argument repetition_penalty to parser."""
return parser.add_argument('--repetition-penalty',
type=float,
default=1.0,
help='Parameter to penalize repetition')
def cap(parser):
"""Add argument cap to parser."""
return parser.add_argument(
'--cap',
type=str,
default='chat',
choices=['completion', 'infilling', 'chat', 'python'],
help='The capability of a model. For example, codellama has the '
'ability among ["completion", "infilling", "chat", "python"]')
def log_level(parser):
"""Add argument log_level to parser."""
import logging
return parser.add_argument('--log-level',
type=str,
default='ERROR',
choices=list(logging._nameToLevel.keys()),
help='Set the log level')
def api_keys(parser):
return parser.add_argument(
'--api-keys',
type=str,
nargs='*',
default=None,
help='Optional list of space separated API keys',
)
def ssl(parser):
return parser.add_argument(
'--ssl',
action='store_true',
required=False,
default=False,
help='Enable SSL. Requires OS Environment variables'
" 'SSL_KEYFILE' and 'SSL_CERTFILE'",
)
def backend(parser):
"""Add argument backend to parser."""
return parser.add_argument('--backend',
type=str,
default='turbomind',
choices=['pytorch', 'turbomind'],
help='Set the inference backend')
def engine(parser):
"""Add argument engine to parser."""
return parser.add_argument('--engine',
type=str,
default='turbomind',
choices=['pytorch', 'turbomind'],
help='Set the inference backend')
def stream_output(parser):
"""Add argument stream_output to parser."""
return parser.add_argument(
'--stream-output',
action='store_true',
help='Indicator for streaming output or not')
def calib_dataset(parser):
"""Add argument calib_dataset to parser."""
return parser.add_argument('--calib-dataset',
type=str,
default='ptb',
help='The calibration dataset name')
def calib_samples(parser):
"""Add argument calib_samples to parser."""
return parser.add_argument(
'--calib-samples',
type=int,
default=128,
help='The number of samples for calibration')
def calib_seqlen(parser):
"""Add argument calib_seqlen to parser."""
return parser.add_argument('--calib-seqlen',
type=int,
default=2048,
help='The sequence length for calibration')
def device(parser):
"""Add argument device to parser."""
return parser.add_argument('--device',
type=str,
default='cuda',
choices=['cuda', 'cpu'],
help='Device type of running')
def meta_instruction(parser):
"""Add argument meta_instruction to parser."""
return parser.add_argument('--meta-instruction',
type=str,
default=None,
help='System prompt for ChatTemplateConfig')
def cache_max_entry_count(parser):
"""Add argument cache_max_entry_count to parser."""
return parser.add_argument(
'--cache-max-entry-count',
type=float,
default=0.8,
help='The percentage of gpu memory occupied by the k/v cache')
def adapters(parser):
"""Add argument adapters to parser."""
return parser.add_argument(
'--adapters',
nargs='*',
type=str,
default=None,
help='Used to set path(s) of lora adapter(s). One can input '
'key-value pairs in xxx=yyy format for multiple lora '
'adapters. If only have one adapter, one can only input '
'the path of the adapter.')
def work_dir(parser):
"""Add argument work_dir to parser."""
return parser.add_argument(
'--work-dir',
type=str,
default='./work_dir',
help='The working directory to save results')
def parse_args():
parser = argparse.ArgumentParser(
description='Profile the token generation performance with'
' pytorch or turbomind engine',
formatter_class=DefaultsAndTypesHelpFormatter)
parser.add_argument('model_path',
type=str,
help='the path of the model in localhost or '
'the repo_id of the model in huggingface.co')
parser.add_argument('-c',
'--concurrency',
nargs='+',
type=int,
help='how many requests launched concurrently',
default=[1, 16, 32, 64])
parser.add_argument(
'-pt',
'--prompt-tokens',
nargs='+',
type=int,
help='how many requests launched concurrently. One-to-one '
'correspondence with completion-tokens',
default=[1, 128, 128, 2048, 2048])
parser.add_argument('-ct',
'--completion-tokens',
nargs='+',
type=int,
help='how many tokens to be generated. One-to-one'
'correspondence with prompt-tokens',
default=[128, 128, 2048, 128, 2048])
parser.add_argument('--csv',
type=str,
help='Where to save the result.',
default='profile_generation.csv')
parser.add_argument('-tr',
'--test-round',
type=int,
help='number of test rounds',
default=3)
parser.add_argument('-w',
'--warmup-round',
type=int,
help='number of warmup rounds',
default=1)
# other args
ArgumentHelper.top_p(parser)
ArgumentHelper.temperature(parser)
ArgumentHelper.top_k(parser)
ArgumentHelper.log_level(parser)
ArgumentHelper.backend(parser)
# pytorch engine args
pt_group = parser.add_argument_group('PyTorch engine arguments')
tp_act = ArgumentHelper.tp(pt_group)
cache_count_act = ArgumentHelper.cache_max_entry_count(pt_group)
session_len_act = ArgumentHelper.session_len(pt_group, default=2048)
# turbomind engine args
tb_group = parser.add_argument_group('TurboMind engine argument')
tb_group._group_actions.append(tp_act)
tb_group._group_actions.append(session_len_act)
tb_group._group_actions.append(cache_count_act)
ArgumentHelper.model_format(tb_group, default='hf')
args = parser.parse_args()
return args | null |
8,258 | import argparse
import csv
import os
import time
from dataclasses import dataclass
from queue import Queue
from threading import Thread
from typing import List, Union
import numpy as np
from pynvml import (NVMLError, nvmlDeviceGetCount, nvmlDeviceGetHandleByIndex,
nvmlDeviceGetMemoryInfo, nvmlDeviceGetName,
nvmlDeviceGetPowerState, nvmlDeviceGetTemperature,
nvmlInit, nvmlShutdown, nvmlSystemGetDriverVersion)
from tqdm import tqdm
from lmdeploy.cli.utils import ArgumentHelper, DefaultsAndTypesHelpFormatter
from lmdeploy.messages import (EngineGenerationConfig, PytorchEngineConfig,
TurbomindEngineConfig)
def __proc_cb(*args, ret_pipe, target):
    # Child-process entry point: run `target(*args)` and ship its result
    # back through the write end of `ret_pipe`.
    try:
        ret = target(*args)
        ret_pipe[1].send(ret)
    except Exception as e:
        # Exceptions are sent as plain values; the parent side is expected
        # to detect and re-raise them (see _process_map).
        ret_pipe[1].send(e)
def _process_map(target, iterable):
    # Run `target(*iterable)` once in a freshly spawned process and return
    # its result, re-raising any exception the child reported.
    from multiprocessing import Pipe, get_context
    pipe = Pipe(False)
    # 'spawn' gives the child a clean interpreter (no inherited CUDA state).
    spawn_context = get_context('spawn')
    proc = spawn_context.Process(target=__proc_cb,
                                 args=iterable,
                                 kwargs=dict(ret_pipe=pipe, target=target))
    proc.start()
    proc.join()
    # NOTE(review): if the child dies without sending (e.g. a hard crash),
    # this recv() blocks forever -- confirm callers can tolerate that.
    ret = pipe[0].recv()
    if isinstance(ret, Exception):
        raise ret
    return ret
8,259 | import csv
import logging
import os
import time
from typing import Optional
import fire
import torch
from transformers import AutoModelForCausalLM, GenerationConfig
from lmdeploy.pytorch.accel import LoadNoInit
from lmdeploy.utils import get_logger
class LoadNoInit:
    """Context manager that turns parameter initialization into a no-op.

    While active, the common ``torch.nn.init`` routines and
    ``torch.Tensor.normal_`` do nothing, so building a model skeleton skips
    the (wasted) random initialization pass before real weights are loaded.
    On exit the original functions are restored.
    """

    # torch.nn.init functions that are stubbed out while the context is live.
    _INIT_NAMES = ('constant_', 'zeros_', 'ones_', 'uniform_', 'normal_',
                   'kaiming_uniform_', 'kaiming_normal_')

    def __init__(self):
        # Keep references to the real initializers so __exit__ can restore.
        self._saved = {name: getattr(torch.nn.init, name)
                       for name in self._INIT_NAMES}
        self._tensor_normal = torch.Tensor.normal_

    def __enter__(self, *args, **kwargs):
        """Swap every tracked initializer for a do-nothing stub."""
        for name in self._INIT_NAMES:
            setattr(torch.nn.init, name, lambda *a, **kw: None)
        torch.Tensor.normal_ = lambda *a, **kw: None

    def __exit__(self, *args, **kwargs):
        """Restore the original initializers."""
        for name, fn in self._saved.items():
            setattr(torch.nn.init, name, fn)
        torch.Tensor.normal_ = self._tensor_normal
def init_hf_model(model_path: str):
    """Load a HuggingFace causal-LM in fp16, skipping weight initialization.

    Args:
        model_path (str): HF model name or local checkpoint directory.

    Returns:
        the loaded ``AutoModelForCausalLM`` instance.
    """
    start = time.monotonic()
    # LoadNoInit stubs out torch initializers so the model skeleton is built
    # without a useless random-init pass before the real weights are loaded.
    with LoadNoInit():
        model = AutoModelForCausalLM.from_pretrained(model_path,
                                                     torch_dtype=torch.float16,
                                                     trust_remote_code=True)
    print(f'load model in {time.monotonic() -start} s')
    return model
8,260 | import csv
import logging
import os
import time
from typing import Optional
import fire
import torch
from transformers import AutoModelForCausalLM, GenerationConfig
from lmdeploy.pytorch.accel import LoadNoInit
from lmdeploy.utils import get_logger
def accel_deepspeed(model, max_out_tokens, tp_size=1):
    """Wrap `model` with the DeepSpeed inference engine.

    Args:
        model: a transformers model to accelerate.
        max_out_tokens: maximum total tokens the injected kernels support.
        tp_size: tensor-parallel world size.

    Returns:
        the DeepSpeed inference engine wrapping the model.
    """
    # Imported lazily so deepspeed is only required when this accel is used.
    import deepspeed
    ds_model = deepspeed.init_inference(
        model=model,  # Transformers models
        tensor_parallel={'tp_size': tp_size},
        dtype=torch.float16,  # dtype of the weights (fp16)
        replace_with_kernel_inject=True,
        max_out_tokens=max_out_tokens,
    )
    return ds_model
8,261 | import csv
import json
import random
import time
from queue import Queue
from threading import Thread
from typing import List, Tuple
import fire
import numpy as np
from tqdm import tqdm
from lmdeploy.serve.openai.api_client import APIClient
from lmdeploy.tokenizer import Tokenizer
class Tokenizer:
"""Tokenize prompts or de-tokenize tokens into texts.
Args:
model_file (str): the path of the tokenizer model
"""
def __init__(self, model_file: str):
if model_file.endswith('.model'):
model_folder = osp.split(model_file)[0]
else:
model_folder = model_file
model_file = osp.join(model_folder, 'tokenizer.model')
tokenizer_config_file = osp.join(model_folder, 'tokenizer_config.json')
model_file_exists = osp.exists(model_file)
config_exists = osp.exists(tokenizer_config_file)
use_hf_model = config_exists or not model_file_exists
self.logger = get_logger('lmdeploy')
if not use_hf_model:
self.model = SentencePieceTokenizer(model_file)
else:
self.model = HuggingFaceTokenizer(model_folder)
def vocab_size(self):
"""vocabulary size."""
return self.model.vocab_size
def bos_token_id(self):
"""begine of the sentence token id."""
return self.model.bos_token_id
def eos_token_id(self):
"""end of the sentence token id."""
return self.model.eos_token_id
def encode(self, s: str, add_bos: bool = True, **kwargs):
"""Tokenize a prompt.
Args:
s (str): a prompt
Returns:
list[int]: token ids
"""
return self.model.encode(s, add_bos, **kwargs)
def decode(
self,
t: Sequence[int],
offset: Optional[int] = None,
skip_special_tokens: bool = True,
):
"""De-tokenize.
Args:
t (List[int]): a list of token ids
offset (int): for incrementally decoding. Default to None, which
means not applied.
Returns:
str: text of decoding tokens
"""
return self.model.decode(t, offset, skip_special_tokens)
def detokenize_incrementally(self,
all_input_ids: Sequence[int],
state: DetokenizeState,
skip_special_tokens: bool = True,
spaces_between_special_tokens: bool = True):
"""Incrementally detokenize the input indexes.
Args:
all_input_ids (List[int]): a list of token ids. Expected to be
different sections of a long sequence.
state (DetokenizeState): an instance of DetokenizeState. Consists
of incrementally decoding states.
skip_special_tokens (bool): Whether or not to remove special tokens
in the decoding. Default to be True.
spaces_between_special_tokens (bool): Whether or not to add spaces
between special tokens. Default to be True.
Returns:
str: decoding output string of the current round.
state (DetokenizeState): an instance of DetokenizeState. Consists
of incrementally decoding states.
"""
return self.model.detokenize_incrementally(
all_input_ids,
state=state,
skip_special_tokens=skip_special_tokens,
spaces_between_special_tokens=spaces_between_special_tokens)
def __call__(self, s: Union[str, Sequence[str]]):
"""Tokenize prompts.
Args:
s (str): prompts
Returns:
list[int]: token ids
"""
return self.model(s)
def indexes_containing_token(self, token):
"""Return all the possible indexes, whose decoding output may contain
the input token."""
encoded = self.encode(token, add_bos=False)
if len(encoded) > 1:
self.logger.warning(
f'The token {token}, its length of indexes {encoded} is over '
'than 1. Currently, it can not be used as stop words')
return []
return self.model.indexes_containing_token(token)
def sample_requests(
    dataset_path: str,
    num_requests: int,
    tokenizer: Tokenizer,
) -> List[Tuple[str, int, int]]:
    """Sample `num_requests` (prompt, prompt_len, output_len) triples from a
    ShareGPT-style json dump, pruning too-short and too-long pairs.
    """
    # Load the dataset.
    with open(dataset_path) as f:
        dataset = json.load(f)
    # Filter out the conversations with less than 2 turns.
    dataset = [data for data in dataset if len(data['conversations']) >= 2]
    # Only keep the first two turns of each conversation.
    dataset = [(data['conversations'][0]['value'],
                data['conversations'][1]['value']) for data in dataset]
    # pre-sample to avoid go through all the dataset
    dataset = random.sample(dataset, max(int(num_requests * 1.2), 1000))
    # Tokenize the prompts and completions.
    prompts = [prompt for prompt, _ in dataset]
    prompt_token_ids = tokenizer(prompts).input_ids
    completions = [completion for _, completion in dataset]
    completion_token_ids = tokenizer(completions).input_ids
    tokenized_dataset = []
    for i in range(len(dataset)):
        output_len = len(completion_token_ids[i])
        tokenized_dataset.append((prompts[i], prompt_token_ids[i], output_len))
    # Filter out too long sequences.
    filtered_dataset: List[Tuple[str, int, int]] = []
    # NOTE: the loop below rebinds `prompt_token_ids` (the list built above);
    # harmless here because the list is no longer needed afterwards.
    for prompt, prompt_token_ids, output_len in tokenized_dataset:
        prompt_len = len(prompt_token_ids)
        if prompt_len < 4 or output_len < 4:
            # Prune too short sequences.
            continue
        if prompt_len > 1024 or prompt_len + output_len > 2048:
            # Prune too long sequences.
            continue
        filtered_dataset.append((prompt, prompt_len, output_len))
    # Sample the requests.
    sampled_requests = random.sample(filtered_dataset, num_requests)
    return sampled_requests
8,262 | import json
import pickle
import time
from pathlib import Path
import fire
import numpy as np
from transformers import AutoTokenizer
from lmdeploy.pytorch.decode import Engine
The provided code snippet includes necessary dependencies for implementing the `benchmark` function. Write a Python function `def benchmark(model_path, share_gpt_path, downsample=100, accel=None, save_to='decode_result')` to solve the following problem:
Benchmark using ShareGPT data. Please download `ShareGPT_V3_unfiltered_cleaned_split.json` as data for this benchmark.
Here is the function:
def benchmark(model_path,
              share_gpt_path,
              downsample=100,
              accel=None,
              save_to='decode_result'):
    """Benchmark decoding throughput using ShareGPT data.

    Please download `ShareGPT_V3_unfiltered_cleaned_split.json` as data for
    this benchmark.

    Args:
        model_path: HF model path/name, used for the tokenizer and engine.
        share_gpt_path: path to the ShareGPT json dump.
        downsample: keep every `downsample`-th prompt (100 -> 1% of data).
        accel: optional acceleration backend forwarded to `Engine`.
        save_to: basename for the `.pkl` / `.txt` result files.
    """
    start = time.monotonic()
    # Close the dataset file deterministically instead of leaking the handle.
    with open(share_gpt_path, 'r') as fin:
        content = json.load(fin)
    # Flatten every conversation turn into one list of prompt texts.
    texts = [cc['value'] for c in content for cc in c['conversations']]
    print(f'Parse json in {time.monotonic() - start} seconds.')

    tokenizer = AutoTokenizer.from_pretrained(model_path)
    tokenizer.pad_token_id = tokenizer.eos_token_id
    tokenizer.padding_side = 'right'

    texts = texts[::downsample]
    input_ids = tokenizer(texts, padding=False).input_ids
    print(f'Number of prompts: {len(input_ids)}')
    print(f'Maximum length: {max(map(len, input_ids))}')
    print(f'Total length: {sum(map(len, input_ids))}')

    start = time.monotonic()
    # Init an engine
    engine = Engine(model_path, tokenizer=tokenizer, accel=accel)
    # decode prompts
    probs = engine.decode(input_ids)

    total_tokens = sum(map(len, input_ids))
    elapsed = time.monotonic() - start
    print(f'Decoded {total_tokens} tokens in {elapsed:.1f} seconds, '
          f'{total_tokens / elapsed:.1f} tokens/s.')
    print(f'Decoded {len(probs)} prompts in {elapsed:.1f} seconds, '
          f'{len(probs) / elapsed:.1f} requests/s.')

    # Persist raw probabilities (pickle) and a human-readable text dump.
    pkl_path = Path(save_to).with_suffix('.pkl')
    with pkl_path.open('wb') as f:
        pickle.dump(probs, f)
    txt_path = Path(save_to).with_suffix('.txt')
    np.savetxt(txt_path.as_posix(), probs, fmt='%.4e')
8,263 | import math
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
def sort_pack_padded_sequence(input, lengths):
    """Sort a padded batch by decreasing length and pack it.

    Returns the ``PackedSequence`` plus the permutation that restores the
    original batch order (consumed by ``pad_unsort_packed_sequence``).
    """
    sorted_lengths, order = lengths.sort(descending=True)
    packed = pack_padded_sequence(input[order], sorted_lengths.cpu(),
                                  batch_first=True)
    # Invert the sorting permutation: row i of the sorted batch comes from
    # original row `order[i]`, so `inverse_order[order] = 0..N-1`.
    inverse_order = torch.empty_like(order)
    inverse_order[order] = torch.arange(len(order)).type_as(order)
    return packed, inverse_order
def pad_unsort_packed_sequence(input, inv_ix):
    """Unpack a ``PackedSequence`` and restore the original batch order."""
    padded, _lengths = pad_packed_sequence(input, batch_first=True)
    return padded[inv_ix]
def pack_wrapper(module, attn_feats, attn_feat_lens):
    # Run `module` over variable-length sequences: pack first, then restore
    # padding and the original batch order afterwards.
    packed, inv_ix = sort_pack_padded_sequence(attn_feats, attn_feat_lens)
    if isinstance(module, torch.nn.RNNBase):
        # RNNs consume the PackedSequence directly; [0] selects the output
        # sequence (discarding the hidden state).
        return pad_unsort_packed_sequence(module(packed)[0], inv_ix)
    else:
        # Non-recurrent modules operate on the flat data tensor (packed[0]);
        # rebuild a PackedSequence with the same batch_sizes (packed[1])
        # before unpacking.
        return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
8,264 | import math
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
def repeat_tensor(x, n):
    """Stack `n` copies of `x` along a new leading dimension."""
    repeats = [n] + [1] * x.dim()
    return x.unsqueeze(0).repeat(*repeats)
8,265 | import math
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchaudio import transforms
from torchlibrosa.augmentation import SpecAugmentation
from .utils import mean_with_lens, max_with_lens, \
init, pack_wrapper, generate_length_mask, PositionalEncoding
def init(m, method="kaiming"):
    """Initialize the weights (and zero the bias) of a single module.

    Conv/Linear/Embedding weights get kaiming- or xavier-uniform init;
    BatchNorm weights are set to 1 and biases to 0.  Biases, where present,
    are zeroed.  Modules of any other type are left untouched.
    """
    def _init_weight(weight):
        # Shared weight initializer; rejects unknown method names.
        if method == "kaiming":
            nn.init.kaiming_uniform_(weight)
        elif method == "xavier":
            nn.init.xavier_uniform_(weight)
        else:
            raise Exception(f"initialization method {method} not supported")

    if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
        # BatchNorm starts as the identity transform regardless of `method`.
        nn.init.constant_(m.weight, 1)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.Embedding):
        # Embeddings carry no bias; only the weight is initialized.
        _init_weight(m.weight)
    elif isinstance(m, (nn.Conv2d, nn.Conv1d, nn.Linear)):
        _init_weight(m.weight)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
The provided code snippet includes necessary dependencies for implementing the `init_layer` function. Write a Python function `def init_layer(layer)` to solve the following problem:
Initialize a Linear or Convolutional layer.
Here is the function:
def init_layer(layer):
    """Xavier-initialize a Linear/Conv layer's weight and zero its bias."""
    nn.init.xavier_uniform_(layer.weight)
    if getattr(layer, 'bias', None) is not None:
        layer.bias.data.fill_(0.)
8,266 | import math
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchaudio import transforms
from torchlibrosa.augmentation import SpecAugmentation
from .utils import mean_with_lens, max_with_lens, \
init, pack_wrapper, generate_length_mask, PositionalEncoding
The provided code snippet includes necessary dependencies for implementing the `init_bn` function. Write a Python function `def init_bn(bn)` to solve the following problem:
Initialize a Batchnorm layer.
Here is the function:
def init_bn(bn):
    """Reset a Batchnorm layer to the identity transform (weight=1, bias=0)."""
    nn.init.ones_(bn.weight)
    nn.init.zeros_(bn.bias)
8,267 | import math
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchaudio import transforms
from torchlibrosa.augmentation import SpecAugmentation
from .utils import mean_with_lens, max_with_lens, \
init, pack_wrapper, generate_length_mask, PositionalEncoding
class LinearSoftPool(nn.Module):
    """Linear-softmax pooling over the time axis.

    Weights each frame by its own activation, pulling the pooled value
    toward the actual maximum.  From "A Comparison of Five Multiple
    Instance Learning Pooling Functions for Sound Event Detection with
    Weak Labeling" (https://arxiv.org/abs/1810.09050).
    """

    def __init__(self, pooldim=1):
        super().__init__()
        self.pooldim = pooldim

    def forward(self, logits, time_decision):
        # `logits` is unused; kept for a uniform pooling-module interface.
        weighted = time_decision * time_decision
        return weighted.sum(self.pooldim) / time_decision.sum(self.pooldim)
class MeanPool(nn.Module):
    """Plain temporal mean pooling."""

    def __init__(self, pooldim=1):
        super().__init__()
        self.pooldim = pooldim

    def forward(self, logits, decision):
        # `logits` is unused; kept for a uniform pooling-module interface.
        return decision.mean(dim=self.pooldim)
class AttentionPool(nn.Module):
    """Attention-weighted temporal pooling.

    A learned linear transform of `logits` yields per-frame scores; the
    softmax of those scores weights the average of `decision` over time.
    """

    def __init__(self, inputdim, outputdim=10, pooldim=1, **kwargs):
        super().__init__()
        self.inputdim = inputdim
        self.outputdim = outputdim
        self.pooldim = pooldim
        self.transform = nn.Linear(inputdim, outputdim)
        self.activ = nn.Softmax(dim=self.pooldim)
        self.eps = 1e-7  # guards against a zero-weight denominator

    def forward(self, logits, decision):
        # Input is (B, T, D); clamp pre-softmax scores for numerical safety.
        scores = self.transform(logits).clamp(-15, 15)
        weights = self.activ(scores)
        pooled = (decision * weights).sum(self.pooldim)
        # (B, T, D) -> (B, D)
        return pooled / (weights.sum(self.pooldim) + self.eps)
The provided code snippet includes necessary dependencies for implementing the `parse_poolingfunction` function. Write a Python function `def parse_poolingfunction(poolingfunction_name='mean', **kwargs)` to solve the following problem:
parse_poolingfunction A heler function to parse any temporal pooling Pooling is done on dimension 1 :param poolingfunction_name: :param **kwargs:
Here is the function:
def parse_poolingfunction(poolingfunction_name='mean', **kwargs):
    """Build a temporal pooling module from its (case-insensitive) name.

    Pooling is done on dimension 1.

    :param poolingfunction_name: one of 'mean', 'linear', 'attention'
    :param **kwargs: for 'attention', must provide 'inputdim' and 'outputdim'
    :return: the pooling ``nn.Module``
    """
    # NOTE(review): an unrecognized name falls through every branch and the
    # function implicitly returns None; consider raising ValueError instead.
    poolingfunction_name = poolingfunction_name.lower()
    if poolingfunction_name == 'mean':
        return MeanPool(pooldim=1)
    elif poolingfunction_name == 'linear':
        return LinearSoftPool(pooldim=1)
    elif poolingfunction_name == 'attention':
        return AttentionPool(inputdim=kwargs['inputdim'],
                             outputdim=kwargs['outputdim'])
8,268 | import math
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchaudio import transforms
from torchlibrosa.augmentation import SpecAugmentation
from .utils import mean_with_lens, max_with_lens, \
init, pack_wrapper, generate_length_mask, PositionalEncoding
def mean_with_lens(features, lens):
def max_with_lens(features, lens):
def embedding_pooling(x, lens, pooling="mean"):
    """Pool frame-level embeddings into one clip-level embedding per item.

    `x` is presumably [N, T, E] (implied by the dim-1 gather below) and
    `lens` holds each sequence's valid length -- TODO confirm with callers.
    `pooling` selects the reduction: 'max', 'mean', 'mean+max' (their sum)
    or 'last' (the final valid frame).
    """
    if pooling == "max":
        fc_embs = max_with_lens(x, lens)
    elif pooling == "mean":
        fc_embs = mean_with_lens(x, lens)
    elif pooling == "mean+max":
        x_mean = mean_with_lens(x, lens)
        x_max = max_with_lens(x, lens)
        fc_embs = x_mean + x_max
    elif pooling == "last":
        # Gather the frame at index (len - 1) for every sequence.
        indices = (lens - 1).reshape(-1, 1, 1).repeat(1, 1, x.size(-1))
        # indices: [N, 1, hidden]
        fc_embs = torch.gather(x, 1, indices).squeeze(1)
    else:
        raise Exception(f"pooling method {pooling} not support")
    return fc_embs
8,269 | import math
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchaudio import transforms
from torchlibrosa.augmentation import SpecAugmentation
from .utils import mean_with_lens, max_with_lens, \
init, pack_wrapper, generate_length_mask, PositionalEncoding
def conv_conv_block(in_channel, out_channel):
    """Two stacked 3x3 Conv2d -> BatchNorm2d -> ReLU stages.

    The first stage maps in_channel -> out_channel, the second keeps
    out_channel; padding=1 preserves the spatial size and the convolutions
    carry no bias (BatchNorm supplies the shift).
    """
    stages = []
    for cin, cout in ((in_channel, out_channel), (out_channel, out_channel)):
        stages.extend([
            nn.Conv2d(cin, cout, kernel_size=3, bias=False, padding=1),
            nn.BatchNorm2d(cout),
            nn.ReLU(True),
        ])
    return nn.Sequential(*stages)
8,270 | import json
from tqdm import tqdm
import logging
import pickle
from collections import Counter
import re
import fire
def build_vocab(input_json: str,
                threshold: int,
                keep_punctuation: bool,
                host_address: str,
                character_level: bool = False,
                zh: bool = True ):
    """Build vocabulary from csv file with a given threshold to drop all counts < threshold
    Args:
        input_json(string): Preprocessed json file. Structure like this:
            {
              'audios': [
                {
                  'audio_id': 'xxx',
                  'captions': [
                    {
                      'caption': 'xxx',
                      'cap_id': 'xxx'
                    }
                  ]
                },
                ...
              ]
            }
        threshold (int): Threshold to drop all words with counts < threshold
        keep_punctuation (bool): Includes or excludes punctuation.
        host_address (str): Address of a running Stanford CoreNLP server;
            only used when ``zh`` is True and captions are not yet tokenized.
        character_level (bool): Split captions into characters instead of words
            (Chinese branch only).
        zh (bool): Whether captions are Chinese (CoreNLP segmentation) or
            English (PTB tokenizer).
    Returns:
        vocab (Vocab): Object with the processed vocabulary
    Side effects:
        When captions were not pretokenized, ``input_json`` is overwritten in
        place with an added "tokens" field per caption.
    """
    data = json.load(open(input_json, "r"))["audios"]
    counter = Counter()
    # If the first caption already carries a "tokens" field, assume the whole
    # file has been tokenized in a previous run.
    pretokenized = "tokens" in data[0]["captions"][0]
    if zh:
        from nltk.parse.corenlp import CoreNLPParser
        from zhon.hanzi import punctuation
        if not pretokenized:
            # CoreNLP server performs the Chinese word segmentation.
            parser = CoreNLPParser(host_address)
        for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
            for cap_idx in range(len(data[audio_idx]["captions"])):
                if pretokenized:
                    tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
                else:
                    caption = data[audio_idx]["captions"][cap_idx]["caption"]
                    # Remove all punctuations
                    if not keep_punctuation:
                        caption = re.sub("[{}]".format(punctuation), "", caption)
                    if character_level:
                        tokens = list(caption)
                    else:
                        tokens = list(parser.tokenize(caption))
                    # Cache the tokenization back into the data structure.
                    data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens)
                counter.update(tokens)
    else:
        if pretokenized:
            for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
                for cap_idx in range(len(data[audio_idx]["captions"])):
                    tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
                    counter.update(tokens)
        else:
            from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
            # PTBTokenizer consumes {audio_id: [{"caption": ...}, ...]}.
            captions = {}
            for audio_idx in range(len(data)):
                audio_id = data[audio_idx]["audio_id"]
                captions[audio_id] = []
                for cap_idx in range(len(data[audio_idx]["captions"])):
                    caption = data[audio_idx]["captions"][cap_idx]["caption"]
                    captions[audio_id].append({
                        "audio_id": audio_id,
                        "id": cap_idx,
                        "caption": caption
                    })
            tokenizer = PTBTokenizer()
            captions = tokenizer.tokenize(captions)
            for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
                audio_id = data[audio_idx]["audio_id"]
                for cap_idx in range(len(data[audio_idx]["captions"])):
                    tokens = captions[audio_id][cap_idx]
                    data[audio_idx]["captions"][cap_idx]["tokens"] = tokens
                    counter.update(tokens.split(" "))
    if not pretokenized:
        # NOTE(review): overwrites the input file with the tokenized captions —
        # confirm this in-place side effect is intended.
        json.dump({ "audios": data }, open(input_json, "w"), indent=4, ensure_ascii=not zh)
    words = [word for word, cnt in counter.items() if cnt >= threshold]
    # Create a vocab wrapper and add some special tokens.
    vocab = Vocabulary()
    vocab.add_word("<pad>")
    vocab.add_word("<start>")
    vocab.add_word("<end>")
    vocab.add_word("<unk>")
    # Add the words to the vocabulary.
    for word in words:
        vocab.add_word(word)
    return vocab
def process(input_json: str,
            output_file: str,
            threshold: int = 1,
            keep_punctuation: bool = False,
            character_level: bool = False,
            host_address: str = "http://localhost:9000",
            zh: bool = False):
    """Build the caption vocabulary and pickle it to ``output_file``."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
    logging.info("Build Vocab")
    vocabulary = build_vocab(
        input_json=input_json,
        threshold=threshold,
        keep_punctuation=keep_punctuation,
        host_address=host_address,
        character_level=character_level,
        zh=zh)
    with open(output_file, "wb") as store:
        pickle.dump(vocabulary, store)
    logging.info(f"Total vocabulary size: {len(vocabulary)}")
    logging.info(f"Saved vocab to '{output_file}'")
8,271 | import json
from tqdm import tqdm
import logging
import pickle
from collections import Counter
import re
import fire
def build_vocab(input_json: str,
                output_json: str,
                threshold: int,
                keep_punctuation: bool,
                character_level: bool = False,
                zh: bool = True ):
    """Build vocabulary from csv file with a given threshold to drop all counts < threshold
    Args:
        input_json(string): Preprocessed json file. Structure like this:
            {
              'audios': [
                {
                  'audio_id': 'xxx',
                  'captions': [
                    {
                      'caption': 'xxx',
                      'cap_id': 'xxx'
                    }
                  ]
                },
                ...
              ]
            }
        output_json (str): Where to write the tokenized captions; falls back
            to overwriting ``input_json`` when None.
        threshold (int): Threshold to drop all words with counts < threshold
        keep_punctuation (bool): Includes or excludes punctuation.
        character_level (bool): Split captions into characters instead of
            words (Chinese branch only).
        zh (bool): Whether captions are Chinese (LTP segmentation) or
            English (PTB tokenizer).
    Returns:
        vocab (Vocab): Object with the processed vocabulary
    """
    data = json.load(open(input_json, "r"))["audios"]
    counter = Counter()
    # Captions that already carry a "tokens" field are assumed tokenized.
    pretokenized = "tokens" in data[0]["captions"][0]
    if zh:
        from ltp import LTP
        from zhon.hanzi import punctuation
        if not pretokenized:
            # LTP "base" model performs the Chinese word segmentation.
            parser = LTP("base")
        for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
            for cap_idx in range(len(data[audio_idx]["captions"])):
                if pretokenized:
                    tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
                else:
                    caption = data[audio_idx]["captions"][cap_idx]["caption"]
                    if character_level:
                        tokens = list(caption)
                    else:
                        # LTP segments a batch of sentences; we pass one.
                        tokens, _ = parser.seg([caption])
                        tokens = tokens[0]
                    # Remove all punctuations
                    if not keep_punctuation:
                        tokens = [token for token in tokens if token not in punctuation]
                    data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens)
                counter.update(tokens)
    else:
        if pretokenized:
            for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
                for cap_idx in range(len(data[audio_idx]["captions"])):
                    tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
                    counter.update(tokens)
        else:
            from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
            # PTBTokenizer consumes {audio_id: [{"caption": ...}, ...]}.
            captions = {}
            for audio_idx in range(len(data)):
                audio_id = data[audio_idx]["audio_id"]
                captions[audio_id] = []
                for cap_idx in range(len(data[audio_idx]["captions"])):
                    caption = data[audio_idx]["captions"][cap_idx]["caption"]
                    captions[audio_id].append({
                        "audio_id": audio_id,
                        "id": cap_idx,
                        "caption": caption
                    })
            tokenizer = PTBTokenizer()
            captions = tokenizer.tokenize(captions)
            for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
                audio_id = data[audio_idx]["audio_id"]
                for cap_idx in range(len(data[audio_idx]["captions"])):
                    tokens = captions[audio_id][cap_idx]
                    data[audio_idx]["captions"][cap_idx]["tokens"] = tokens
                    counter.update(tokens.split(" "))
    if not pretokenized:
        # Persist the tokenized captions; default to overwriting the input.
        if output_json is None:
            output_json = input_json
        json.dump({ "audios": data }, open(output_json, "w"), indent=4, ensure_ascii=not zh)
    words = [word for word, cnt in counter.items() if cnt >= threshold]
    # Create a vocab wrapper and add some special tokens.
    vocab = Vocabulary()
    vocab.add_word("<pad>")
    vocab.add_word("<start>")
    vocab.add_word("<end>")
    vocab.add_word("<unk>")
    # Add the words to the vocabulary.
    for word in words:
        vocab.add_word(word)
    return vocab
def process(input_json: str,
            output_file: str,
            output_json: str = None,
            threshold: int = 1,
            keep_punctuation: bool = False,
            character_level: bool = False,
            zh: bool = True):
    """Build the caption vocabulary and pickle it to ``output_file``."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
    logging.info("Build Vocab")
    vocabulary = build_vocab(
        input_json=input_json,
        output_json=output_json,
        threshold=threshold,
        keep_punctuation=keep_punctuation,
        character_level=character_level,
        zh=zh)
    with open(output_file, "wb") as store:
        pickle.dump(vocabulary, store)
    logging.info(f"Total vocabulary size: {len(vocabulary)}")
    logging.info(f"Saved vocab to '{output_file}'")
8,272 | import json
from tqdm import tqdm
import logging
import pickle
from collections import Counter
import re
import fire
def build_vocab(input_json: str,
                output_json: str,
                threshold: int,
                keep_punctuation: bool,
                host_address: str,
                character_level: bool = False,
                retokenize: bool = True,
                zh: bool = True ):
    """Build vocabulary from csv file with a given threshold to drop all counts < threshold
    Args:
        input_json(string): Preprocessed json file. Structure like this:
            {
              'audios': [
                {
                  'audio_id': 'xxx',
                  'captions': [
                    {
                      'caption': 'xxx',
                      'cap_id': 'xxx'
                    }
                  ]
                },
                ...
              ]
            }
        output_json (str): Where to write the tokenized captions; falls back
            to overwriting ``input_json`` when None.
        threshold (int): Threshold to drop all words with counts < threshold
        keep_punctuation (bool): Includes or excludes punctuation.
        host_address (str): Address of a running Stanford CoreNLP server
            (Chinese branch only).
        character_level (bool): Split captions into characters instead of
            words (Chinese branch only).
        retokenize (bool): Force re-tokenization even when captions already
            have a "tokens" field.
        zh (bool): Whether captions are Chinese (CoreNLP) or English (spaCy).
    Returns:
        vocab (Vocab): Object with the processed vocabulary
    """
    data = json.load(open(input_json, "r"))["audios"]
    counter = Counter()
    # ``retokenize`` overrides any existing "tokens" field.
    if retokenize:
        pretokenized = False
    else:
        pretokenized = "tokens" in data[0]["captions"][0]
    if zh:
        from nltk.parse.corenlp import CoreNLPParser
        from zhon.hanzi import punctuation
        if not pretokenized:
            # CoreNLP server performs the Chinese word segmentation.
            parser = CoreNLPParser(host_address)
        for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
            for cap_idx in range(len(data[audio_idx]["captions"])):
                if pretokenized:
                    tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
                else:
                    caption = data[audio_idx]["captions"][cap_idx]["caption"]
                    # Remove all punctuations
                    if not keep_punctuation:
                        caption = re.sub("[{}]".format(punctuation), "", caption)
                    if character_level:
                        tokens = list(caption)
                    else:
                        tokens = list(parser.tokenize(caption))
                    data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens)
                counter.update(tokens)
    else:
        if pretokenized:
            for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
                for cap_idx in range(len(data[audio_idx]["captions"])):
                    tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
                    counter.update(tokens)
        else:
            import spacy
            # Tagger only; parser and NER are unnecessary for tokenization.
            tokenizer = spacy.load("en_core_web_sm", disable=["parser", "ner"])
            for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
                captions = data[audio_idx]["captions"]
                for cap_idx in range(len(captions)):
                    caption = captions[cap_idx]["caption"]
                    doc = tokenizer(caption)
                    # Lower-case every token for a case-insensitive vocabulary.
                    tokens = " ".join([str(token).lower() for token in doc])
                    data[audio_idx]["captions"][cap_idx]["tokens"] = tokens
                    counter.update(tokens.split(" "))
    if not pretokenized:
        # Persist the tokenized captions; default to overwriting the input.
        if output_json is None:
            json.dump({ "audios": data }, open(input_json, "w"),
                      indent=4, ensure_ascii=not zh)
        else:
            json.dump({ "audios": data }, open(output_json, "w"),
                      indent=4, ensure_ascii=not zh)
    words = [word for word, cnt in counter.items() if cnt >= threshold]
    # Create a vocab wrapper and add some special tokens.
    vocab = Vocabulary()
    vocab.add_word("<pad>")
    vocab.add_word("<start>")
    vocab.add_word("<end>")
    vocab.add_word("<unk>")
    # Add the words to the vocabulary.
    for word in words:
        vocab.add_word(word)
    return vocab
def process(input_json: str,
            output_file: str,
            output_json: str = None,
            threshold: int = 1,
            keep_punctuation: bool = False,
            character_level: bool = False,
            retokenize: bool = False,
            host_address: str = "http://localhost:9000",
            zh: bool = True):
    """Build the caption vocabulary and pickle it to ``output_file``."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
    logging.info("Build Vocab")
    vocabulary = build_vocab(
        input_json=input_json,
        output_json=output_json,
        threshold=threshold,
        keep_punctuation=keep_punctuation,
        host_address=host_address,
        character_level=character_level,
        retokenize=retokenize,
        zh=zh)
    with open(output_file, "wb") as store:
        pickle.dump(vocabulary, store)
    logging.info(f"Total vocabulary size: {len(vocabulary)}")
    logging.info(f"Saved vocab to '{output_file}'")
8,273 | import json
from tqdm import tqdm
import re
import fire
The provided code snippet includes necessary dependencies for implementing the `tokenize_caption` function. Write a Python function `def tokenize_caption(input_json: str, keep_punctuation: bool = False, host_address: str = None, character_level: bool = False, zh: bool = True, output_json: str = None)` to solve the following problem:
Build vocabulary from csv file with a given threshold to drop all counts < threshold Args: input_json(string): Preprocessed json file. Structure like this: { 'audios': [ { 'audio_id': 'xxx', 'captions': [ { 'caption': 'xxx', 'cap_id': 'xxx' } ] }, ... ] } threshold (int): Threshold to drop all words with counts < threshold keep_punctuation (bool): Includes or excludes punctuation. Returns: vocab (Vocab): Object with the processed vocabulary
Here is the function:
def tokenize_caption(input_json: str,
                     keep_punctuation: bool = False,
                     host_address: str = None,
                     character_level: bool = False,
                     zh: bool = True,
                     output_json: str = None):
    """Tokenize every caption in a preprocessed caption json file.

    Adds a "tokens" field (a space-joined token string) to each caption and
    writes the updated structure back to disk.

    Args:
        input_json (str): Preprocessed json file. Structure like this:
            {
              'audios': [
                {
                  'audio_id': 'xxx',
                  'captions': [
                    {'caption': 'xxx', 'cap_id': 'xxx'}
                  ]
                },
                ...
              ]
            }
        keep_punctuation (bool): Keep punctuation marks in the tokens
            (Chinese branch only).
        host_address (str): Address of a running Stanford CoreNLP server,
            required when ``zh`` is True.
        character_level (bool): Split Chinese captions into characters
            instead of CoreNLP word segments.
        zh (bool): Whether captions are Chinese; English captions are
            tokenized with the PTB tokenizer instead.
        output_json (str): Where to write the result; defaults to
            overwriting ``input_json``.
    """
    with open(input_json, "r") as reader:
        data = json.load(reader)["audios"]
    if zh:
        from nltk.parse.corenlp import CoreNLPParser
        from zhon.hanzi import punctuation
        parser = CoreNLPParser(host_address)
        for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
            for cap_idx in range(len(data[audio_idx]["captions"])):
                caption = data[audio_idx]["captions"][cap_idx]["caption"]
                # Remove all punctuations
                if not keep_punctuation:
                    caption = re.sub("[{}]".format(punctuation), "", caption)
                if character_level:
                    tokens = list(caption)
                else:
                    tokens = list(parser.tokenize(caption))
                data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens)
    else:
        from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
        # PTBTokenizer consumes {audio_id: [{"caption": ...}, ...]}.
        captions = {}
        for audio_idx in range(len(data)):
            audio_id = data[audio_idx]["audio_id"]
            captions[audio_id] = []
            for cap_idx in range(len(data[audio_idx]["captions"])):
                caption = data[audio_idx]["captions"][cap_idx]["caption"]
                captions[audio_id].append({
                    "audio_id": audio_id,
                    "id": cap_idx,
                    "caption": caption
                })
        tokenizer = PTBTokenizer()
        captions = tokenizer.tokenize(captions)
        for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
            audio_id = data[audio_idx]["audio_id"]
            for cap_idx in range(len(data[audio_idx]["captions"])):
                tokens = captions[audio_id][cap_idx]
                data[audio_idx]["captions"][cap_idx]["tokens"] = tokens
    # Single write path replaces the two duplicated json.dump branches; the
    # file handle is now closed deterministically.
    target_json = output_json if output_json else input_json
    with open(target_json, "w") as writer:
        json.dump({ "audios": data }, writer, indent=4, ensure_ascii=not zh)
8,274 | import os
import sys
import logging
from typing import Callable, Dict, Union
import yaml
import torch
from torch.optim.swa_utils import AveragedModel as torch_average_model
import numpy as np
import pandas as pd
from pprint import pformat
def load_dict_from_csv(csv, cols):
    """Read a tab-separated file and map column ``cols[0]`` to ``cols[1]``."""
    frame = pd.read_csv(csv, sep="\t")
    key_col, value_col = cols[0], cols[1]
    return dict(zip(frame[key_col], frame[value_col]))
8,275 | import os
import sys
import logging
from typing import Callable, Dict, Union
import yaml
import torch
from torch.optim.swa_utils import AveragedModel as torch_average_model
import numpy as np
import pandas as pd
from pprint import pformat
def init_logger(filename, level="INFO"):
    """Create a logger that appends formatted records to ``filename``.

    The logger is namespaced by the file name so repeated calls with
    different files yield independent loggers.
    """
    file_handler = logging.FileHandler(filename)
    file_handler.setFormatter(logging.Formatter(
        "[ %(levelname)s : %(asctime)s ] - %(message)s"))
    logger = logging.getLogger(__name__ + "." + filename)
    logger.setLevel(getattr(logging, level))
    logger.addHandler(file_handler)
    return logger
8,276 | import os
import sys
import logging
from typing import Callable, Dict, Union
import yaml
import torch
from torch.optim.swa_utils import AveragedModel as torch_average_model
import numpy as np
import pandas as pd
from pprint import pformat
The provided code snippet includes necessary dependencies for implementing the `pprint_dict` function. Write a Python function `def pprint_dict(in_dict, outputfun=sys.stdout.write, formatter='yaml')` to solve the following problem:
pprint_dict :param outputfun: function to use, defaults to sys.stdout :param in_dict: dict to print
Here is the function:
def pprint_dict(in_dict, outputfun=sys.stdout.write, formatter='yaml'):
    """pprint_dict

    :param in_dict: dict to print
    :param outputfun: function to use, defaults to sys.stdout
    :param formatter: 'yaml' or 'pretty'
    :raises ValueError: if ``formatter`` is not one of the supported names
    """
    if formatter == 'yaml':
        format_fun = yaml.dump
    elif formatter == 'pretty':
        format_fun = pformat
    else:
        # Fail with a clear message instead of an UnboundLocalError on
        # format_fun below.
        raise ValueError("Unknown formatter: {}".format(formatter))
    # Emit line by line so outputfun can be e.g. logger.info.
    for line in format_fun(in_dict).split('\n'):
        outputfun(line)
8,277 | import os
import sys
import logging
from typing import Callable, Dict, Union
import yaml
import torch
from torch.optim.swa_utils import AveragedModel as torch_average_model
import numpy as np
import pandas as pd
from pprint import pformat
def load_config(config_file):
    """Load a yaml config file, recursively resolving any "inherit_from" key.

    A config containing ``inherit_from: base.yaml`` is overlaid on top of the
    base config (this file's values win). Returns the merged dict.
    """
    with open(config_file, "r") as reader:
        config = yaml.load(reader, Loader=yaml.FullLoader)
    if "inherit_from" in config:
        base_config_file = config["inherit_from"]
        # The base path is interpreted relative to the current config file.
        base_config_file = os.path.join(
            os.path.dirname(config_file), base_config_file
        )
        assert not os.path.samefile(config_file, base_config_file), \
            "inherit from itself"
        # Resolve the base recursively, then merge this file's values into it.
        # merge_a_into_b is a project helper defined elsewhere — it appears to
        # mutate its second argument in place; confirm.
        base_config = load_config(base_config_file)
        del config["inherit_from"]
        merge_a_into_b(config, base_config)
        return base_config
    return config
def parse_config_or_kwargs(config_file, **kwargs):
    """Load *config_file* and override its entries with any given kwargs."""
    yaml_config = load_config(config_file)
    # Explicit kwargs take precedence over values from the yaml file.
    return dict(yaml_config, **kwargs)
8,278 | import os
import sys
import logging
from typing import Callable, Dict, Union
import yaml
import torch
from torch.optim.swa_utils import AveragedModel as torch_average_model
import numpy as np
import pandas as pd
from pprint import pformat
def store_yaml(config, config_file):
    """Serialize *config* as block-style yaml into *config_file*."""
    with open(config_file, "w") as writer:
        yaml.dump(config, writer, indent=4, default_flow_style=False)
8,279 | import os
import sys
import logging
from typing import Callable, Dict, Union
import yaml
import torch
from torch.optim.swa_utils import AveragedModel as torch_average_model
import numpy as np
import pandas as pd
from pprint import pformat
def fix_batchnorm(model: torch.nn.Module):
    """Put every BatchNorm submodule of *model* into eval mode."""
    def _freeze_bn(module):
        # Match by class name so all BatchNorm variants (1d/2d/3d/...) are caught.
        if "BatchNorm" in module.__class__.__name__:
            module.eval()
    model.apply(_freeze_bn)
8,280 | import copy
import json
import numpy as np
import fire
def evaluate_annotation(key2refs, scorer):
    """Leave-one-out human agreement: each round treats one reference per key
    as the "prediction" and scores it against the remaining references,
    returning the mean over all rounds.

    NOTE(review): ``key2refs`` is mutated in place (references are popped and
    re-inserted at the front) — callers should pass a copy if they reuse it.
    """
    if scorer.method() == "Bleu":
        # Bleu reports four n-gram scores at once, so accumulate a vector.
        scores = np.array([ 0.0 for n in range(4) ])
    else:
        scores = 0
    # Assumes every key has the same number of references — TODO confirm.
    num_cap_per_audio = len(next(iter(key2refs.values())))
    for i in range(num_cap_per_audio):
        if i > 0:
            # Push the reference held out in the previous round back in
            # (at the front, so it won't be popped again).
            for key in key2refs:
                key2refs[key].insert(0, res[key][0])
        # Pop the last reference of each key to serve as this round's
        # pseudo-prediction.
        res = { key: [refs.pop(),] for key, refs in key2refs.items() }
        score, _ = scorer.compute_score(key2refs, res)
        if scorer.method() == "Bleu":
            scores += np.array(score)
        else:
            scores += score
    score = scores / num_cap_per_audio
    return score
8,281 | import copy
import json
import numpy as np
import fire
def evaluate_prediction(key2pred, key2refs, scorer):
    """Score predictions against references, averaged over all leave-one-out
    reference subsets (drop one reference per key each round)."""
    use_bleu = scorer.method() == "Bleu"
    # Bleu reports four n-gram scores at once, so accumulate a vector.
    totals = np.zeros(4) if use_bleu else 0
    num_cap_per_audio = len(next(iter(key2refs.values())))
    for leave_out in range(num_cap_per_audio):
        # Drop the `leave_out`-th reference of every key.
        reduced = {
            key: refs[:leave_out] + refs[leave_out + 1:]
            for key, refs in key2refs.items()
        }
        score, _ = scorer.compute_score(reduced, key2pred)
        totals = totals + (np.array(score) if use_bleu else score)
    return totals / num_cap_per_audio
8,282 | import numpy as np
import pandas as pd
import torch
from gensim.models import FastText
from tqdm import tqdm
import fire
import sys
import os
from utils.build_vocab import Vocabulary
def create_embedding(caption_file: str,
                     vocab_file: str,
                     embed_size: int,
                     output: str,
                     **fasttext_kwargs):
    """Train FastText embeddings on the caption tokens and dump a
    (vocab_size, embed_size) numpy matrix, row-aligned with the vocabulary
    indices, to *output*.

    Args:
        caption_file: json file readable by pandas with a "tokens" column.
        vocab_file: serialized Vocabulary loaded via torch.load.
        embed_size: embedding dimensionality.
        output: destination .npy path.
        **fasttext_kwargs: forwarded to gensim FastText ("epochs" is also
            used for training; default 10).
    """
    caption_df = pd.read_json(caption_file)
    # Wrap every caption with the sequence delimiters so they get embeddings too.
    caption_df["tokens"] = caption_df["tokens"].apply(lambda x: ["<start>"] + [token for token in x] + ["<end>"])
    sentences = list(caption_df["tokens"].values)
    vocabulary = torch.load(vocab_file, map_location="cpu")
    epochs = fasttext_kwargs.get("epochs", 10)
    # NOTE(review): ``size=`` is the gensim<4 FastText argument (gensim>=4
    # renamed it to ``vector_size``) — confirm the pinned gensim version.
    model = FastText(size=embed_size, min_count=1, **fasttext_kwargs)
    model.build_vocab(sentences=sentences)
    model.train(sentences=sentences, total_examples=len(sentences), epochs=epochs)
    # Rows for <pad>/<unk> stay zero; every other word gets its trained vector.
    word_embeddings = np.zeros((len(vocabulary), embed_size))
    with tqdm(total=len(vocabulary), ascii=True) as pbar:
        for word, idx in vocabulary.word2idx.items():
            if word == "<pad>" or word == "<unk>":
                # NOTE(review): skipped rows never call pbar.update(), so the
                # progress bar finishes short of its total.
                continue
            word_embeddings[idx] = model.wv[word]
            pbar.update()
    np.save(output, word_embeddings)
    print("Finish writing fasttext embeddings to " + output)
8,283 | import os
import sys
import copy
import pickle
import numpy as np
import pandas as pd
import fire
def coco_score(refs, pred, scorer):
    """Gap between the all-reference score and the mean leave-one-out score
    for a COCO-style caption scorer.

    NOTE(review): ``refs`` is mutated during the rotation but fully restored
    before the final all-reference scoring.
    """
    if scorer.method() == "Bleu":
        # Bleu reports four n-gram scores at once, so accumulate a vector.
        scores = np.array([ 0.0 for n in range(4) ])
    else:
        scores = 0
    # Assumes every key has the same number of references — TODO confirm.
    num_cap_per_audio = len(refs[list(refs.keys())[0]])
    for i in range(num_cap_per_audio):
        if i > 0:
            # Push the reference held out in the previous round back in
            # (at the front, so it won't be popped again).
            for key in refs:
                refs[key].insert(0, res[key][0])
        # Hold out the last reference of each key for this round.
        res = {key: [refs[key].pop(),] for key in refs}
        score, _ = scorer.compute_score(refs, pred)
        if scorer.method() == "Bleu":
            scores += np.array(score)
        else:
            scores += score
    score = scores / num_cap_per_audio
    # Restore the final held-out references, then score with ALL references.
    for key in refs:
        refs[key].insert(0, res[key][0])
    score_allref, _ = scorer.compute_score(refs, pred)
    diff = score_allref - score
    return diff
8,284 | import os
import sys
import copy
import pickle
import numpy as np
import pandas as pd
import fire
def embedding_score(refs, pred, scorer):
    """Gap between the all-reference score and the mean leave-one-out score
    for an embedding-based scorer.

    Scores ``pred`` against all references, then against every leave-one-out
    reference subset, and returns (all-reference score) - (mean leave-one-out
    score). Keys whose reference count differs from the first key's are
    skipped in the leave-one-out rounds, mirroring the original filtering.
    """
    num_cap_per_audio = len(next(iter(refs.values())))
    leave_one_out_total = 0
    for i in range(num_cap_per_audio):
        # Drop the i-th reference of every (complete) key.
        # The original code also built an unused ``res`` dict here; that dead
        # computation has been removed.
        refs_i = {
            key: np.concatenate([refs[key][:i], refs[key][i + 1:]])
            for key in refs.keys() if len(refs[key]) == num_cap_per_audio
        }
        score, _ = scorer.compute_score(refs_i, pred)
        leave_one_out_total += score
    mean_loo_score = leave_one_out_total / num_cap_per_audio
    score_allref, _ = scorer.compute_score(refs, pred)
    return score_allref - mean_loo_score
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.