Dataset columns (one value per row below):
method_name — string, 3 to 45 characters
method_body — string, 9 to 6.25k characters
full_code — string, 35 to 7.02k characters
docstring — string, 18 to 4.7k characters (shown as null when the method has no docstring)
__init__
super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.c_proj = RowParallelLinear(intermediate_size, hidden_size, bias=False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError( f'Unsu...
def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str='silu', linear_method: Optional[LinearMethodBase]=None): super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.c_proj = Ro...
null
__init__
super().__init__() self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.total_num_kv_heads = num_kv_heads if self.total_num_kv_heads >= tp_size: assert self.to...
def __init__(self, hidden_size: int, num_heads: int, num_kv_heads: int, rope_theta: float=10000, rope_scaling: Optional[Dict[str, Any]]=None, max_position_embeddings: int=8192, linear_method: Optional[ LinearMethodBase]=None) ->None: super().__init__() self.hidden_size = hidden_size tp_size = ge...
null
forward
return self.act(x) / self.scales
def forward(self, x: torch.Tensor) ->torch.Tensor: return self.act(x) / self.scales
null
get_model_config
"""Gets the model configuration.""" return self.model_config
def get_model_config(self) -> ModelConfig:
    """Gets the model configuration."""
    return self.model_config
Gets the model configuration.
__init__
super().__init__() self.config = config self.linear_method = linear_method self.transformer = PhiModel(config, linear_method) self.lm_head = PhiCausalLMHead(config) self.sampler = Sampler(config.vocab_size)
def __init__(self, config: PretrainedConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config self.linear_method = linear_method self.transformer = PhiModel(config, linear_method) self.lm_head = PhiCausalLMHead(config) self.sampler = Sampler(config.vo...
null
__init__
super().__init__() self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.total_num_kv_heads = num_kv_heads if self.total_num_kv_heads >= tp_size: assert self.to...
def __init__(self, hidden_size: int, num_heads: int, num_kv_heads: int, rope_theta: float=10000, rope_scaling: Optional[Dict[str, Any]]=None, max_position_embeddings: int=8192, linear_method: Optional[ LinearMethodBase]=None) ->None: super().__init__() self.hidden_size = hidden_size tp_size = ge...
null
get_tokenizer
return self.llm_engine.tokenizer
def get_tokenizer(self) ->Union[PreTrainedTokenizer, PreTrainedTokenizerFast]: return self.llm_engine.tokenizer
null
__repr__
return f'PhysicalTokenBlock(device={self.device}, block_number={self.block_number}, ref_count={self.ref_count})'
def __repr__(self) ->str: return ( f'PhysicalTokenBlock(device={self.device}, block_number={self.block_number}, ref_count={self.ref_count})' )
null
generate
"""Generates the completions for the input prompts. NOTE: This class automatically batches the given prompts, considering the memory constraint. For the best performance, put all of your prompts into a single list and pass it to this method. Args: prompts: A list of prompts...
def generate(self, prompts: Optional[Union[str, List[str]]]=None, sampling_params: Optional[SamplingParams]=None, prompt_token_ids: Optional[List[List[int]]]=None, use_tqdm: bool=True) ->List[RequestOutput]: """Generates the completions for the input prompts. NOTE: This class automatically batches ...
Generates the completions for the input prompts. NOTE: This class automatically batches the given prompts, considering the memory constraint. For the best performance, put all of your prompts into a single list and pass it to this method. Args: prompts: A list of prompts to generate completions for. sampling_...
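For orientation, a minimal usage sketch of the generate API documented above; the model name and sampling settings are illustrative assumptions, not part of this record:
```
from vllm import LLM, SamplingParams

# Illustrative model choice; any supported checkpoint works the same way.
llm = LLM(model="facebook/opt-125m")
params = SamplingParams(temperature=0.8, max_tokens=32)

# Passing every prompt in a single list lets the engine batch them itself,
# which is what the NOTE in the docstring recommends.
outputs = llm.generate(["Hello, my name is", "The capital of France is"], params)
for out in outputs:
    print(out.prompt, "->", out.outputs[0].text)
```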
apply_weights
weight = weights['weight'] if self.separate_bias_add: if bias: return F.linear(x, weight) + bias return F.linear(x, weight) return F.linear(x, weight, bias)
def apply_weights(self, weights: Dict[str, torch.Tensor], x: torch.Tensor, bias: Optional[torch.Tensor]=None) ->torch.Tensor: weight = weights['weight'] if self.separate_bias_add: if bias: return F.linear(x, weight) + bias return F.linear(x, weight) return F.linear(x, weight,...
null
_verify_args
if self.max_num_batched_tokens < self.max_model_len: raise ValueError( f'max_num_batched_tokens ({self.max_num_batched_tokens}) is smaller than max_model_len ({self.max_model_len}). This effectively limits the maximum sequence length to max_num_batched_tokens and makes vLLM reject longer sequences. Please i...
def _verify_args(self) ->None: if self.max_num_batched_tokens < self.max_model_len: raise ValueError( f'max_num_batched_tokens ({self.max_num_batched_tokens}) is smaller than max_model_len ({self.max_model_len}). This effectively limits the maximum sequence length to max_num_batched_tokens and m...
null
allocate_gpu_cache
gpu_cache: List[KVCache] = [] key_block_shape = self.get_key_block_shape() value_block_shape = self.get_value_block_shape() for _ in range(self.num_layers): key_blocks = torch.empty(size=(self.num_gpu_blocks, *key_block_shape), dtype=self.dtype, device='cuda') value_blocks = torch.empty(size=(self.num_g...
def allocate_gpu_cache(self) ->List[KVCache]: gpu_cache: List[KVCache] = [] key_block_shape = self.get_key_block_shape() value_block_shape = self.get_value_block_shape() for _ in range(self.num_layers): key_blocks = torch.empty(size=(self.num_gpu_blocks, * key_block_shape), dtype=sel...
null
forward
residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states = self.self_attn(positions=positions, hidden_states= hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layer...
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states = self.self_attn(positions=positions, hidden_states= hidden_states,...
null
sample
next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: Optional[torch.Tensor], sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
null
verify_with_parallel_config
total_num_attention_heads = self.hf_config.num_attention_heads tensor_parallel_size = parallel_config.tensor_parallel_size if total_num_attention_heads % tensor_parallel_size != 0: raise ValueError( f'Total number of attention heads ({total_num_attention_heads}) must be divisible by tensor parallel size ({t...
def verify_with_parallel_config(self, parallel_config: 'ParallelConfig' ) ->None: total_num_attention_heads = self.hf_config.num_attention_heads tensor_parallel_size = parallel_config.tensor_parallel_size if total_num_attention_heads % tensor_parallel_size != 0: raise ValueError( f'T...
null
get_prompt_len
return len(self.prompt_token_ids)
def get_prompt_len(self) ->int: return len(self.prompt_token_ids)
null
get_priority
raise NotImplementedError
def get_priority(self, now: float, seq_group: SequenceGroup) ->float: raise NotImplementedError
null
get_value_block_shape
return self.num_heads, self.head_size, self.block_size
def get_value_block_shape(self) ->Tuple[int, int, int]: return self.num_heads, self.head_size, self.block_size
null
test_request_tracker
tracker = RequestTracker() tracker.new_requests_event = DummyEvent() stream_1 = tracker.add_request('1') assert tracker.new_requests_event.flag new, finished = tracker.get_new_and_finished_requests() assert not tracker.new_requests_event.flag assert len(new) == 1 assert new[0]['request_id'] == '1' assert not finished a...
def test_request_tracker(): tracker = RequestTracker() tracker.new_requests_event = DummyEvent() stream_1 = tracker.add_request('1') assert tracker.new_requests_event.flag new, finished = tracker.get_new_and_finished_requests() assert not tracker.new_requests_event.flag assert len(new) == 1 ...
null
get_tensor_model_parallel_src_rank
"""Calculate the global rank corresponding to the first local rank in the tensor model parallel group.""" global_rank = torch.distributed.get_rank() local_world_size = get_tensor_model_parallel_world_size() return global_rank // local_world_size * local_world_size
def get_tensor_model_parallel_src_rank(): """Calculate the global rank corresponding to the first local rank in the tensor model parallel group.""" global_rank = torch.distributed.get_rank() local_world_size = get_tensor_model_parallel_world_size() return global_rank // local_world_size * local_worl...
Calculate the global rank corresponding to the first local rank in the tensor model parallel group.
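A worked example of the arithmetic above, with illustrative values (8 GPUs, tensor parallel size 4):
```
# Ranks 0-3 form the first tensor-parallel group, ranks 4-7 the second.
# The "src rank" of a group is its first global rank.
global_rank = 6
local_world_size = 4  # tensor model parallel world size
src_rank = global_rank // local_world_size * local_world_size
assert src_rank == 4  # rank 6 belongs to the group that starts at rank 4
```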
sort_by_priority
return sorted(seq_groups, key=lambda seq_group: self.get_priority(now, seq_group), reverse=True)
def sort_by_priority(self, now: float,
                     seq_groups: List[SequenceGroup]) -> List[SequenceGroup]:
    return sorted(seq_groups,
                  key=lambda seq_group: self.get_priority(now, seq_group),
                  reverse=True)
null
sample
next_tokens = self.sampler(self.embed_out.weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: torch.Tensor,
           sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]:
    next_tokens = self.sampler(self.embed_out.weight, hidden_states,
                               sampling_metadata)
    return next_tokens
null
vocab_range_from_global_vocab_size
per_partition_vocab_size = divide(global_vocab_size, world_size) return vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank )
def vocab_range_from_global_vocab_size(global_vocab_size: int, rank: int,
                                       world_size: int) -> Sequence[int]:
    per_partition_vocab_size = divide(global_vocab_size, world_size)
    return vocab_range_from_per_partition_vocab_size(per_partition_vocab_size,
                                                     rank)
null
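A short sketch of the partitioning this implies, assuming divide() is exact integer division and the per-partition helper returns the half-open range [rank * size, (rank + 1) * size):
```
# Illustrative numbers: a 32000-token vocabulary over 4 tensor-parallel ranks.
global_vocab_size, world_size = 32000, 4
per_partition = global_vocab_size // world_size  # 8000 entries per rank
for rank in range(world_size):
    start = rank * per_partition
    end = start + per_partition
    print(rank, (start, end))  # rank 1 -> (8000, 16000), and so on
```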
__init__
super().__init__() self.config = config self.linear_method = linear_method self.model = MixtralModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) self.sampler = Sampler(config.vocab_size)
def __init__(self, config: MixtralConfig, linear_method: Optional[ LinearMethodBase]=None) ->None: super().__init__() self.config = config self.linear_method = linear_method self.model = MixtralModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) ...
null
__init__
self.block_size = block_size self.num_total_gpu_blocks = num_gpu_blocks self.num_total_cpu_blocks = num_cpu_blocks self.block_sliding_window = None if sliding_window is not None: assert sliding_window % block_size == 0, (sliding_window, block_size) self.block_sliding_window = sliding_window // block_size self.w...
def __init__(self, block_size: int, num_gpu_blocks: int, num_cpu_blocks: int, watermark: float=0.01, sliding_window: Optional[int]=None) ->None: self.block_size = block_size self.num_total_gpu_blocks = num_gpu_blocks self.num_total_cpu_blocks = num_cpu_blocks self.block_sliding_window = None if ...
null
_get_logprobs
batched_logprobs_query_seq_indices: List[int] = [] batched_logprobs_query_token_indices: List[int] = [] largest_num_logprobs = 0 sample_idx = 0 for i, (seq_group, sample_result) in enumerate(zip(sampling_metadata. seq_groups, sample_results)): seq_ids, sampling_params = seq_group next_token_ids, parent_ids ...
def _get_logprobs(logprobs: torch.Tensor, sampling_metadata: SamplingMetadata, sample_results: List[Tuple[List[int], List[int]]] ) ->Tuple[List[Optional[List[Optional[Dict[int, float]]]]], List[List[ Dict[int, float]]]]: batched_logprobs_query_seq_indices: List[int] = [] batched_logprobs_query_token...
null
create_token_type_ids_from_sequences
""" Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is N...
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) ->List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` ...
Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args:...
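A self-contained sketch of the mask layout the docstring describes; the special-token ids below are placeholders, not the tokenizer's real values:
```
# ALBERT-style pair mask: 0s cover [CLS] + first sequence + [SEP],
# 1s cover the second sequence + its closing [SEP].
def example_token_type_ids(token_ids_0, token_ids_1=None):
    cls, sep = [101], [102]  # placeholder special-token ids
    first = [0] * len(cls + token_ids_0 + sep)
    if token_ids_1 is None:
        return first
    return first + [1] * len(token_ids_1 + sep)

assert example_token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]
```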
forward
qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.o_proj(attn_output) return output
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_ca...
null
__init__
config.num_key_value_heads = max(config.num_key_value_heads_per_layer) delattr(config, 'num_key_value_heads_per_layer') super().__init__(config=config, linear_method=linear_method)
def __init__(self, config: Optional[PretrainedConfig] = None,
             linear_method: Optional[LinearMethodBase] = None) -> None:
    config.num_key_value_heads = max(config.num_key_value_heads_per_layer)
    delattr(config, 'num_key_value_heads_per_layer')
    super().__init__(config=config, linear_method=linear_method)
null
get_token_ids
return self.prompt_token_ids + self.output_token_ids
def get_token_ids(self) ->List[int]: return self.prompt_token_ids + self.output_token_ids
null
forward
inputs_embeds = self.embedding(input_ids) hidden_states = self.encoder(hidden_states=inputs_embeds, position_ids= position_ids, kv_caches=kv_caches, input_metadata=input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: inputs_embeds = self.embedding(input_ids) hidden_states = self.encoder(hidden_states=inputs_embeds, position_ids= position_ids, kv_caches=kv_caches, input_m...
null
generate_greedy_logprobs
all_logprobs = [] for prompt in prompts: input_ids = self.tokenizer(prompt, return_tensors='pt').input_ids output = self.model.generate(input_ids.cuda(), use_cache=True, do_sample=False, max_new_tokens=max_tokens, output_hidden_states= True, return_dict_in_generate=True) seq_logprobs = [] ...
def generate_greedy_logprobs(self, prompts: List[str], max_tokens: int) ->List[ List[torch.Tensor]]: all_logprobs = [] for prompt in prompts: input_ids = self.tokenizer(prompt, return_tensors='pt').input_ids output = self.model.generate(input_ids.cuda(), use_cache=True, do_sample...
null
forward
hidden_states = self.wte(input_ids) residual = None for i in range(len(self.h)): layer = self.h[i] hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, residual) hidden_states, _ = self.ln_f(hidden_states, residual) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.wte(input_ids) residual = None for i in range(len(self.h)): layer = self.h[i] hidden_states, residual = layer(positions, hidde...
null
_get_model_architecture
architectures = getattr(config, 'architectures', []) for arch in architectures: model_cls = ModelRegistry.load_model_cls(arch) if model_cls is not None: return model_cls raise ValueError( f'Model architectures {architectures} are not supported for now. Supported architectures: {ModelRegistry.get_sup...
def _get_model_architecture(config: PretrainedConfig) ->Type[nn.Module]: architectures = getattr(config, 'architectures', []) for arch in architectures: model_cls = ModelRegistry.load_model_cls(arch) if model_cls is not None: return model_cls raise ValueError( f'Model arc...
null
free
if seq.seq_id not in self.block_tables: return block_table = self.block_tables[seq.seq_id] self._free_block_table(block_table) del self.block_tables[seq.seq_id]
def free(self, seq: Sequence) -> None:
    if seq.seq_id not in self.block_tables:
        return
    block_table = self.block_tables[seq.seq_id]
    self._free_block_table(block_table)
    del self.block_tables[seq.seq_id]
null
build_inputs_with_special_tokens
bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = out...
null
_run_engine
if use_tqdm: num_requests = self.llm_engine.get_num_unfinished_requests() pbar = tqdm(total=num_requests, desc='Processed prompts') outputs: List[RequestOutput] = [] while self.llm_engine.has_unfinished_requests(): step_outputs = self.llm_engine.step() for output in step_outputs: if output.finis...
def _run_engine(self, use_tqdm: bool) ->List[RequestOutput]: if use_tqdm: num_requests = self.llm_engine.get_num_unfinished_requests() pbar = tqdm(total=num_requests, desc='Processed prompts') outputs: List[RequestOutput] = [] while self.llm_engine.has_unfinished_requests(): step_out...
null
__init__
super().__init__() hidden_size = config.hidden_size inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPT2Attention(config, linear_method) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) sel...
def __init__(self, config: GPT2Config, linear_method: Optional[ LinearMethodBase]=None): super().__init__() hidden_size = config.hidden_size inner_dim = (config.n_inner if config.n_inner is not None else 4 * hidden_size) self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) ...
null
forward
batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) router_logits, _ = self.gate(hidden_states) routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) routin...
def forward(self, hidden_states: torch.Tensor) ->torch.Tensor: batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) router_logits, _ = self.gate(hidden_states) routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) routing_wei...
null
__init__
super().__init__() self.config = config assert not config.add_cross_attention assert not config.scale_attn_by_inverse_layer_idx assert not config.reorder_and_upcast_attn self.embed_dim = config.hidden_size self.wte = VocabParallelEmbedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_e...
def __init__(self, config: GPT2Config, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config assert not config.add_cross_attention assert not config.scale_attn_by_inverse_layer_idx assert not config.reorder_and_upcast_attn self.embed_dim = config.hidden_si...
null
forward
if residual is None: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) else: hidden_states, residual = self.input_layernorm(hidden_states, residual) hidden_states = self.self_attn(positions=positions, hidden_states= hidden_states, kv_cache=kv_cache, input_metadata=input_metada...
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata, residual: Optional[ torch.Tensor]) ->torch.Tensor: if residual is None: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) else: hid...
null
_verify_args
if self.pipeline_parallel_size > 1: raise NotImplementedError('Pipeline parallelism is not supported yet.')
def _verify_args(self) ->None: if self.pipeline_parallel_size > 1: raise NotImplementedError('Pipeline parallelism is not supported yet.')
null
load_chat_template
if args.chat_template is not None: try: with open(args.chat_template, 'r') as f: chat_template = f.read() except OSError: chat_template = codecs.decode(args.chat_template, 'unicode_escape') tokenizer.chat_template = chat_template logger.info(f'Using supplied chat template:\n{...
def load_chat_template(args, tokenizer): if args.chat_template is not None: try: with open(args.chat_template, 'r') as f: chat_template = f.read() except OSError: chat_template = codecs.decode(args.chat_template, 'unicode_escape') tokenizer.chat_templa...
null
__init__
super().__init__() self.hidden_size = config.hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = config.num_attention_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.multi_query_attention = config.multi_query_attention self.total_num...
def __init__(self, config, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.hidden_size = config.hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = config.num_attention_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self...
null
can_append_slot
num_free_gpu_blocks = self.gpu_allocator.get_num_free_blocks() num_seqs = seq_group.num_seqs(status=SequenceStatus.RUNNING) return num_seqs <= num_free_gpu_blocks
def can_append_slot(self, seq_group: SequenceGroup) -> bool:
    num_free_gpu_blocks = self.gpu_allocator.get_num_free_blocks()
    num_seqs = seq_group.num_seqs(status=SequenceStatus.RUNNING)
    return num_seqs <= num_free_gpu_blocks
null
vocab_size
"""Returns vocab size""" return self.sp_model.get_piece_size()
@property
def vocab_size(self):
    """Returns vocab size"""
    return self.sp_model.get_piece_size()
Returns vocab size
load_weights
stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v'), ('gate_up_proj', 'gate_proj', 0), ('gate_up_proj', 'up_proj', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, l...
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v'), ('gate_up_proj', 'gate_proj', 0), ('gate_up_pro...
null
_verify_load_format
load_format = self.load_format.lower() supported_load_format = ['auto', 'pt', 'safetensors', 'npcache', 'dummy'] rocm_not_supported_load_format = [] if load_format not in supported_load_format: raise ValueError( f"Unknown load format: {self.load_format}. Must be one of 'auto', 'pt', 'safetensors', 'npcache'...
def _verify_load_format(self) ->None: load_format = self.load_format.lower() supported_load_format = ['auto', 'pt', 'safetensors', 'npcache', 'dummy'] rocm_not_supported_load_format = [] if load_format not in supported_load_format: raise ValueError( f"Unknown load format: {self.load_...
null
__init__
super().__init__() self.config = config self.vocab_size = config.vocab_size self.wte = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.h = nn.ModuleList([QWenBlock(config, linear_method) for _ in range( config.num_hidden_layers)]) self.ln_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsi...
def __init__(self, config: QWenConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config self.vocab_size = config.vocab_size self.wte = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.h = nn.ModuleList([QWenBlock(config, linear_method) f...
null
test_decode_streaming
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id) all_input_ids = tokenizer(truth, add_special_tokens=False)['input_ids'] if skip_special_tokens: all_input_ids = ([tokenizer.bos_token_id] if tokenizer.bos_token_id is not None else []) + all_input_ids + [tokenizer.eos_token_id] decoded_text = _run_incr...
@pytest.mark.parametrize('truth', TRUTH) @pytest.mark.parametrize('tokenizer_id', TOKENIZERS) @pytest.mark.parametrize('skip_special_tokens', (True, False)) def test_decode_streaming(tokenizer_id, truth, skip_special_tokens): tokenizer = AutoTokenizer.from_pretrained(tokenizer_id) all_input_ids = tokenizer(trut...
null
load_weights
stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v')] params_dict = dict(self.named_parameters(remove_duplicate=False)) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'lm_head.weight' i...
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v')] params_dict = dict(self.named_parameters(remove_dup...
null
__init__
super().__init__() self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.total_num_kv_heads = num_kv_heads if self.total_num_kv_heads >= tp_size: assert self.to...
def __init__(self, hidden_size: int, num_heads: int, num_kv_heads: int, max_position: int=4096 * 32, rope_theta: float=10000, linear_method: Optional[LinearMethodBase]=None, sliding_window: Optional[int]=None ) ->None: super().__init__() self.hidden_size = hidden_size tp_size = get_tensor_model_...
null
_convert_tokens_to_string_with_added_encoders
sub_texts = [] current_sub_text = [] all_special_tokens = set(tokenizer.all_special_tokens) for token in output_tokens: if skip_special_tokens and token in all_special_tokens: continue if token in tokenizer.get_added_vocab(): if current_sub_text: sub_text = tokenizer.convert_tokens_t...
def _convert_tokens_to_string_with_added_encoders(tokenizer: Union[ PreTrainedTokenizer, PreTrainedTokenizerFast], output_tokens: List[str], skip_special_tokens: bool, spaces_between_special_tokens: bool) ->str: sub_texts = [] current_sub_text = [] all_special_tokens = set(tokenizer.all_special_toke...
null
forward
del input_ raise RuntimeError("LMHead's weights should be used in the sampler.")
def forward(self, input_):
    del input_
    raise RuntimeError("LMHead's weights should be used in the sampler.")
null
execute_method
executor = getattr(self, method) return executor(*args, **kwargs)
def execute_method(self, method, *args, **kwargs):
    executor = getattr(self, method)
    return executor(*args, **kwargs)
null
__init__
super().__init__() self.post_layer_norm = config.post_layer_norm self.num_layers = config.num_layers self.layers = nn.ModuleList([GLMBlock(config, linear_method) for i in range (self.num_layers)]) if self.post_layer_norm: layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm self.final_layernorm = laye...
def __init__(self, config, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.post_layer_norm = config.post_layer_norm self.num_layers = config.num_layers self.layers = nn.ModuleList([GLMBlock(config, linear_method) for i in range(self.num_layers)]) if self.post_layer_n...
null
load_weights
stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v')] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision, fall_back_to_pt=False): if 'rotary_emb.inv_f...
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v')] params_dict = dict(self.named_parameters()) for...
null
forward
layernorm_output = self.input_layernorm(hidden_states) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states attention_output = self.self_attention(position_ids=position_ids, hidden_states=layernorm_output, kv_cache=kv_cache, input_metadata= input_m...
def forward(self, position_ids: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: layernorm_output = self.input_layernorm(hidden_states) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual...
null
create_kv_caches
torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) scale = head_size ** -0.5 x = 16 // torch.tensor([], dtype=dtype).element_size() key_cache_shape = num_blocks, num_heads, head_size // x, block_size, x key_caches = [] for _ in range(num_layers): key_cache = torch.empty(size=key_cache_shape, dtype=dtype, d...
def create_kv_caches(num_blocks: int, block_size: int, num_layers: int, num_heads: int, head_size: int, dtype: torch.dtype, seed: int, device: str ) ->Tuple[List[torch.Tensor], List[torch.Tensor]]: torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) scale = head_size ** -0.5 x = 16 // to...
null
get_num_layers
total_num_hidden_layers = self.hf_config.num_hidden_layers return total_num_hidden_layers // parallel_config.pipeline_parallel_size
def get_num_layers(self, parallel_config: 'ParallelConfig') -> int:
    total_num_hidden_layers = self.hf_config.num_hidden_layers
    return total_num_hidden_layers // parallel_config.pipeline_parallel_size
null
__repr__
return f'GPTQConfig(weight_bits={self.weight_bits}, group_size={self.group_size}, desc_act={self.desc_act})'
def __repr__(self) ->str: return ( f'GPTQConfig(weight_bits={self.weight_bits}, group_size={self.group_size}, desc_act={self.desc_act})' )
null
get_lock
lock_dir = cache_dir if cache_dir is not None else '/tmp' lock_file_name = model_name_or_path.replace('/', '-') + '.lock' lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name)) return lock
def get_lock(model_name_or_path: str, cache_dir: Optional[str] = None):
    lock_dir = cache_dir if cache_dir is not None else '/tmp'
    lock_file_name = model_name_or_path.replace('/', '-') + '.lock'
    lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name))
    return lock
null
get_priority
return now - seq_group.arrival_time
def get_priority(self, now: float, seq_group: SequenceGroup) ->float: return now - seq_group.arrival_time
null
forward
with patch('vllm.model_executor.layers.sampler._prune_hidden_states', lambda x, y: x), patch('vllm.model_executor.layers.sampler._get_logits', lambda *args, **kwargs: self.fake_logits): return super().forward(*args, **kwargs)
def forward(self, *args, **kwargs):
    with patch('vllm.model_executor.layers.sampler._prune_hidden_states',
               lambda x, y: x), \
         patch('vllm.model_executor.layers.sampler._get_logits',
               lambda *args, **kwargs: self.fake_logits):
        return super().forward(*args, **kwargs)
null
get_num_unfinished_seq_groups
return len(self.waiting) + len(self.running) + len(self.swapped)
def get_num_unfinished_seq_groups(self) ->int: return len(self.waiting) + len(self.running) + len(self.swapped)
null
stats
"""Get the statistics of the engine.""" return JSONResponse(engine.testing_stats())
@app.get('/stats')
def stats() -> Response:
    """Get the statistics of the engine."""
    return JSONResponse(engine.testing_stats())
Get the statistics of the engine.
add_request
"""Add a request to be sent to the engine on the next background loop iteration.""" if request_id in self._request_streams: raise KeyError(f'Request {request_id} already exists.') stream = AsyncStream(request_id) self._new_requests.put_nowait((stream, {'request_id': request_id, ** engine_add_request_kwa...
def add_request(self, request_id: str, **engine_add_request_kwargs ) ->AsyncStream: """Add a request to be sent to the engine on the next background loop iteration.""" if request_id in self._request_streams: raise KeyError(f'Request {request_id} already exists.') stream = AsyncStream(req...
Add a request to be sent to the engine on the next background loop iteration.
__repr__
return f'SamplingMetadata(seq_groups={self.seq_groups}, seq_data={self.seq_data}, prompt_lens={self.prompt_lens}, selected_token_indices={self.selected_token_indices}, categorized_sample_indices={self.categorized_sample_indices}), perform_sampling={self.perform_sampling})'
def __repr__(self) ->str: return ( f'SamplingMetadata(seq_groups={self.seq_groups}, seq_data={self.seq_data}, prompt_lens={self.prompt_lens}, selected_token_indices={self.selected_token_indices}, categorized_sample_indices={self.categorized_sample_indices}), perform_sampling={self.perform_sampling})' ...
null
__init__
self.output_sizes = output_sizes tp_size = get_tensor_model_parallel_world_size() assert all(output_size % tp_size == 0 for output_size in output_sizes) super().__init__(input_size, sum(output_sizes), bias, gather_output, skip_bias_add, params_dtype, linear_method)
def __init__(self, input_size: int, output_sizes: List[int], bias: bool= True, gather_output: bool=False, skip_bias_add: bool=False, params_dtype: Optional[torch.dtype]=None, linear_method: Optional[ LinearMethodBase]=None): self.output_sizes = output_sizes tp_size = get_tensor_model_parallel_world_...
null
__init__
super().__init__() self.config = config self.linear_method = linear_method assert not config.tie_word_embeddings self.transformer = GPTJModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.n_embd, bias=True) self.sampler = Sampler(config.vocab_size)
def __init__(self, config: GPTJConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config self.linear_method = linear_method assert not config.tie_word_embeddings self.transformer = GPTJModel(config, linear_method) self.lm_head = ParallelLMHead(config.v...
null
get_head_size
return self.hf_config.hidden_size // self.hf_config.num_attention_heads
def get_head_size(self) ->int: return self.hf_config.hidden_size // self.hf_config.num_attention_heads
null
forward
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor,
            kv_caches: List[KVCache],
            input_metadata: InputMetadata) -> torch.Tensor:
    hidden_states = self.model(input_ids, positions, kv_caches, input_metadata)
    return hidden_states
null
verify_with_parallel_config
total_cpu_memory = get_cpu_memory() num_gpus_per_node = parallel_config.tensor_parallel_size cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node msg = ( f'{cpu_memory_usage / _GB:.2f} GiB out of the {total_cpu_memory / _GB:.2f} GiB total CPU memory is allocated for the swap space.' ) if cpu_memory_usag...
def verify_with_parallel_config(self, parallel_config: 'ParallelConfig' ) ->None: total_cpu_memory = get_cpu_memory() num_gpus_per_node = parallel_config.tensor_parallel_size cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node msg = ( f'{cpu_memory_usage / _GB:.2f} GiB out of the {t...
null
_get_and_verify_dtype
config_dtype = getattr(config, 'torch_dtype', None) if config_dtype is None: config_dtype = torch.float32 if isinstance(dtype, str): dtype = dtype.lower() if dtype == 'auto': if config_dtype == torch.float32: torch_dtype = torch.float16 else: torch_dtype = config_dtyp...
def _get_and_verify_dtype(config: PretrainedConfig, dtype: Union[str, torch .dtype]) ->torch.dtype: config_dtype = getattr(config, 'torch_dtype', None) if config_dtype is None: config_dtype = torch.float32 if isinstance(dtype, str): dtype = dtype.lower() if dtype == 'auto': ...
null
test_copy_blocks
random.seed(seed) torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' assert 2 * num_mappings <= num_blocks src_blocks = random.sample(range(num_blocks), num_mappings) remainig_blocks = list(set(range(num_blocks)) - set(src_blocks)) dst_blocks = random.sample(remainig_blocks, 2 * num_m...
@pytest.mark.parametrize('num_mappings', NUM_MAPPINGS) @pytest.mark.parametrize('num_layers', NUM_LAYERS) @pytest.mark.parametrize('num_heads', NUM_HEADS) @pytest.mark.parametrize('head_size', HEAD_SIZES) @pytest.mark.parametrize('block_size', BLOCK_SIZES) @pytest.mark.parametrize('num_blocks', NUM_BLOCKS) @pytest.mark...
null
forward
input_dtype = hidden_states.dtype variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return (self.weight * hidden_states).to(input_dtype)
def forward(self, hidden_states):
    input_dtype = hidden_states.dtype
    variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance +
                                                self.variance_epsilon)
    return (self.weight * hidden_states).to(input_dtype)
null
find
if seq_id not in self.seqs_dict: raise ValueError(f'Sequence {seq_id} not found.') return self.seqs_dict[seq_id]
def find(self, seq_id: int) -> Sequence:
    if seq_id not in self.seqs_dict:
        raise ValueError(f'Sequence {seq_id} not found.')
    return self.seqs_dict[seq_id]
null
test_silu_and_mul
torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' x = torch.randn(num_tokens, 2 * d, dtype=dtype, device=gpu_id) layer = SiluAndMul() out = layer(x) ref_out = layer._forward(x) assert torch.allclose(out, ref_out, atol=1e-05, rtol=1e-05)
@pytest.mark.parametrize('num_tokens', NUM_TOKENS) @pytest.mark.parametrize('d', D) @pytest.mark.parametrize('dtype', DTYPES) @pytest.mark.parametrize('seed', SEEDS) @pytest.mark.parametrize('device', DEVICES) @torch.inference_mode() def test_silu_and_mul(num_tokens: int, d: int, dtype: torch.dtype, seed: int, devi...
null
__init__
super().__init__() self.hidden_size = config.hidden_size total_num_heads = config.num_attention_heads self.tensor_model_parallel_world_size = get_tensor_model_parallel_world_size() assert total_num_heads % self.tensor_model_parallel_world_size == 0 self.num_heads = total_num_heads // self.tensor_model_parallel_world_si...
def __init__(self, config: GPTBigCodeConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.hidden_size = config.hidden_size total_num_heads = config.num_attention_heads self.tensor_model_parallel_world_size = ( get_tensor_model_parallel_world_size()) assert to...
null
forward
residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_output = self.attn(position_ids=position_ids, hidden_states= hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) mlp_output = self.mlp(hidden_states) hidden_states = attn_output + mlp_output + residual return hidden_states
def forward(self, position_ids: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_output = self.attn(position_ids=position_ids, hidden_states= hidden_states, kv_cache...
null
_sample
categorized_seq_group_ids = {t: [] for t in SamplingType} categorized_sample_indices = sampling_metadata.categorized_sample_indices for i, seq_group in enumerate(sampling_metadata.seq_groups): _, sampling_params = seq_group sampling_type = sampling_params.sampling_type categorized_seq_group_ids[sampling_typ...
def _sample(probs: torch.Tensor, logprobs: torch.Tensor, sampling_metadata: SamplingMetadata) ->List[Tuple[List[int], List[int]]]: categorized_seq_group_ids = {t: [] for t in SamplingType} categorized_sample_indices = sampling_metadata.categorized_sample_indices for i, seq_group in enumerate(sampling_me...
null
tensor_model_parallel_gather
"""Gather the input tensor across model parallel group. NOTE: We assume that the input tensor is on the same device across all the ranks. """ world_size = get_tensor_model_parallel_world_size() if world_size == 1: return input_ assert -input_.dim() <= dim < input_.dim( ), f'Invalid dim ({dim}) for ...
def tensor_model_parallel_gather(input_, dst=0, dim=-1): """Gather the input tensor across model parallel group. NOTE: We assume that the input tensor is on the same device across all the ranks. """ world_size = get_tensor_model_parallel_world_size() if world_size == 1: return input_ ...
Gather the input tensor across model parallel group. NOTE: We assume that the input tensor is on the same device across all the ranks.
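The snippet above is truncated; as a rough illustration of the same gather pattern (not the library's actual code), a sketch over the default torch.distributed process group:
```
import torch
import torch.distributed as dist

def gather_sketch(input_: torch.Tensor, dst: int = 0, dim: int = -1):
    world_size = dist.get_world_size()
    if world_size == 1:
        # Single rank: nothing to gather.
        return input_
    rank = dist.get_rank()
    # Only the destination rank allocates receive buffers.
    gather_list = ([torch.empty_like(input_) for _ in range(world_size)]
                   if rank == dst else None)
    dist.gather(input_, gather_list, dst=dst)
    if rank != dst:
        return None
    # Stitch the per-rank shards back together along the requested dim.
    return torch.cat(gather_list, dim=dim)
```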
put
if self._finished: return self._queue.put_nowait(item)
def put(self, item: RequestOutput) -> None:
    if self._finished:
        return
    self._queue.put_nowait(item)
null
__init__
self.seq_id = seq_id self.prompt = prompt self.block_size = block_size self.data = SequenceData(prompt_token_ids) self.output_logprobs: SampleLogprobs = [] self.output_text = '' self.logical_token_blocks: List[LogicalTokenBlock] = [] self._append_tokens_to_blocks(prompt_token_ids) self.status = SequenceStatus.WAITING s...
def __init__(self, seq_id: int, prompt: str, prompt_token_ids: List[int], block_size: int) ->None: self.seq_id = seq_id self.prompt = prompt self.block_size = block_size self.data = SequenceData(prompt_token_ids) self.output_logprobs: SampleLogprobs = [] self.output_text = '' self.logica...
null
__init__
self.request_id = request_id self._queue = asyncio.Queue() self._finished = False
def __init__(self, request_id: str) -> None:
    self.request_id = request_id
    self._queue = asyncio.Queue()
    self._finished = False
null
_init_distributed_environment
"""Initialize the distributed environment.""" if torch.distributed.is_initialized(): torch_world_size = torch.distributed.get_world_size() if torch_world_size != parallel_config.world_size: raise RuntimeError( f'torch.distributed is already initialized but the torch world size does not match...
def _init_distributed_environment(parallel_config: ParallelConfig, rank: int, distributed_init_method: Optional[str]=None) ->None: """Initialize the distributed environment.""" if torch.distributed.is_initialized(): torch_world_size = torch.distributed.get_world_size() if torch_world_size !=...
Initialize the distributed environment.
get_linear_method
return SqueezeLLMLinearMethod(self)
def get_linear_method(self) ->'SqueezeLLMLinearMethod': return SqueezeLLMLinearMethod(self)
null
init_event
self.new_requests_event = asyncio.Event()
def init_event(self): self.new_requests_event = asyncio.Event()
null
sample
next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata, self.lm_head.bias) return next_tokens
def sample(self, hidden_states: torch.Tensor,
           sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]:
    next_tokens = self.sampler(self.lm_head.weight, hidden_states,
                               sampling_metadata, self.lm_head.bias)
    return next_tokens
null
add_request
"""Add a request to the engine's request pool. The request is added to the request pool and will be processed by the scheduler as `engine.step()` is called. The exact scheduling policy is determined by the scheduler. Args: request_id: The unique ID of the request. ...
def add_request(self, request_id: str, prompt: Optional[str], sampling_params: SamplingParams, prompt_token_ids: Optional[List[int]]= None, arrival_time: Optional[float]=None) ->None: """Add a request to the engine's request pool. The request is added to the request pool and will be processed by th...
Add a request to the engine's request pool. The request is added to the request pool and will be processed by the scheduler as `engine.step()` is called. The exact scheduling policy is determined by the scheduler. Args: request_id: The unique ID of the request. prompt: The prompt string. Can be None if prompt...
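A hedged sketch of driving this request pool directly; the engine construction and model name are assumptions for illustration, not part of this record:
```
from vllm import EngineArgs, LLMEngine, SamplingParams

# Build an engine (illustrative model choice).
engine = LLMEngine.from_engine_args(EngineArgs(model="facebook/opt-125m"))

# Queue a request; the scheduler decides when it actually runs.
engine.add_request(request_id="0",
                   prompt="Hello, my name is",
                   sampling_params=SamplingParams(max_tokens=16))

# Each step() call advances all scheduled requests by one iteration.
while engine.has_unfinished_requests():
    for output in engine.step():
        if output.finished:
            print(output.outputs[0].text)
```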
get_output_token_ids
return self.data.output_token_ids
def get_output_token_ids(self) ->List[int]: return self.data.output_token_ids
null
__init__
super().__init__() self.head_size = head_size self.rotary_dim = rotary_dim self.max_position_embeddings = max_position_embeddings self.base = base self.is_neox_style = is_neox_style cache = self._compute_cos_sin_cache() cache = cache.to(torch.get_default_dtype()) self.register_buffer('cos_sin_cache', cache, persistent=...
def __init__(self, head_size: int, rotary_dim: int, max_position_embeddings: int, base: int, is_neox_style: bool) ->None: super().__init__() self.head_size = head_size self.rotary_dim = rotary_dim self.max_position_embeddings = max_position_embeddings self.base = base self.is_neox_style = is...
null
__init__
self.index = index self.text = text self.token_ids = token_ids self.cumulative_logprob = cumulative_logprob self.logprobs = logprobs self.finish_reason = finish_reason
def __init__(self, index: int, text: str, token_ids: List[int], cumulative_logprob: float, logprobs: Optional[SampleLogprobs], finish_reason: Optional[str]=None) ->None: self.index = index self.text = text self.token_ids = token_ids self.cumulative_logprob = cumulative_logprob self.logprobs ...
null
__init__
super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias= False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError( f'...
def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.down_proj = RowPar...
null
fork
new_seq = copy.deepcopy(self) new_seq.seq_id = new_seq_id return new_seq
def fork(self, new_seq_id: int) -> 'Sequence':
    new_seq = copy.deepcopy(self)
    new_seq.seq_id = new_seq_id
    return new_seq
null
from_config
weight_bits = cls.get_from_keys(config, ['bits']) group_size = cls.get_from_keys(config, ['group_size']) desc_act = cls.get_from_keys(config, ['desc_act']) return cls(weight_bits, group_size, desc_act)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> 'GPTQConfig':
    weight_bits = cls.get_from_keys(config, ['bits'])
    group_size = cls.get_from_keys(config, ['group_size'])
    desc_act = cls.get_from_keys(config, ['desc_act'])
    return cls(weight_bits, group_size, desc_act)
null
__init__
self.device = device self.block_number = block_number self.block_size = block_size self.ref_count = 0
def __init__(self, device: Device, block_number: int,
             block_size: int) -> None:
    self.device = device
    self.block_number = block_number
    self.block_size = block_size
    self.ref_count = 0
null
get_amdgpu_offload_arch
command = '/opt/rocm/llvm/bin/amdgpu-offload-arch' try: output = subprocess.check_output([command]) return output.decode('utf-8').strip() except subprocess.CalledProcessError as e: error_message = f'Error: {e}' raise RuntimeError(error_message) from e except FileNotFoundError as e: error_message = f...
def get_amdgpu_offload_arch(): command = '/opt/rocm/llvm/bin/amdgpu-offload-arch' try: output = subprocess.check_output([command]) return output.decode('utf-8').strip() except subprocess.CalledProcessError as e: error_message = f'Error: {e}' raise RuntimeError(error_message) ...
null
forward
qkv, _ = self.c_attn(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.c_proj(attn_output) return output
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.c_attn(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = se...
null
finished
return self.finish_reason is not None
def finished(self) ->bool: return self.finish_reason is not None
null