| text (string, length 0–445) |
|---|
Args: |
model: HuggingFace model with automatic device mapping |
input_ids: Input token ids |
past_key_values: KV Cache for knowledge |
max_new_tokens: Maximum new tokens to generate |
""" |
embed_device = model.model.embed_tokens.weight.device |
origin_ids = input_ids |
input_ids = input_ids.to(embed_device) |
output_ids = input_ids.clone() |
next_token = input_ids |
with torch.no_grad(): |
for _ in range(max_new_tokens): |
outputs = model( |
input_ids=next_token, |
past_key_values=past_key_values, |
use_cache=True |
) |
next_token_logits = outputs.logits[:, -1, :] |
next_token = next_token_logits.argmax(dim=-1).unsqueeze(-1) |
next_token = next_token.to(embed_device) |
past_key_values = outputs.past_key_values |
output_ids = torch.cat([output_ids, next_token], dim=1) |
if next_token.item() in model.config.eos_token_id: |
break |
return output_ids[:, origin_ids.shape[-1]:] |
"""KV Cache test"""
# Allowlist the classes torch.load(weights_only=True) must be able to
# rebuild: DynamicCache itself plus the builtin `set` it contains.
torch.serialization.add_safe_globals([DynamicCache, set])
def preprocess_knowledge(
    model,
    tokenizer,
    prompt: str,
) -> DynamicCache:
    """
    Build the knowledge KV cache used by CAG.

    Runs a single forward pass over the knowledge prompt and returns
    the resulting cache so later generations can reuse it.

    Args:
        model: HuggingFace model with automatic device mapping
        tokenizer: HuggingFace tokenizer
        prompt: The knowledge to preprocess, which is basically a prompt
    Returns:
        DynamicCache: KV Cache
    """
    # Inputs must live on the same device as the embedding layer.
    device = model.model.embed_tokens.weight.device
    tokens = tokenizer.encode(prompt, return_tensors="pt").to(device)
    cache = DynamicCache()
    with torch.no_grad():
        result = model(
            input_ids=tokens,
            use_cache=True,
            past_key_values=cache,
            output_hidden_states=False,
            output_attentions=False,
        )
    return result.past_key_values
def write_kv_cache(kv: DynamicCache, path: str):
    """Serialize the knowledge KV cache to *path* with torch.save."""
    torch.save(kv, path)
def clean_up(kv: DynamicCache, origin_len: int):
    """Truncate every layer of the KV cache back to *origin_len* tokens.

    Drops cache entries appended during generation so the cache again
    holds only the preprocessed knowledge (in-place).
    """
    for layer, (keys, values) in enumerate(zip(kv.key_cache, kv.value_cache)):
        kv.key_cache[layer] = keys[:, :, :origin_len, :]
        kv.value_cache[layer] = values[:, :, :origin_len, :]
def read_kv_cache(path: str) -> DynamicCache:
    """Load a KV cache previously written by write_kv_cache.

    Uses weights_only=True, so DynamicCache (and set) must already be
    allowlisted via torch.serialization.add_safe_globals.
    """
    return torch.load(path, weights_only=True)
"""Sentence-BERT for evaluating semantic similarity"""
# Lightweight sentence-transformer used to embed responses and
# ground truths for cosine-similarity scoring.
bert_model = SentenceTransformer('all-MiniLM-L6-v2')
def get_bert_similarity(response, ground_truth): |
# Encode the query and text |
query_embedding = bert_model.encode(response, convert_to_tensor=True) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.