import os
import pickle as pkl
from pathlib import Path
from threading import Thread
from typing import List, Optional, Tuple, Iterator
import gradio as gr
import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
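# Overview of the pipeline implemented below: tokens are streamed from the chat model;
# after each new chunk, the text generated so far is re-encoded and passed through the
# model again to obtain hidden states; the hidden states from the probe's layer range are
# concatenated and scored by a small pre-trained probe, and the score is rendered as a
# coloured background behind the streamed text.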
DESCRIPTION = """\
# Llama-2 7B Chat with Streamable Semantic Uncertainty Probe
This Space demonstrates the Llama-2-7b-chat model with an added semantic uncertainty probe.
The highlighted text shows the model's uncertainty in real time: a stronger red background indicates higher uncertainty, while green indicates lower uncertainty.
"""
if torch.cuda.is_available():
    model_id = "meta-llama/Llama-2-7b-chat-hf"
    # TODO load the full model?
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_8bit=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = False
# load the probe data
# TODO load accuracy and SE probe and compare in different tabs
with open("./model/20240625-131035_demo.pkl", "rb") as f:
    probe_data = pkl.load(f)

# take the NQ open one
probe_data = probe_data[-2]
# the semantic-entropy probe, kept under its own name so it does not shadow the chat model
se_probe = probe_data['t_bmodel']
layer_range = probe_data['sep_layer_range']
acc_model = probe_data['t_amodel']
acc_layer_range = probe_data['ap_layer_range']
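# The pickle presumably bundles two trained probes with their layer indices:
# 't_bmodel'/'sep_layer_range' for the semantic-entropy probe used below, and
# 't_amodel'/'ap_layer_range' for an accuracy probe that is loaded but not yet wired up.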
def generate(
    message: str,
    chat_history: List[Tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    for user, assistant in chat_history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
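    # skip_prompt=True streams only newly generated tokens (the prompt is not echoed back);
    # the timeout keeps the consumer loop below from hanging if generation stalls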
    generation_kwargs = dict(
        input_ids=input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
        streamer=streamer,
        output_hidden_states=True,
        return_dict_in_generate=True,
    )
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
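    # generation runs on the background thread; the streamer yields decoded text chunks
    # here as soon as they become available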
    generated_text = ""
    highlighted_text = ""
    # the streamer can only be consumed once, so the uncertainty probe runs inside this
    # single loop and the highlighted text is what gets streamed to the UI
    for new_text in streamer:
        generated_text += new_text
        # this is doing it twice... just do autoregressive generation instead
        current_input_ids = tokenizer.encode(generated_text, return_tensors="pt").to(model.device)
        with torch.no_grad():
            outputs = model(current_input_ids, output_hidden_states=True)
        # tuple with one (batch, seq_len, hidden_dim) tensor per layer (plus the embedding layer)
        hidden = outputs.hidden_states
        # stack the last-token embedding from every layer -> (n_layers, 1, hidden_dim)
        # if len(hidden) == 1:  # FIX: runtime error for mistral-7b on bioasq
        #     sec_last_input = hidden[0]
        # elif (n_generated - 2) >= len(hidden):
        #     sec_last_input = hidden[-2]
        # else:
        #     sec_last_input = hidden[n_generated - 2]
        last_hidden_state = torch.stack([layer[:, -1, :].cpu() for layer in hidden]).cpu().numpy()
        print(last_hidden_state.shape)
        # TODO potentially need to only compute uncertainty for the last token in sentence?
        # concatenate the hidden states from the specified probe layers
        probe_input = np.concatenate(last_hidden_state[layer_range], axis=1)
        print(probe_input.shape)
        uncertainty_score = se_probe.predict(probe_input)
        print(uncertainty_score)
        new_highlighted_text = highlight_text(new_text, uncertainty_score[0])
        print(new_highlighted_text)
        highlighted_text += new_highlighted_text
        yield highlighted_text
def highlight_text(text: str, uncertainty_score: float) -> str:
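    # Map the probe score to a background colour: positive scores shade toward red,
    # negative toward green, and 0 gives white
    # (e.g. 1.0 -> #FF0000, 0.0 -> #FFFFFF, -1.0 -> #00FF00).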
    if uncertainty_score > 0:
        html_color = "#%02X%02X%02X" % (
            255,
            int(255 * (1 - uncertainty_score)),
            int(255 * (1 - uncertainty_score)),
        )
    else:
        html_color = "#%02X%02X%02X" % (
            int(255 * (1 + uncertainty_score)),
            255,
            int(255 * (1 + uncertainty_score)),
        )
    return '<span style="background-color: {}; color: black">{}</span>'.format(
        html_color, text
    )
chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.6,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.9,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.2,
        ),
    ],
    stop_btn=None,
    examples=[
        ["What is the capital of France?"],
        ["Explain the theory of relativity in simple terms."],
        ["Write a short poem about artificial intelligence."],
    ],
    title="Llama-2 7B Chat with Streamable Semantic Uncertainty Probe",
    description=DESCRIPTION,
)
if __name__ == "__main__":
    chat_interface.launch()