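"""Gradio chat demo for the L-MChat model pair, packaged as a Hugging Face
Space (running on ZeroGPU)."""
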
import os
from threading import Thread
from typing import Iterator
import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
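
# Generation limits; MAX_INPUT_TOKEN_LENGTH can be overridden via an
# environment variable without a code change.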
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

DESCRIPTION = """\
# L-MChat
This Space demonstrates L-MChat, a pair of chat-optimized language models:
- Fast-Model: `Artples/L-MChat-Small`
- Quality-Model: `Artples/L-MChat-7b`
By default the Quality-Model is used. You can switch to the Fast-Model if you prefer lower latency over maximum quality.
"""

if not torch.cuda.is_available():
    DESCRIPTION += "\n\n<p>Running on CPU – this demo is intended for GPU use and may be extremely slow.</p>"

model_dict = {
    "Fast-Model": "Artples/L-MChat-Small",
    "Quality-Model": "Artples/L-MChat-7b",
}
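
# Lazily cache models and tokenizers so switching between the two choices
# in the UI does not reload weights on every request.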
_model_cache: dict[str, AutoModelForCausalLM] = {}
_tokenizer_cache: dict[str, AutoTokenizer] = {}


def get_model_and_tokenizer(model_id: str):
    """Lazy-load and cache model and tokenizer per model id."""
    if model_id not in _model_cache:
        model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokenizer.use_default_system_prompt = False
        _model_cache[model_id] = model
        _tokenizer_cache[model_id] = tokenizer
    return _model_cache[model_id], _tokenizer_cache[model_id]
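

# On ZeroGPU Spaces, @spaces.GPU attaches a GPU to each call of this
# function; `duration=90` caps how long a single call may hold it.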
@spaces.GPU(enable_queue=True, duration=90)
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,
    model_choice: str,
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
    model_id = model_dict[model_choice]
    model, tokenizer = get_model_and_tokenizer(model_id)
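
    # Rebuild the conversation in the role/content format expected by the
    # tokenizer's chat template.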
    conversation: list[dict[str, str]] = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    for user, assistant in chat_history:
        conversation.append({"role": "user", "content": user})
        if assistant is not None:
            conversation.append({"role": "assistant", "content": assistant})
    conversation.append({"role": "user", "content": message})
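
    # Tokenize via the chat template, then keep only the most recent
    # MAX_INPUT_TOKEN_LENGTH tokens if the conversation has grown too long.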
    input_ids = tokenizer.apply_chat_template(
        conversation,
        return_tensors="pt",
        add_generation_prompt=True,
    )
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(
            f"Trimmed input from conversation as it was longer than "
            f"{MAX_INPUT_TOKEN_LENGTH} tokens."
        )
    input_ids = input_ids.to(model.device)
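
    # TextIteratorStreamer exposes generated tokens as an iterator, so
    # partial output can be yielded to the UI while generation runs.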
    streamer = TextIteratorStreamer(
        tokenizer,
        timeout=10.0,
        skip_prompt=True,
        skip_special_tokens=True,
    )
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=min(max_new_tokens, MAX_MAX_NEW_TOKENS),
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
    )
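
    # Run generation in a background thread; the streamer feeds the loop
    # below, which yields the accumulated text after every new chunk.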
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()
    outputs: list[str] = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
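

# The sampling controls appear as additional inputs in the chat UI; their
# order must match generate()'s extra parameters after (message, history).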
chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Radio(
            ["Fast-Model", "Quality-Model"],
            label="Model",
            value="Quality-Model",
        ),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.6,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.9,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.2,
        ),
    ],
    stop_btn=None,
    examples=[
        ["Hello there! How are you doing?"],
        ["Can you briefly explain what the Python programming language is?"],
        ["Explain the plot of Cinderella in a sentence."],
        ["How many hours does it take a man to eat a Helicopter?"],
        ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
    ],
)

with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)
    chat_interface.render()


if __name__ == "__main__":
    demo.queue(max_size=20).launch()