import os
from threading import Thread
from typing import Iterator

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

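# Token budgets; MAX_INPUT_TOKEN_LENGTH can be overridden via the environment variable of the same name.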
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
DESCRIPTION = """\
# L-MChat
This Space demonstrates L-MChat, a pair of chat-optimized language models:
- Fast-Model: `Artples/L-MChat-Small`
- Quality-Model: `Artples/L-MChat-7b`
By default the Quality-Model is used. You can switch to the Fast-Model if you prefer lower latency over maximum quality.
"""
if not torch.cuda.is_available():
    DESCRIPTION += "\n\n<p>Running on CPU – this demo is intended for a GPU and may be extremely slow.</p>"

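# Map UI display names to Hugging Face model ids.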
model_dict = {
    "Fast-Model": "Artples/L-MChat-Small",
    "Quality-Model": "Artples/L-MChat-7b",
}

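# Process-level caches so each model and tokenizer is loaded at most once.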
_model_cache: dict[str, AutoModelForCausalLM] = {}
_tokenizer_cache: dict[str, AutoTokenizer] = {}


def get_model_and_tokenizer(model_id: str):
    """Lazy-load and cache model and tokenizer per model id."""
    if model_id not in _model_cache:
        model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokenizer.use_default_system_prompt = False
        _model_cache[model_id] = model
        _tokenizer_cache[model_id] = tokenizer
    return _model_cache[model_id], _tokenizer_cache[model_id]

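# ZeroGPU allocates a GPU per call; duration caps each allocation at 90 seconds here.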
@spaces.GPU(enable_queue=True, duration=90)
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,
    model_choice: str,
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
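    # Look up the chosen model and reuse the cached weights and tokenizer.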
    model_id = model_dict[model_choice]
    model, tokenizer = get_model_and_tokenizer(model_id)
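    # Rebuild the conversation: optional system prompt, prior turns, then the new message.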
    conversation: list[dict[str, str]] = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    for user, assistant in chat_history:
        conversation.append({"role": "user", "content": user})
        if assistant is not None:
            conversation.append({"role": "assistant", "content": assistant})
    conversation.append({"role": "user", "content": message})
    input_ids = tokenizer.apply_chat_template(
        conversation,
        return_tensors="pt",
        add_generation_prompt=True,
    )
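    # Trim from the left so only the most recent MAX_INPUT_TOKEN_LENGTH tokens remain.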
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(
            f"Trimmed input from conversation as it was longer than "
            f"{MAX_INPUT_TOKEN_LENGTH} tokens."
        )
    input_ids = input_ids.to(model.device)
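    # Stream decoded text as it is generated, skipping the prompt and special tokens.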
    streamer = TextIteratorStreamer(
        tokenizer,
        timeout=10.0,
        skip_prompt=True,
        skip_special_tokens=True,
    )
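    # Sampling arguments for model.generate; max_new_tokens is clamped to the global cap.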
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=min(max_new_tokens, MAX_MAX_NEW_TOKENS),
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
    )
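    # Run generation on a worker thread so this generator can consume the streamer.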
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()
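    # Accumulate chunks and yield the growing reply so the UI updates incrementally.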
    outputs: list[str] = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)

chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Radio(
            ["Fast-Model", "Quality-Model"],
            label="Model",
            value="Quality-Model",
        ),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.6,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.9,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.2,
        ),
    ],
    stop_btn=None,
    examples=[
        ["Hello there! How are you doing?"],
        ["Can you briefly explain what the Python programming language is?"],
        ["Explain the plot of Cinderella in a sentence."],
        ["How many hours does it take a man to eat a helicopter?"],
        ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
    ],
)

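# Compose the description and the chat interface into one Blocks app.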
with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)
    chat_interface.render()

if __name__ == "__main__":
    demo.queue(max_size=20).launch()