# CarAssistanceQA / app.py
# Commit b4f13ec (Nihal2000): "gemma3 is not compatible for spaces"
import os
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Startup: load the merged Gemma checkpoint once and wrap it in a
# text-generation pipeline shared by every chat request.
model_id = "Nihal2000/gemma3-merged"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Use GPU 0 when the runtime exposes one via CUDA_VISIBLE_DEVICES, else CPU (-1).
_device = 0 if "CUDA_VISIBLE_DEVICES" in os.environ else -1
gen = pipeline("text-generation", model=model, tokenizer=tokenizer, device=_device)
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Generate one assistant reply for the Gradio ChatInterface.

    Args:
        message: Latest user message.
        history: Prior turns. With ChatInterface(type="messages") each entry
            is a dict {"role": ..., "content": ...}; legacy (user, assistant)
            tuple pairs are also accepted for backward compatibility.
        system_message: System prompt prepended to the conversation.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature (slider).
        top_p: Nucleus-sampling cutoff (slider).

    Returns:
        The generated assistant text, with the prompt prefix stripped.
    """
    lines = [system_message]
    for turn in history:
        if isinstance(turn, dict):
            # type="messages" format: one dict per message. The original
            # code unpacked `u, a` from each entry, which for dicts yields
            # the KEYS ("role", "content") instead of the turn texts.
            role = "User" if turn.get("role") == "user" else "Assistant"
            lines.append(f"{role}: {turn.get('content', '')}")
        else:
            # Legacy tuple format: (user, assistant) pairs.
            u, a = turn
            lines.append(f"User: {u}\nAssistant: {a}")
    lines.append(f"User: {message}\nAssistant:")
    prompt = "\n".join(lines)
    out = gen(
        prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        # do_sample=True so the temperature/top-p sliders take effect;
        # with do_sample=False they were silently ignored by the pipeline.
        do_sample=True,
    )
    # The pipeline returns prompt + completion; keep only the completion.
    return out[0]["generated_text"][len(prompt):]
# Chat UI: messages-format chat box plus the generation controls that
# ChatInterface forwards to respond() as extra positional arguments.
_extra_controls = [
    gr.Textbox(value="You are an automotive assistant.", label="System message"),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
]
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=_extra_controls,
)
# Wrap the chat interface in a Blocks app (the object HF Spaces serves).
with gr.Blocks() as demo:
    chatbot.render()
# Launch only when run as a script; Spaces imports `demo` directly.
if __name__ == "__main__":
    demo.launch()