import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "Datangtang/GGUF_New_1B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cpu")
model.eval()


def chat_fn(message):
    # Tokenize the user message and generate a continuation on CPU.
    inputs = tokenizer(message, return_tensors="pt")
    with torch.inference_mode():
        outputs = model.generate(**inputs, max_new_tokens=150)
    # Decode only the newly generated tokens so the prompt is not echoed back.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)


demo = gr.Interface(
    fn=chat_fn,
    inputs="text",
    outputs="text",
    title="My Finetuned LLM Chat",
)
demo.launch()