import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

print(gr.__version__)  # sanity check: confirm the installed Gradio version

model_path = "./llama"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
)

# Move the model to GPU if one is available; otherwise fall back to CPU.
# Note: passing device_map="auto" and then calling .to(device) conflicts,
# so the model is placed explicitly here instead.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)


def generate_response(prompt, max_tokens=50):
    # Tokenize the prompt and move the input tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=max_tokens)
    # Decode the generated token ids back into text.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response


interface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your prompt here..."),
        gr.Slider(10, 100, step=10, value=50, label="Max Tokens"),
    ],
    outputs=gr.Textbox(label="Response"),
    title="Text Generation with Hugging Face Model",
    description="Enter a prompt and see how the model responds!",
)

if __name__ == "__main__":
    interface.launch()
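
# Note: generate() above uses greedy decoding by default, so the same prompt
# always produces the same completion. A minimal sketch of enabling sampling,
# assuming the same model/tokenizer as above; do_sample, temperature, and
# top_p are standard transformers generate() arguments, and the values here
# are illustrative rather than tuned:
#
#     outputs = model.generate(
#         **inputs,
#         max_new_tokens=max_tokens,
#         do_sample=True,     # sample from the distribution instead of argmax
#         temperature=0.7,    # soften the next-token distribution
#         top_p=0.9,          # nucleus sampling cutoff
#     )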