"""Gradio demo: text generation with a local Hugging Face causal language model."""

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

print(gr.__version__)  # sanity check: confirm which Gradio version is installed

# Load the tokenizer and model from the local checkpoint directory.
model_path = "./llama"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    device_map="auto",  # let accelerate place the weights on GPU/CPU
)

# device_map="auto" already dispatches the model, so do not call model.to(device)
# again; the device variable is only used to place the inputs.
device = "cuda" if torch.cuda.is_available() else "cpu"

def generate_response(prompt, max_tokens=50):
    """Generate a completion for the prompt, capped at max_tokens new tokens."""
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # The slider may deliver a float, so cast before passing to generate().
    outputs = model.generate(**inputs, max_new_tokens=int(max_tokens))
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

interface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your prompt here..."),
        gr.Slider(10, 100, step=10, value=50, label="Max Tokens"),
    ],
    outputs=gr.Textbox(label="Response"),
    title="Text Generation with Hugging Face Model",
    description="Enter a prompt and see how the model responds!",
)

if __name__ == "__main__":
    interface.launch()