import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
|
|
# Load the tokenizer and model from the Hugging Face Hub.
model_name = "your-username/my-textgen-model"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float32)
model.eval()  # explicit inference mode (from_pretrained already defaults to eval)
|
|
def generate_text(prompt, max_length=200):
    """Generate a continuation of `prompt`, up to `max_length` total tokens."""
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():  # no gradients needed for inference
        outputs = model.generate(
            **inputs,
            max_length=int(max_length),  # slider values arrive as numbers; cast to int
            pad_token_id=tokenizer.eos_token_id,  # silence the missing-pad-token warning
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
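
# A sketch of a sampling-based variant for less repetitive output; the
# parameter values below are illustrative assumptions, not tuned settings:
#
#     outputs = model.generate(
#         **inputs,
#         max_length=int(max_length),
#         do_sample=True,    # sample from the distribution instead of greedy decoding
#         temperature=0.7,
#         top_p=0.9,
#         pad_token_id=tokenizer.eos_token_id,
#     )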
|
|
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Type your text here..."),
        gr.Slider(50, 500, value=200, step=1, label="Max Length"),  # step=1 keeps values integral
    ],
    outputs="text",
    title="Text Generation API",
    description="PyTorch text generation model deployed on Hugging Face",
)
|
|
# Start the Gradio app (on Spaces, this serves the hosted demo).
iface.launch()
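
# A minimal sketch of calling the running app programmatically with
# gradio_client; the local URL and "/predict" endpoint are Gradio defaults
# and assumptions here (a deployed Space would use its own identifier):
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860")
#     print(client.predict("Once upon a time", 200, api_name="/predict"))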
|
|