# app.py for a Hugging Face Space: a minimal Gradio UI around GPT-2 text generation.
import torch
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

MODEL_NAME = "gpt2"

# Load the tokenizer and model once at startup, then put the model in
# evaluation mode since it is only used for inference.
tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)
model.eval()
|
|
def generate_text(prompt, max_length):
    # Tokenize the prompt into input IDs (plus attention mask) as PyTorch tensors.
    inputs = tokenizer(prompt, return_tensors="pt")

    # Sample a continuation; max_length counts the prompt tokens as well.
    # pad_token_id is set explicitly because GPT-2 defines no padding token.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=int(max_length),
            do_sample=True,
            temperature=0.7,
            top_p=0.95,
            top_k=50,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode the full sequence (prompt + continuation) back to text.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
demo = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(
            label="Prompt",
            placeholder="Enter your prompt...",
        ),
        gr.Slider(
            minimum=50,
            maximum=250,
            value=100,
            step=10,
            label="Max length (tokens)",
        ),
    ],
    outputs=gr.Textbox(label="Generated text"),
    title="GPT-2 Text Generator",
    description="GPT-2 deployed on Hugging Face Spaces using Gradio",
)
|
|
if __name__ == "__main__":
    demo.launch()
|
|