import gradio as gr
import torch
from transformers import pipeline


# GPT-2 text-generation pipeline shared by the whole app.
# device=0 selects the first CUDA GPU when one is available;
# device=-1 is the transformers convention for "run on CPU".
generator = pipeline(
    "text-generation",
    model="openai-community/gpt2",
    device=0 if torch.cuda.is_available() else -1,
)
|
|
def generate_story(topic: str) -> str:
    """Generate a short creative tweet about *topic* with GPT-2.

    Parameters
    ----------
    topic : str
        Free-form topic text entered by the user.

    Returns
    -------
    str
        The sampled continuation with the prompt prefix stripped and
        surrounding whitespace removed.
    """
    prompt = f"Write a creative short tweet about: {topic}\n\nTweet:\n"

    # Sample a single continuation; do_sample + temperature/top_p give
    # varied, creative output rather than greedy decoding.
    result = generator(
        prompt,
        max_new_tokens=200,
        temperature=0.9,
        top_p=0.95,
        do_sample=True,
        num_return_sequences=1,
    )[0]["generated_text"]

    # The pipeline echoes the prompt at the start of generated_text.
    # str.replace(prompt, "") would delete EVERY occurrence of the prompt
    # text, mangling the output if the model happens to repeat it;
    # removeprefix strips only the leading echo.
    return result.removeprefix(prompt).strip()
|
|
|
|
# Minimal Gradio UI: one small input box for the topic, a larger
# output box for the generated tweet.
demo = gr.Interface(
    fn=generate_story,
    inputs=gr.Textbox(
        lines=2,
        placeholder="Enter a topic (e.g., funny joke , AI takeover)...",
    ),
    outputs=gr.Textbox(lines=10),
    title="AI tweet Generator (GPT-2)",
    description="Generates short creative tweet using GPT-2",
)


# Start the local Gradio server (blocks until interrupted).
demo.launch()