"""Gradio demo: generate text with GPT-2 or Bloom-560M."""

import gradio as gr
from transformers import (
    GPT2LMHeadModel,
    GPT2Tokenizer,
    AutoModelForCausalLM,
    AutoTokenizer,
)

# Load both models once at startup so each request doesn't pay the load cost.
# Load GPT2
gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# Load Bloom-560M
bloom_model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m")
bloom_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")


# Inference Function
def generate_text(prompt: str, model_name: str) -> str:
    """Generate up to 100 tokens of text from *prompt* with the chosen model.

    Args:
        prompt: User-supplied seed text.
        model_name: Radio-button label, either "🧠 GPT2" or "🌸 Bloom-560M".

    Returns:
        The decoded generation (prompt included), or a short instruction
        message when the prompt is empty or no model was selected — the
        original fell through and returned ``None``, which rendered as a
        blank output box.
    """
    if not prompt:
        return "Please enter a prompt."

    if model_name == "🧠 GPT2":
        # GPT-2 has no pad token; reuse EOS as pad_token_id and pass the
        # attention mask explicitly to silence generate()'s warning and
        # avoid mis-masking.
        inputs = gpt2_tokenizer(prompt, return_tensors="pt")
        output = gpt2_model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=100,
            pad_token_id=gpt2_tokenizer.eos_token_id,
        )
        return gpt2_tokenizer.decode(output[0], skip_special_tokens=True)

    if model_name == "🌸 Bloom-560M":
        # The tokenizer already produced an attention mask — forward it.
        inputs = bloom_tokenizer(prompt, return_tensors="pt")
        output = bloom_model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=100,
        )
        return bloom_tokenizer.decode(output[0], skip_special_tokens=True)

    return "Please choose a model."


# Gradio UI
with gr.Blocks(css="""
body { background-color: #FFFACD; }
h1 { color: brown !important; }
""") as demo:
    # NOTE(review): the original Markdown strings contained markup that was
    # garbled in this copy of the file — reconstructed as plain Markdown.
    gr.Markdown("# LLM for Content Generation")
    gr.Markdown("Generate high-quality text using powerful LLMs")

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Enter a topic or prompt")
            model_choice = gr.Radio(
                ["🧠 GPT2", "🌸 Bloom-560M"], label="Choose a Model"
            )
            submit = gr.Button("Generate")
        with gr.Column():
            output = gr.Textbox(label="Generated Text", lines=10)

    submit.click(fn=generate_text, inputs=[prompt, model_choice], outputs=output)
    gr.Markdown("Designed by Mehak Mazhar")


if __name__ == "__main__":
    # Guard the launch so importing this module doesn't start the server.
    demo.launch()