import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Use a lightweight, public model
model_name = "distilgpt2"  # Alternatives: "tiiuae/falcon-rw-1b" or "EleutherAI/gpt-neo-1.3B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


# Define the text generation function
def generate_response(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],  # silences the missing-attention-mask warning
        max_new_tokens=100,  # cap on *new* tokens, so long prompts aren't truncated to nothing
        pad_token_id=tokenizer.eos_token_id,  # GPT-2-family models have no dedicated pad token
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.7,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Gradio interface with styling
def build_interface():
    with gr.Blocks(
        theme=gr.themes.Base(),
        css="""
        body { background-color: #FFFACD; }
        h1 { color: brown; font-weight: bold; text-align: center; }
        footer { text-align: center; padding-top: 10px; font-style: italic; color: #555; }
        """,
    ) as demo:
        gr.Markdown("# AI Text Generation Chatbot")
        with gr.Row():
            with gr.Column():
                input_text = gr.Textbox(
                    label="Enter your prompt",
                    placeholder="e.g., Once upon a time...",
                )
                submit_btn = gr.Button("Generate Text")
            with gr.Column():
                output_text = gr.Textbox(label="Generated Text")
        submit_btn.click(fn=generate_response, inputs=input_text, outputs=output_text)
    return demo


# Launch the app
if __name__ == "__main__":
    demo = build_interface()
    demo.launch()
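# --- Optional: headless usage (a sketch, not part of the app itself) ---
# generate_response can be exercised without the web UI, e.g. for a quick
# check in a REPL or a test script. This assumes the file is saved as app.py
# (a hypothetical name); note that importing it loads distilgpt2 first.
#
#   from app import generate_response
#   print(generate_response("Once upon a time"))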