# Hugging Face Space (status: Sleeping) — AI text generator demo (GPT-2 + Gradio)
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# --- Model setup ------------------------------------------------------------
# Build a text-generation pipeline around a causal language model checkpoint.
checkpoint = "gpt2"  # replace with your own trained model if available
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

# Shared pipeline used by the UI callback below.
nlp_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
| # Function to generate response | |
| def generate_response(text): | |
| try: | |
| # Increase max_length for longer output | |
| result = nlp_pipeline(text, max_length=200, do_sample=True) | |
| return result[0]['generated_text'] | |
| except Exception as e: | |
| return f"Error: {str(e)}" | |
| # Create Gradio Interface using Blocks for flexible layout | |
| with gr.Blocks() as iface: | |
| gr.Markdown("# AI Text Generator") | |
| gr.Markdown("Enter text and get AI-generated responses! Customize the input and see how the model responds.") | |
| with gr.Row(): | |
| # Textbox for input | |
| input_text = gr.Textbox(label="Enter Text", placeholder="Type something here...", lines=3, max_lines=5) | |
| with gr.Row(): | |
| # Output box | |
| output_text = gr.Textbox(label="Generated Response", lines=6, max_lines=8) | |
| # Button to trigger the text generation | |
| generate_btn = gr.Button("Generate Response") | |
| # Define button click action | |
| generate_btn.click(generate_response, inputs=input_text, outputs=output_text) | |
| # Launch Gradio UI (No need to specify theme and layout directly now) | |
| iface.launch(server_name="0.0.0.0", server_port=7860) |