File size: 1,597 Bytes
ad255f7
3034141
36a0aa5
ad255f7
1c8564c
b74d5a3
1c8564c
36a0aa5
ad255f7
1c8564c
36a0aa5
ad255f7
 
 
 
 
 
 
 
36a0aa5
ad255f7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f43df55
ad255f7
7e79349
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load a proper text-generation model.
# NOTE(review): gpt2 is a small demo checkpoint; generations are fluent but
# often off-topic. Swap model_name for a fine-tuned checkpoint in production.
model_name = "gpt2"  # Replace with your own trained model if available
# Model and tokenizer must come from the same checkpoint so vocab ids match.
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Create a text-generation pipeline
# (wraps tokenize -> generate -> decode; used by generate_response below).
nlp_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Function to generate response
def generate_response(text):
    """Generate a model continuation for *text* and return it as a string.

    Never raises: any pipeline/model failure is returned as an
    ``"Error: ..."`` string so the Gradio UI always has output to show.
    """
    # Robustness: an empty/whitespace prompt produces meaningless output.
    if not text or not text.strip():
        return "Please enter some text."
    try:
        # max_new_tokens counts only *generated* tokens. The previous
        # max_length=200 included the prompt, so prompts near or over
        # 200 tokens got little or no generated text (or errored).
        result = nlp_pipeline(
            text,
            max_new_tokens=200,
            do_sample=True,
            # GPT-2 has no pad token; reuse EOS to silence the per-call warning.
            pad_token_id=nlp_pipeline.tokenizer.eos_token_id,
        )
        return result[0]['generated_text']
    except Exception as e:  # broad by design: surface any model error in the UI
        return f"Error: {str(e)}"

# Assemble the Gradio UI with Blocks, laid out row by row.
with gr.Blocks() as iface:
    # Page heading plus a short usage blurb.
    gr.Markdown("# AI Text Generator")
    gr.Markdown("Enter text and get AI-generated responses! Customize the input and see how the model responds.")

    with gr.Row():
        # Prompt entry area.
        prompt_box = gr.Textbox(label="Enter Text", placeholder="Type something here...", lines=3, max_lines=5)

    with gr.Row():
        # Generated continuation appears here.
        response_box = gr.Textbox(label="Generated Response", lines=6, max_lines=8)

    # Clicking the button routes the prompt through generate_response.
    run_button = gr.Button("Generate Response")
    run_button.click(generate_response, inputs=prompt_box, outputs=response_box)

# Bind to all interfaces on port 7860 so the app is reachable from
# outside a container or remote host, not just localhost.
iface.launch(server_name="0.0.0.0", server_port=7860)