vikas83 committed on
Commit
ad255f7
·
verified ·
1 Parent(s): a1f2e62

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -20
app.py CHANGED
@@ -1,30 +1,41 @@
1
# --- Streamlit text-generation demo: one-time model bootstrap ---------------
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Swap in your own fine-tuned checkpoint here if you have one.
model_name = "gpt2"

# Tokenizer and model weights are fetched once when the script first runs.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Bundle both into a ready-to-use text-generation pipeline.
nlp_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
11
 
12
# --- Streamlit page layout --------------------------------------------------
st.title("🧠 AI Text Generator")
st.markdown("Enter text and get AI-generated responses! Customize the input and see how the model responds.")

# Prompt entry from the user.
input_text = st.text_area("Enter Text", placeholder="Type something here...", height=120)

# Run generation on demand; reject an all-whitespace prompt first.
if st.button("Generate Response"):
    if not input_text.strip():
        st.warning("Please enter some text.")
    else:
        try:
            with st.spinner("Generating response..."):
                result = nlp_pipeline(input_text, max_length=200, do_sample=True)
                st.text_area("Generated Response", value=result[0]['generated_text'], height=180)
        except Exception as e:
            # Surface the failure in the page rather than crashing the app.
            st.error(f"Error: {str(e)}")
 
1
# --- Gradio text-generation demo: one-time model bootstrap ------------------
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Point this at your own fine-tuned checkpoint if you have one.
model_name = "gpt2"

# Download/load the tokenizer and model once at import time.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Shared generation pipeline reused for every request.
nlp_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
11
 
12
# Function to generate a response for the UI.
def generate_response(text):
    """Generate a model continuation for *text*.

    Parameters
    ----------
    text : str
        The user's prompt.

    Returns
    -------
    str
        The prompt plus the generated continuation, a please-enter-text
        notice for blank input, or an ``Error: ...`` message on failure.
    """
    # Restore the empty-input guard the Streamlit version had (st.warning);
    # this Gradio port had silently dropped it.
    if not text or not text.strip():
        return "Please enter some text."
    try:
        # max_new_tokens bounds only the *generated* text. The previous
        # max_length=200 also counted prompt tokens, so long prompts got
        # little or no new output (and triggered a transformers warning).
        result = nlp_pipeline(text, max_new_tokens=200, do_sample=True)
        return result[0]['generated_text']
    except Exception as e:  # surface errors in the UI instead of crashing
        return f"Error: {str(e)}"
20
 
21
# --- Gradio interface built with the flexible Blocks layout -----------------
with gr.Blocks() as demo:
    gr.Markdown("# AI Text Generator")
    gr.Markdown("Enter text and get AI-generated responses! Customize the input and see how the model responds.")

    # Prompt entry row.
    with gr.Row():
        prompt_box = gr.Textbox(label="Enter Text", placeholder="Type something here...", lines=3, max_lines=5)

    # Generated-output row.
    with gr.Row():
        response_box = gr.Textbox(label="Generated Response", lines=6, max_lines=8)

    run_button = gr.Button("Generate Response")

    # Wire the button click to the generation function.
    run_button.click(generate_response, inputs=prompt_box, outputs=response_box)

# Bind to all interfaces on the HF Spaces default port (theme/layout kwargs
# are no longer passed directly here).
demo.launch(server_name="0.0.0.0", server_port=7860)