vikas83 committed on
Commit
1c8564c
·
verified ·
1 Parent(s): 2985d99

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -24
app.py CHANGED
@@ -1,31 +1,30 @@
1
- import streamlit as st
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
 
4
- # Load model and tokenizer
5
- model_name = "gpt2" # You can change this to your fine-tuned model
6
- tokenizer = AutoTokenizer.from_pretrained(model_name)
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
 
8
 
9
- # Create text-generation pipeline
10
- text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
11
 
12
- # Streamlit UI
13
- st.set_page_config(page_title="AI Text Generator", layout="centered")
14
- st.title("🧠 AI Text Generator")
15
- st.write("Enter a prompt and get AI-generated text!")
 
 
 
16
 
17
- # Input box
18
- user_input = st.text_area("Enter your text prompt here:", height=150)
 
 
 
 
 
 
19
 
20
- # Generate button
21
- if st.button("Generate"):
22
- if user_input.strip() != "":
23
- with st.spinner("Generating..."):
24
- try:
25
- output = text_generator(user_input, max_length=100, do_sample=True)
26
- st.success("Generated Text:")
27
- st.write(output[0]["generated_text"])
28
- except Exception as e:
29
- st.error(f"Error: {str(e)}")
30
- else:
31
- st.warning("Please enter some text.")
 
1
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Model to serve. Swap in a fine-tuned checkpoint name here if you have one.
model_name = "gpt2"

# Fetch the tokenizer and weights from the Hugging Face hub (cached locally
# after the first run).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Wrap both in a ready-to-call text-generation pipeline.
nlp_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
11
 
12
# Function to generate a model response for the given prompt text
def generate_response(text):
    """Generate AI text continuing the given prompt.

    Args:
        text: The user-supplied prompt string.

    Returns:
        The generated continuation (which includes the prompt itself, as
        produced by the transformers text-generation pipeline), or a
        human-readable warning/error message string.
    """
    # Guard against blank input: without this the pipeline generates from
    # an empty prompt, which is never what the user intended. (The earlier
    # Streamlit version of this app issued the same warning.)
    if not text or not text.strip():
        return "Please enter some text."
    try:
        # do_sample=True yields varied, non-deterministic completions;
        # max_length counts prompt tokens plus generated tokens.
        result = nlp_pipeline(text, max_length=50, do_sample=True)
        return result[0]['generated_text']
    except Exception as e:
        # Surface the failure as text in the UI rather than crashing the app.
        return f"Error: {str(e)}"
19
 
20
# Gradio UI: a simple text-in / text-out interface around the generator.
iface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="AI Text Generator",
    description="Enter text and get AI-generated responses!"
)

# Bind to all interfaces on the conventional port 7860 so the app is
# reachable from outside a container (e.g. a Hugging Face Space).
# NOTE(review): a previous comment claimed enable_queue=True was added here,
# but it was not — request queuing is enabled by default in recent Gradio
# releases, so no extra argument is needed; confirm against the pinned
# Gradio version if queuing behavior matters.
iface.launch(server_name="0.0.0.0", server_port=7860)