Mehak-Mazhar commited on
Commit
193ebcd
·
verified ·
1 Parent(s): ac982af

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -44
app.py CHANGED
@@ -2,52 +2,45 @@ import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
- # Load a lightweight model
6
- model_name = "mrm8488/GPT-2-finetuned-on-conversational-data" # ~500MB
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
10
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
11
- model.to(device)
12
-
13
# Response generation logic
def generate_response(message, chat_history):
    """Generate a chatbot reply to *message* given the running history.

    Parameters
    ----------
    message : str
        The user's latest input.
    chat_history : list[dict] | None
        History in Gradio "messages" format: {"role": ..., "content": ...}.

    Returns
    -------
    tuple[list[dict], list[dict]]
        The updated history twice — one value per wired output, since
        ``msg.submit`` maps this function to ``[chatbot, state]``.
        (The original returned a single list, which Gradio would have
        split element-wise across the two outputs.)
    """
    try:
        chat_history = chat_history or []

        # Rebuild the transcript from stored messages.  Entries are
        # {"role", "content"} pairs, so branch on the role: the original
        # read turn['content'] as user text and a nonexistent 'response'
        # key for the reply, mislabeling assistant turns as user turns.
        full_prompt = ""
        for turn in chat_history:
            speaker = "AI" if turn.get("role") == "assistant" else "User"
            full_prompt += f"{speaker}: {turn.get('content', '')}\n"
        full_prompt += f"User: {message}\nAI:"

        inputs = tokenizer.encode(full_prompt, return_tensors="pt").to(device)
        outputs = model.generate(inputs, max_new_tokens=100, pad_token_id=tokenizer.eos_token_id)
        decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Keep only the newest reply, after the final "AI:" marker.
        response = decoded.split("AI:")[-1].strip()

        # Record both sides of the turn in messages format.
        chat_history.append({"role": "user", "content": message})
        chat_history.append({"role": "assistant", "content": response})
        return chat_history, chat_history
    except Exception as e:
        # Surface the failure in the chat instead of crashing the app.
        err_history = chat_history + [{"role": "assistant", "content": f"⚠️ Error: {str(e)}"}]
        return err_history, err_history
36
-
37
- # Gradio interface
38
- with gr.Blocks(css="body { background-color: #FFF9C4; }") as demo:
39
- gr.Markdown("<h1 style='color:brown; font-weight:bold;'>🧠 AI Chatbot</h1>")
40
- chatbot = gr.Chatbot(label="Talk to AI", type="messages")
41
- msg = gr.Textbox(label="Ask something...", placeholder="Type your message here...")
42
-
43
- clear = gr.Button("Clear Chat")
44
- footer = gr.Markdown("<p style='text-align: right; color: brown;'>Designed by Mehak Mazhar</p>")
45
-
46
- state = gr.State([])
47
-
48
- msg.submit(generate_response, [msg, state], [chatbot, state])
49
- clear.click(lambda: ([], []), None, [chatbot, state])
50
-
51
- # Run
52
  if __name__ == "__main__":
 
53
  demo.launch()
 
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
+ # Use a lightweight and public model
6
+ model_name = "distilgpt2" # You can also use "tiiuae/falcon-rw-1b" or "EleutherAI/gpt-neo-1.3B"
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
10
+ # Define text generation function
11
+ def generate_response(prompt):
12
+ inputs = tokenizer(prompt, return_tensors="pt")
13
+ outputs = model.generate(
14
+ inputs["input_ids"],
15
+ max_length=100,
16
+ pad_token_id=tokenizer.eos_token_id,
17
+ do_sample=True,
18
+ top_k=50,
19
+ top_p=0.95,
20
+ temperature=0.7,
21
+ )
22
+ return tokenizer.decode(outputs[0], skip_special_tokens=True)
23
+
24
+ # Gradio interface with styling
25
+ def build_interface():
26
+ with gr.Blocks(theme=gr.themes.Base(), css="""
27
+ body { background-color: #FFFACD; }
28
+ h1 { color: brown; font-weight: bold; text-align: center; }
29
+ footer { text-align: center; padding-top: 10px; font-style: italic; color: #555; }
30
+ """) as demo:
31
+ gr.Markdown("# AI Text Generation Chatbot")
32
+ with gr.Row():
33
+ with gr.Column():
34
+ input_text = gr.Textbox(label="Enter your prompt", placeholder="e.g., Once upon a time...")
35
+ submit_btn = gr.Button("Generate Text")
36
+ with gr.Column():
37
+ output_text = gr.Textbox(label="Generated Text")
38
+
39
+ submit_btn.click(fn=generate_response, inputs=input_text, outputs=output_text)
40
+ gr.Markdown("<footer>Designed by Mehak Mazhar</footer>")
41
+ return demo
42
+
43
# Launch app
if __name__ == "__main__":
    # Build the UI once and start the Gradio server.
    app = build_interface()
    app.launch()