amaresh8053 committed on
Commit
75a88f9
·
1 Parent(s): b16a6f6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -24
app.py CHANGED
@@ -258,7 +258,7 @@ def beam_generate_v2(src_tensor, beam=5, max_len=50, alpha=0.7):
258
 
259
 
260
  # ------------- wrapper to go from user text → reply -------------
261
- def generate_reply(user_text: str) -> str:
262
  # replicate notebook logic: reverse the input sentence
263
  user_text_rev = reverse(user_text)
264
  tokens = tokenize(user_text_rev)
@@ -269,32 +269,37 @@ def generate_reply(user_text: str) -> str:
269
  return "I'm a chatbot trained on Ubuntu Linux support conversations, so I may not understand this question."
270
  return reply
271
 
 
 
 
 
 
 
 
 
 
272
 
273
  # ------------- Gradio ChatInterface -------------
274
- def respond(message, history):
275
- reply = generate_reply(message)
276
- return reply
277
-
278
-
279
- # Create two UI windows side by side (both use the same model for now)
280
- chat1 = gr.ChatInterface(
281
- fn=respond,
282
- title="Ubuntu Chatbot without attention",
283
- description="A generative chatbot trained on Ubuntu dialogue pairs (seq2seq with attention)."
284
- )
285
-
286
- chat2 = gr.ChatInterface(
287
- fn=respond,
288
- title="Ubuntu Chatbot with attention",
289
- description="A generative chatbot trained on Ubuntu dialogue pairs (seq2seq with attention)."
290
- )
291
-
292
  with gr.Blocks() as demo:
293
- gr.Markdown("# Ubuntu Chatbot (Seq2Seq + GRU) Developed by Group E")
294
- gr.Row([
295
- chat1,
296
- chat2
297
- ])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
298
 
299
  if __name__ == "__main__":
300
  demo.launch()
 
258
 
259
 
260
  # ------------- wrapper to go from user text → reply -------------
261
+ def generate_reply_no_attn(user_text: str) -> str:
262
  # replicate notebook logic: reverse the input sentence
263
  user_text_rev = reverse(user_text)
264
  tokens = tokenize(user_text_rev)
 
269
  return "I'm a chatbot trained on Ubuntu Linux support conversations, so I may not understand this question."
270
  return reply
271
 
272
+ def generate_reply_attn(user_text: str) -> str:
273
+ """
274
+ Inference using the ATTENTION model.
275
+ Replace body with your encoder/decoder calls (beam or greedy).
276
+ """
277
+ # Example placeholder:
278
+ # src = text_to_tensor(user_text)
279
+ # return beam_generate_attn(src)
280
+ return "Reply from ATTENTION model (replace with real function)"
281
 
282
  # ------------- Gradio ChatInterface -------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
283
  with gr.Blocks() as demo:
284
+ gr.Markdown("# Ubuntu Chatbot Comparison No Attention vs Attention")
285
+
286
+ with gr.Row():
287
+
288
+ # Left column — No Attention model
289
+ with gr.Column():
290
+ gr.Markdown("### No Attention Model")
291
+ inp1 = gr.Textbox(label="Your question (No-Attn)", placeholder="Type here...")
292
+ out1 = gr.Textbox(label="Bot Reply (No-Attn)")
293
+ btn1 = gr.Button("Ask No-Attn Bot")
294
+ btn1.click(fn=generate_reply_no_attn, inputs=inp1, outputs=out1)
295
+
296
+ # Right column — Attention model
297
+ with gr.Column():
298
+ gr.Markdown("### Attention Model")
299
+ inp2 = gr.Textbox(label="Your question (Attn)", placeholder="Type here...")
300
+ out2 = gr.Textbox(label="Bot Reply (Attn)")
301
+ btn2 = gr.Button("Ask Attn Bot")
302
+ btn2.click(fn=generate_reply_attn, inputs=inp2, outputs=out2)
303
 
304
  if __name__ == "__main__":
305
  demo.launch()