hamza2923 committed on
Commit
eeae524
·
verified ·
1 Parent(s): da466c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -30
app.py CHANGED
@@ -3,13 +3,14 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
  import re
5
 
6
- # Load model (small footprint for demo)
7
  model_name = "microsoft/DialoGPT-small"
8
  tokenizer = AutoTokenizer.from_pretrained(model_name)
9
  tokenizer.pad_token = tokenizer.eos_token
10
  model = AutoModelForCausalLM.from_pretrained(model_name)
 
11
 
12
- # System prompt for IT services context
13
  SYSTEM_PROMPT = """You are MediaDecode's AI assistant, a professional IT services and software development company.
14
  Provide concise, technical yet friendly answers about:
15
  - Web/Mobile Development (Java, PHP, .NET, C++)
@@ -19,7 +20,7 @@ Provide concise, technical yet friendly answers about:
19
  - Contact: +1 (647) 928-8768 | info@mediadecode.com
20
  If unsure, direct users to contact sales/support."""
21
 
22
- # Predefined FAQ responses (regex patterns)
23
  FAQ_RESPONSES = {
24
  r"(?i)(contact|phone|email|address)": "πŸ“ž **Contact Us:**\n- Canada: +1 (647) 928-8768\n- Pakistan: +92 307 5090706\nβœ‰οΈ **Email:** info@mediadecode.com\nπŸ“ **Addresses:** [See our website](https://mediadecode.com)",
25
  r"(?i)(service|offer|provide)": "πŸ›  **Our Services:**\n1. Custom Software Development\n2. Web/Mobile Apps (Java/PHP/.NET)\n3. UI/UX Design\n4. Staff Augmentation\n5. AI/ML Solutions\n\nπŸ’‘ See all: [Services Page](https://mediadecode.com/services)",
@@ -28,61 +29,64 @@ FAQ_RESPONSES = {
28
  r"(?i)(technology|tech stack|tools)": "βš™οΈ **Technologies:**\n- Web: Java, PHP, .NET, C++\n- Mobile: iOS, Android\n- AI/ML, IoT, Wearables\n\nπŸ”— Full list: [Tech Stack](https://mediadecode.com/technology)"
29
  }
30
 
31
- # Fallback for unmatched queries
32
  DEFAULT_RESPONSE = """πŸ€– I couldn't find a specific answer. For detailed inquiries:
33
  - Call: +1 (647) 928-8768
34
  - Email: info@mediadecode.com
35
  - Visit: [Support Page](https://mediadecode.com/contact)"""
36
 
 
37
  def respond(message, chat_history):
38
- # Check FAQ matches first
39
  for pattern, response in FAQ_RESPONSES.items():
40
  if re.search(pattern, message):
41
- return f"{response}\n\nHow else can I help?"
42
-
43
- # AI-generated response for complex queries
44
  prompt = f"{SYSTEM_PROMPT}\nUser: {message}\nAI:"
45
  inputs = tokenizer(prompt, return_tensors="pt")
46
- outputs = model.generate(
47
- inputs.input_ids,
48
- max_length=200,
49
- pad_token_id=tokenizer.eos_token_id,
50
- temperature=0.7
51
- )
 
52
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
53
-
54
- # Post-process AI response
55
  response = response.split("AI:")[-1].strip()
56
- if not response or len(response) < 5:
57
- response = DEFAULT_RESPONSE
58
- elif any(phrase in response.lower() for phrase in ["i don't know", "not sure"]):
59
- response = DEFAULT_RESPONSE
60
-
61
  return response
62
 
63
- # Gradio Interface
64
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
65
  gr.Markdown("""
66
  # πŸš€ MediaDecode Support Bot
67
  **Ask about:** Services | Pricing | Technologies | Contact
68
  *Powered by AI + Company Knowledge Base*
69
  """)
70
-
71
  chatbot = gr.Chatbot(height=400)
72
  msg = gr.Textbox(label="Your Query", placeholder="e.g., What's your pricing for Java development?")
73
  clear = gr.Button("Clear Chat")
74
-
75
  def user(user_message, history):
76
  return "", history + [[user_message, None]]
77
-
78
  def bot(history):
79
- bot_message = respond(history[-1][0], history[:-1])
80
- history[-1][1] = bot_message
 
 
81
  return history
82
-
83
  msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
84
  bot, chatbot, chatbot
85
  )
86
- clear.click(lambda: None, None, chatbot, queue=False)
87
 
88
- demo.launch()
 
 
 
3
  import torch
4
  import re
5
 
6
# Load a small-footprint conversational model for the demo.
model_name = "microsoft/DialoGPT-small"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# DialoGPT ships without a pad token; reuse EOS on both tokenizer and model
# config so generation does not complain about a missing pad token.
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id
12
 
13
+ # System prompt for company context
14
  SYSTEM_PROMPT = """You are MediaDecode's AI assistant, a professional IT services and software development company.
15
  Provide concise, technical yet friendly answers about:
16
  - Web/Mobile Development (Java, PHP, .NET, C++)
 
20
  - Contact: +1 (647) 928-8768 | info@mediadecode.com
21
  If unsure, direct users to contact sales/support."""
22
 
23
+ # FAQ patterns
24
  FAQ_RESPONSES = {
25
  r"(?i)(contact|phone|email|address)": "πŸ“ž **Contact Us:**\n- Canada: +1 (647) 928-8768\n- Pakistan: +92 307 5090706\nβœ‰οΈ **Email:** info@mediadecode.com\nπŸ“ **Addresses:** [See our website](https://mediadecode.com)",
26
  r"(?i)(service|offer|provide)": "πŸ›  **Our Services:**\n1. Custom Software Development\n2. Web/Mobile Apps (Java/PHP/.NET)\n3. UI/UX Design\n4. Staff Augmentation\n5. AI/ML Solutions\n\nπŸ’‘ See all: [Services Page](https://mediadecode.com/services)",
 
29
  r"(?i)(technology|tech stack|tools)": "βš™οΈ **Technologies:**\n- Web: Java, PHP, .NET, C++\n- Mobile: iOS, Android\n- AI/ML, IoT, Wearables\n\nπŸ”— Full list: [Tech Stack](https://mediadecode.com/technology)"
30
  }
31
 
32
+ # Fallback message
33
  DEFAULT_RESPONSE = """πŸ€– I couldn't find a specific answer. For detailed inquiries:
34
  - Call: +1 (647) 928-8768
35
  - Email: info@mediadecode.com
36
  - Visit: [Support Page](https://mediadecode.com/contact)"""
37
 
38
# Response generator
def respond(message, chat_history):
    """Return a reply string for *message*.

    Tries the regex FAQ table first; otherwise generates a reply with the
    language model and falls back to DEFAULT_RESPONSE when the model output
    is empty or unhelpful. *chat_history* is accepted for interface
    compatibility with the Gradio wiring but is not currently used.
    """
    # Check for predefined FAQs before touching the model.
    for pattern, canned in FAQ_RESPONSES.items():
        if re.search(pattern, message):
            return f"{canned}\n\nHow else can I help?"

    # Generate an AI response for everything else.
    prompt = f"{SYSTEM_PROMPT}\nUser: {message}\nAI:"
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            # Explicit mask: pad == eos here, so generate() cannot infer it.
            attention_mask=inputs.attention_mask,
            # max_length would include the (long) prompt and could leave no
            # room for the reply; cap only the newly generated tokens.
            max_new_tokens=200,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,  # temperature has no effect under greedy decoding
            temperature=0.7,
        )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    response = response.split("AI:")[-1].strip()

    # Fall back when the model produced nothing useful.
    unhelpful = ("i don't know", "not sure")
    if not response or len(response) < 5 or any(
        phrase in response.lower() for phrase in unhelpful
    ):
        return DEFAULT_RESPONSE

    return response
63
 
64
# Gradio interface: chat window, query box, and clear button wired to respond().
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🚀 MediaDecode Support Bot
    **Ask about:** Services | Pricing | Technologies | Contact
    *Powered by AI + Company Knowledge Base*
    """)

    chatbot = gr.Chatbot(height=400)
    msg = gr.Textbox(label="Your Query", placeholder="e.g., What's your pricing for Java development?")
    clear = gr.Button("Clear Chat")

    def user(user_message, history):
        # Append the user's turn (bot slot left empty) and clear the textbox.
        return "", history + [[user_message, None]]

    def bot(history):
        # Fill in the bot half of the newest turn; ignore empty submissions.
        if not history or not history[-1][0]:
            return history
        history[-1][1] = respond(history[-1][0], history[:-1])
        return history

    # Two-step update: echo the user's message immediately, then add the reply.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    clear.click(lambda: [], None, chatbot, queue=False)

demo.launch()