hamza2923 committed on
Commit
da466c6
·
verified ·
1 Parent(s): 6fe40f1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -59
app.py CHANGED
@@ -1,75 +1,88 @@
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
 
4
 
5
# Load model and tokenizer
# (pre-commit version: DialoGPT-small, a lightweight conversational model)
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # DialoGPT defines no pad token; reuse EOS
model = AutoModelForCausalLM.from_pretrained(model_name)

# Move model to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
 
 
 
 
 
 
14
 
15
def respond(message, chat_history, chat_history_ids):
    """Generate a DialoGPT reply and thread token history through Gradio state.

    Args:
        message: Raw user input from the textbox.
        chat_history: List of (user, bot) tuples shown in the Chatbot widget,
            or None on the first call.
        chat_history_ids: Token-id tensor of the running conversation, or None.

    Returns:
        Tuple of (cleared textbox value, updated chat_history,
        updated chat_history_ids, error message or None).
    """
    # Reject empty/whitespace-only input without touching stored state.
    if not message.strip():
        return "", chat_history or [], chat_history_ids, "Please enter a message."

    if chat_history is None:
        chat_history = []

    # Encode the new turn, terminated by EOS as DialoGPT expects.
    new_input_ids = tokenizer.encode(message + tokenizer.eos_token, return_tensors="pt").to(device)

    # Append to the running token history (None means this is the first turn).
    input_ids = torch.cat([chat_history_ids, new_input_ids], dim=-1) if chat_history_ids is not None else new_input_ids

    try:
        # Sampled decoding; note max_length caps prompt + response combined.
        chat_history_ids = model.generate(
            input_ids,
            max_length=200,
            pad_token_id=tokenizer.eos_token_id,
            no_repeat_ngram_size=3,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.8
        )

        # Decode only the tokens generated after the prompt.
        response = tokenizer.decode(
            chat_history_ids[:, input_ids.shape[-1]:][0],
            skip_special_tokens=True
        )

        chat_history.append((message, response))

        # Trim to the last 10 turns and rebuild the token history to match.
        # NOTE(review): the diff rendering lost indentation — the two rebuild
        # lines below are assumed to sit inside this `if`; confirm against the
        # real file.
        # NOTE(review): the rebuilt text places EOS only after each
        # (msg, resp) pair, not between msg and resp, which looks inconsistent
        # with how turns were encoded at generation time — verify intended.
        if len(chat_history) > 10:
            chat_history = chat_history[-10:]
            history_text = "".join([msg + resp + tokenizer.eos_token for msg, resp in chat_history])
            chat_history_ids = tokenizer.encode(history_text, return_tensors="pt").to(device)

        return "", chat_history, chat_history_ids, None
    except Exception as e:
        # Surface the failure in the UI error box instead of crashing the callback.
        return "", chat_history, chat_history_ids, f"Error: {str(e)}"
53
-
54
def clear_history():
    """Reset the chat UI: empty transcript, no token history, no error text."""
    cleared_transcript = []
    cleared_token_state = None
    cleared_error = None
    return cleared_transcript, cleared_token_state, cleared_error
56
-
57
# Gradio UI (pre-commit version): transcript plus hidden token-history state.
with gr.Blocks() as demo:
    state = gr.State()  # holds chat_history_ids between callbacks
    gr.Markdown("## DialoGPT Chatbot")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Your Message", placeholder="Type your message here...")
    clear = gr.Button("Clear History")
    error = gr.Textbox(label="Error", interactive=False, visible=False)

    # Enter in the textbox runs respond(); outputs clear the box and update
    # the transcript, the token-history state, and the (hidden) error box.
    msg.submit(
        respond,
        inputs=[msg, chatbot, state],
        outputs=[msg, chatbot, state, error]
    )
    clear.click(
        fn=clear_history,
        inputs=None,
        outputs=[chatbot, state, error],
        queue=False  # run immediately, bypassing the event queue
    )
 
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
+ import re
5
 
6
# Load model (small footprint for demo)
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # DialoGPT defines no pad token; reuse EOS
model = AutoModelForCausalLM.from_pretrained(model_name)

# System prompt for IT services context, prepended to every model query.
SYSTEM_PROMPT = """You are MediaDecode's AI assistant, a professional IT services and software development company.
Provide concise, technical yet friendly answers about:
- Web/Mobile Development (Java, PHP, .NET, C++)
- Product Design & UI/UX
- Staff Augmentation
- Pricing Plans ($129.99-$189.99/month)
- Contact: +1 (647) 928-8768 | info@mediadecode.com
If unsure, direct users to contact sales/support."""

# Predefined FAQ responses: regex pattern -> canned markdown answer.
# Checked before the model is called; first match (dict order) wins.
FAQ_RESPONSES = {
    r"(?i)(contact|phone|email|address)": "πŸ“ž **Contact Us:**\n- Canada: +1 (647) 928-8768\n- Pakistan: +92 307 5090706\nβœ‰οΈ **Email:** info@mediadecode.com\nπŸ“ **Addresses:** [See our website](https://mediadecode.com)",
    r"(?i)(service|offer|provide)": "πŸ›  **Our Services:**\n1. Custom Software Development\n2. Web/Mobile Apps (Java/PHP/.NET)\n3. UI/UX Design\n4. Staff Augmentation\n5. AI/ML Solutions\n\nπŸ’‘ See all: [Services Page](https://mediadecode.com/services)",
    r"(?i)(price|plan|cost)": "πŸ’° **Pricing Plans:**\n- Basic: $129.99/mo\n- Economy: $159.99/mo\n- Premium: $189.99/mo\n\nIncludes consulting, 24/7 monitoring, and security. [Learn more](https://mediadecode.com/pricing)",
    r"(?i)(portfolio|projects|showcase)": "🎨 **Portfolio:** We've delivered 100+ projects in healthcare, e-commerce, and more. Explore: [Our Showcase](https://mediadecode.com/showcase)",
    r"(?i)(technology|tech stack|tools)": "βš™οΈ **Technologies:**\n- Web: Java, PHP, .NET, C++\n- Mobile: iOS, Android\n- AI/ML, IoT, Wearables\n\nπŸ”— Full list: [Tech Stack](https://mediadecode.com/technology)"
}

# Fallback reply for queries the FAQ misses and the model answers poorly.
DEFAULT_RESPONSE = """πŸ€– I couldn't find a specific answer. For detailed inquiries:
- Call: +1 (647) 928-8768
- Email: info@mediadecode.com
- Visit: [Support Page](https://mediadecode.com/contact)"""
36
 
37
def respond(message, chat_history):
    """Answer a user query: FAQ regex lookup first, model fallback second.

    Args:
        message: Raw user query string.
        chat_history: Prior (user, bot) turns from the UI; currently unused
            but kept for interface compatibility with the Gradio callbacks.

    Returns:
        A markdown-formatted reply string.
    """
    # Knowledge-base lookup: first matching pattern wins (dict order).
    for pattern, response in FAQ_RESPONSES.items():
        if re.search(pattern, message):
            return f"{response}\n\nHow else can I help?"

    # AI-generated response for queries the FAQ does not cover.
    prompt = f"{SYSTEM_PROMPT}\nUser: {message}\nAI:"
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():  # inference only; skip autograd bookkeeping
        outputs = model.generate(
            inputs.input_ids,
            # Pass the mask the tokenizer already built; without it generate()
            # guesses padding from pad==eos and can mis-handle the prompt.
            attention_mask=inputs.attention_mask,
            # Bug fix: the old max_length=200 counted the prompt too, and the
            # system prompt alone can approach that budget, starving the
            # reply. max_new_tokens bounds only the generated text.
            max_new_tokens=80,
            pad_token_id=tokenizer.eos_token_id,
            # Bug fix: temperature has no effect under the default greedy
            # decoding; sampling must be enabled for it to apply.
            do_sample=True,
            temperature=0.7,
        )

    # Decode only the newly generated tokens — more robust than decoding the
    # full sequence and splitting on the literal "AI:" marker.
    response = tokenizer.decode(
        outputs[0][inputs.input_ids.shape[-1]:],
        skip_special_tokens=True,
    ).strip()

    # Guard against empty, too-short, or non-committal model output.
    if len(response) < 5:
        response = DEFAULT_RESPONSE
    elif any(phrase in response.lower() for phrase in ["i don't know", "not sure"]):
        response = DEFAULT_RESPONSE

    return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
# Gradio Interface: chat transcript driven by a two-step submit chain.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # πŸš€ MediaDecode Support Bot
    **Ask about:** Services | Pricing | Technologies | Contact
    *Powered by AI + Company Knowledge Base*
    """)

    chatbot = gr.Chatbot(height=400)
    msg = gr.Textbox(label="Your Query", placeholder="e.g., What's your pricing for Java development?")
    clear = gr.Button("Clear Chat")

    def user(user_message, history):
        # Step 1: echo the user's message into the transcript immediately,
        # with a None placeholder for the pending bot reply; clear the box.
        return "", history + [[user_message, None]]

    def bot(history):
        # Step 2: fill in the placeholder from the last appended turn.
        bot_message = respond(history[-1][0], history[:-1])
        history[-1][1] = bot_message
        return history

    # user() runs unqueued for snappy echo, then bot() generates the reply.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    # Clearing just empties the Chatbot widget (no other state to reset here).
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()