bscs-27-005 committed on
Commit
2731fda
·
verified ·
1 Parent(s): 3b4c884

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -23
app.py CHANGED
@@ -5,25 +5,54 @@ from gtts import gTTS
5
  import torch
6
  import os
7
 
8
- # --- 1. YOUR COMPANY DATA (Strict Instructions added) ---
 
9
  COMPANY_KNOWLEDGE = """
10
- You are the reception AI for 'TechNova Solutions'.
11
- Rules for answering:
12
- 1. Answer ONLY based on the facts below.
13
- 2. Keep answers EXTREMELY SHORT (Maximum 1 sentence).
14
- 3. Be direct. Do not say "Hello" or "Sure" every time. Just give the answer.
15
-
16
- FACTS:
17
- - WHAT WE DO: We build custom solar-powered drones for agriculture.
18
- - PRICING: Basic drone is $500. Pro drone is $1200.
19
- - RETURN POLICY: 30-day money-back guarantee.
20
- - CONTACT: Email help@technova.com or call +1-555-0199.
21
- - HOURS: Mon-Fri, 9 AM to 5 PM EST.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  """
23
 
24
  # --- 2. SETUP ---
25
  # Replace with your actual token
26
- hf_token = os.getenv("HF_TOKEN") or "YOUR_HF_TOKEN_HERE"
 
 
 
27
  client = InferenceClient(api_key=hf_token)
28
 
29
  device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -34,28 +63,28 @@ def voice_chat(audio, history):
34
  return None, "", history
35
 
36
  # A. Initialize History with Company Knowledge (if empty)
 
37
  if not history:
38
  history = [{"role": "system", "content": COMPANY_KNOWLEDGE}]
39
 
40
  try:
41
- # B. Hear
42
  transcription = whisper_model.transcribe(audio, fp16=False)["text"]
43
 
44
- # C. Think
45
- # We append the user text
46
  history.append({"role": "user", "content": transcription})
47
 
48
  response = client.chat.completions.create(
49
  model="HuggingFaceH4/zephyr-7b-beta",
50
  messages=history,
51
- max_tokens=60, # Reduced to 60 to force brevity
52
- temperature=0.5 # Lower temperature = more precise/less creative
53
  )
54
  ai_text = response.choices[0].message.content
55
 
56
  history.append({"role": "assistant", "content": ai_text})
57
 
58
- # D. Speak
59
  tts = gTTS(text=ai_text, lang='en')
60
  audio_path = "response.mp3"
61
  tts.save(audio_path)
@@ -63,22 +92,25 @@ def voice_chat(audio, history):
63
  return audio_path, ai_text, history
64
 
65
  except Exception as e:
 
66
  return None, f"Error: {str(e)}", history
67
 
68
  # --- 3. INTERFACE ---
69
  with gr.Blocks(theme="soft") as demo:
70
- gr.Markdown("## 🏢 Quick-Response Company AI")
71
 
 
72
  conversation_history = gr.State([])
73
 
74
  with gr.Row():
75
- input_audio = gr.Audio(sources=["microphone"], type="filepath", label="Ask Question")
76
 
77
  with gr.Row():
78
  output_audio = gr.Audio(label="AI Response", autoplay=True)
79
  output_text = gr.Textbox(label="Transcript")
80
 
81
- clear_btn = gr.Button("Reset")
 
82
 
83
  input_audio.change(
84
  voice_chat,
@@ -86,6 +118,7 @@ with gr.Blocks(theme="soft") as demo:
86
  outputs=[output_audio, output_text, conversation_history]
87
  )
88
 
 
89
  clear_btn.click(lambda: ([], None, ""), outputs=[conversation_history, output_audio, output_text])
90
 
91
  demo.launch(debug=True)
 
5
  import torch
6
  import os
7
 
8
+ # --- 1. YOUR COMPANY DATA ---
9
+ # This is the "Brain" of your company. Edit this text!
10
  COMPANY_KNOWLEDGE = """
11
+ You are the Senior Tech Consultant AI for 'SoftStream Tech', a software development agency.
12
+ Your goal is to answer client questions professionally, technically, and concisely.
13
+ INSTRUCTIONS:
14
+ 1. Answer in 20 WORDS OR LESS.
15
+ 2. Be direct. Do not use filler words like "Thank you for asking" or "I would be happy to help".
16
+ 3. If the answer is a list, pick only the top 2 items.
17
+
18
+ RULES FOR ANSWERING:
19
+ 1. Keep answers SHORT (1-2 sentences max). Clients are busy. The answer should be within 25 seconds.
20
+ 2. If asked about price, give ranges, not exact quotes.
21
+ 3. If you don't know the answer, say: "I'll need to check with a senior engineer on that."
22
+
23
+ DATA SHEET:
24
+ [Services]
25
+ - Custom Web Development: React, Vue, Next.js, Python (Django/FastAPI), Node.js.
26
+ - Mobile App Development: Flutter (Cross-platform), Swift (iOS), Kotlin (Android).
27
+ - AI & Machine Learning: Chatbots, Predictive Analytics, Computer Vision.
28
+ - Cloud DevOps: AWS, Google Cloud, Azure setup and CI/CD pipelines.
29
+
30
+ [Pricing Models]
31
+ - Time & Material (Hourly): $40 - $80 per hour depending on developer seniority.
32
+ - Fixed Price: Minimum project size is $5,000. Requires detailed scope.
33
+ - Retainer: Dedicated team for $4,000/month per developer.
34
+
35
+ [Process]
36
+ - Methodology: Agile/Scrum with 2-week sprints.
37
+ - Tools: We use Jira for tracking and Slack for communication.
38
+ - Timeline: MVP (Minimum Viable Product) usually takes 4-8 weeks.
39
+
40
+ [Support & Maintenance]
41
+ - Standard Support: Bug fixing for 3 months after launch (Free).
42
+ - Premium SLA: 24/7 server monitoring and priority support ($500/month).
43
+
44
+ [Contact]
45
+ - Email: projects@softstream.tech
46
+ - Phone: +1-555-CODE-NOW
47
+ - Location: San Francisco, CA (but we work remote globally).
48
  """
49
 
50
  # --- 2. SETUP ---
51
  # Replace with your actual token
52
+ hf_token = os.getenv("HF_TOKEN")
53
+
54
+ if not hf_token:
55
+ raise ValueError("Error: HF_TOKEN is missing. Go to Settings > Secrets and add it!")
56
  client = InferenceClient(api_key=hf_token)
57
 
58
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
63
  return None, "", history
64
 
65
  # A. Initialize History with Company Knowledge (if empty)
66
+ # This ensures the AI reads your company info before the first message
67
  if not history:
68
  history = [{"role": "system", "content": COMPANY_KNOWLEDGE}]
69
 
70
  try:
71
+ # B. Hear (Whisper)
72
  transcription = whisper_model.transcribe(audio, fp16=False)["text"]
73
 
74
+ # C. Think (AI with Context)
 
75
  history.append({"role": "user", "content": transcription})
76
 
77
  response = client.chat.completions.create(
78
  model="HuggingFaceH4/zephyr-7b-beta",
79
  messages=history,
80
+ max_tokens=50, # <--- STRICT LIMIT: ~30-40 words max
81
+ temperature=0.5 # <--- Low temp keeps it robotic and concise
82
  )
83
  ai_text = response.choices[0].message.content
84
 
85
  history.append({"role": "assistant", "content": ai_text})
86
 
87
+ # D. Speak (gTTS)
88
  tts = gTTS(text=ai_text, lang='en')
89
  audio_path = "response.mp3"
90
  tts.save(audio_path)
 
92
  return audio_path, ai_text, history
93
 
94
  except Exception as e:
95
+ # If there is an error, return the existing history so we don't lose it
96
  return None, f"Error: {str(e)}", history
97
 
98
  # --- 3. INTERFACE ---
99
  with gr.Blocks(theme="soft") as demo:
100
+ gr.Markdown("## 🏢 SoftStream Tech AI Consultant")
101
 
102
+ # Initialize state as empty list
103
  conversation_history = gr.State([])
104
 
105
  with gr.Row():
106
+ input_audio = gr.Audio(sources=["microphone"], type="filepath", label="Ask about our company")
107
 
108
  with gr.Row():
109
  output_audio = gr.Audio(label="AI Response", autoplay=True)
110
  output_text = gr.Textbox(label="Transcript")
111
 
112
+ # Add a clear button to reset the conversation
113
+ clear_btn = gr.Button("Reset Conversation")
114
 
115
  input_audio.change(
116
  voice_chat,
 
118
  outputs=[output_audio, output_text, conversation_history]
119
  )
120
 
121
+ # Reset logic
122
  clear_btn.click(lambda: ([], None, ""), outputs=[conversation_history, output_audio, output_text])
123
 
124
  demo.launch(debug=True)