bdstar committed on
Commit
1c04da9
·
verified ·
1 Parent(s): 4d66d17

one-line, friendly, human-sounding responses

Browse files
Files changed (1) hide show
  1. app.py +5 -6
app.py CHANGED
@@ -36,11 +36,10 @@ def speech_to_text(audio_path: str) -> str:
36
  # ---- LLM (Ollama) ----
37
  # ollama = OllamaClient(host="http://127.0.0.1:11434")
38
 
39
- SYSTEM_PROMPT = """You are a friendly and engaging AI voice assistant.
40
- Respond naturally in a warm, human-like tone.
41
- Keep replies short (1–2 sentences) and conversational.
42
- Format:
43
- Reply: <your friendly response to keep the conversation going>"""
44
 
45
  def chat_with_llm(history_messages, user_text):
46
  if USE_REMOTE_OLLAMA:
@@ -54,7 +53,7 @@ def chat_with_llm(history_messages, user_text):
54
  else:
55
  # Only system + current user
56
  prompt = f"{SYSTEM_PROMPT}\nUser: {user_text}\nAssistant:"
57
- out = gen(prompt, return_full_text=False, max_new_tokens=80, temperature=0.7, repetition_penalty=1.1)[0]["generated_text"].strip()
58
  return out
59
 
60
 
 
36
  # ---- LLM (Ollama) ----
37
  # ollama = OllamaClient(host="http://127.0.0.1:11434")
38
 
39
+ SYSTEM_PROMPT = """You are a friendly AI voice assistant.
40
+ Reply in one short, natural sentence only.
41
+ Sound warm and conversational, never formal.
42
+ Avoid multi-sentence or paragraph answers."""
 
43
 
44
  def chat_with_llm(history_messages, user_text):
45
  if USE_REMOTE_OLLAMA:
 
53
  else:
54
  # Only system + current user
55
  prompt = f"{SYSTEM_PROMPT}\nUser: {user_text}\nAssistant:"
56
+ out = gen(prompt, return_full_text=False, max_new_tokens=25, temperature=0.8, repetition_penalty=1.1,)[0]["generated_text"].split("\n")[0].strip()
57
  return out
58
 
59