Cyantist8208 committed on
Commit
5dce132
·
1 Parent(s): 76f07e5

sys prompt

Browse files
Files changed (1) hide show
  1. app.py +21 -11
app.py CHANGED
@@ -78,33 +78,43 @@ def add_docs(user_id: str, docs: list[str]) -> int:
78
  return len(docs)
79
# ----- Qwen-chat prompt helper ---------------------------------------------
def build_llm_prompt(system: str, context: list[str], user_question: str) -> str:
    """Return a Qwen-style prompt with multiple context items.

    Args:
        system: System instruction placed in the ``<<SYS>>`` header.
        context: Context passages; each non-empty passage becomes a
            separate user turn. Empty/whitespace-only items are skipped.
        user_question: Final user question, appended as the last turn.

    Returns:
        A single prompt string in LLaMA-style ``<<SYS>>`` / ``[INST]`` markup.
    """
    load_chat()  # ensure the tokenizer is loaded

    conversation = [
        {"role": "system", "content": system}
    ]

    # Treat each context passage as a user-provided hint.
    for ctx in context:
        if ctx.strip():  # skip empty content
            conversation.append({"role": "user", "content": ctx})

    # Append the final question.
    conversation.append({"role": "user", "content": user_question})

    # Build the prompt with join instead of quadratic `+=` accumulation;
    # strip each turn's content exactly once (the old code re-stripped an
    # already-stripped string inside the f-strings).
    parts = []
    for turn in conversation:
        role = turn["role"]
        content = turn["content"].strip()
        if role == "system":
            parts.append(f"<<SYS>>\n{content}\n<</SYS>>\n\n")
        elif role == "user":
            parts.append(f"[INST] {content} [/INST]\n")
        elif role == "assistant":
            parts.append(f"{content}\n")
    return "".join(parts)
107
-
108
 
109
  # ---------- 4. Gradio playground (same UI as before) --------------------------
110
  def store_doc(doc_text: str, user_id="demo"):
 
78
  return len(docs)
79
  # ----- Qwen-chat prompt helper ---------------------------------------------
80
def build_llm_prompt(system: str, context: list[str], user_question: str) -> str:
    """Build a LLaMA/Qwen-style prompt from a system instruction, multiple
    context passages, and a final user question.

    The system instruction is hardened so the model is told to emit only
    the final reply, with no explanations or step-by-step analysis.

    Args:
        system: Base system instruction; whitespace is trimmed.
        context: Context passages; empty/whitespace-only items are dropped,
            the rest become individual user turns (trimmed).
        user_question: The question appended as the last user turn (trimmed).

    Returns:
        One prompt string in ``<<SYS>>`` / ``[INST]`` markup.
    """
    load_chat()  # make sure the tokenizer is loaded

    # Hardened instruction: forbid explanations and reasoning steps.
    system_prompt = (
        f"{system.strip()}\n"
        "Do not include any explanations, steps, or analysis. "
        "Only output the final reply content."
    )

    # Assemble the turn list: system first, then each non-empty context
    # passage as its own user turn, finally the user's question.
    turns = [{"role": "system", "content": system_prompt}]
    turns.extend(
        {"role": "user", "content": chunk.strip()}
        for chunk in context
        if chunk.strip()
    )
    turns.append({"role": "user", "content": user_question.strip()})

    # Render LLaMA-style markup for each role and join once at the end.
    pieces = []
    for turn in turns:
        text = turn["content"]
        if turn["role"] == "system":
            pieces.append(f"<<SYS>>\n{text}\n<</SYS>>\n\n")
        elif turn["role"] == "user":
            pieces.append(f"[INST] {text} [/INST]\n")
        elif turn["role"] == "assistant":
            pieces.append(f"{text}\n")
    return "".join(pieces)
 
118
 
119
  # ---------- 4. Gradio playground (same UI as before) --------------------------
120
  def store_doc(doc_text: str, user_id="demo"):