pakito312 committed on
Commit
a08be4a
·
1 Parent(s): 5ba455c
Files changed (1) hide show
  1. api.py +21 -7
api.py CHANGED
@@ -249,6 +249,7 @@ class ModelManager:
249
  f"Prompt: {prompt}\n\n"
250
  f"Error: {str(e)[:200]}"
251
  )
 
252
  def chat(
253
  self,
254
  messages: List[dict],
@@ -261,20 +262,33 @@ class ModelManager:
261
  if isinstance(self.llm, DummyLLM):
262
  return self.llm.chat(messages, temperature, max_tokens)
263
 
264
- prompt = ""
 
 
 
265
  for msg in messages:
266
- role = msg["role"]
267
- content = msg["content"]
268
- prompt += f"{role.upper()}: {content}\n"
269
- prompt += "ASSISTANT:"
 
 
 
 
 
 
270
 
271
  response = self.llm(
272
  prompt,
273
  max_tokens=max_tokens,
274
- temperature=temperature
 
 
275
  )
276
 
277
- return response["choices"][0]["text"]
 
 
278
 
279
 
280
 
 
249
  f"Prompt: {prompt}\n\n"
250
  f"Error: {str(e)[:200]}"
251
  )
252
+
253
  def chat(
254
  self,
255
  messages: List[dict],
 
262
  if isinstance(self.llm, DummyLLM):
263
  return self.llm.chat(messages, temperature, max_tokens)
264
 
265
+ # ---- Prompt Instruct DeepSeek ----
266
+ system_prompt = ""
267
+ user_prompt = ""
268
+
269
  for msg in messages:
270
+ if msg["role"] == "system":
271
+ system_prompt += msg["content"] + "\n"
272
+ elif msg["role"] == "user":
273
+ user_prompt += msg["content"] + "\n"
274
+
275
+ prompt = f"""### Instruction:
276
+ {system_prompt}{user_prompt}
277
+
278
+ ### Response:
279
+ """
280
 
281
  response = self.llm(
282
  prompt,
283
  max_tokens=max_tokens,
284
+ temperature=temperature,
285
+ top_p=0.95,
286
+ echo=False
287
  )
288
 
289
+ return response["choices"][0]["text"].strip()
290
+
291
+
292
 
293
 
294