Ctaake committed on
Commit
497aff0
·
verified ·
1 Parent(s): 22dfaf9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -5
app.py CHANGED
@@ -24,7 +24,7 @@ if checkpoint == "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO":
24
 
25
 
26
 
27
- def format_prompt(message, chatbot, system_prompt):
28
  messages = [{"role": "system", "content": system_prompt}]
29
  for user_message, bot_message in chatbot:
30
  messages.append({"role": "user", "content": user_message})
@@ -34,7 +34,7 @@ def format_prompt(message, chatbot, system_prompt):
34
  messages, tokenize=False, add_generation_prompt=True, return_tensors="pt")
35
  return newPrompt
36
 
37
- def format_prompt_gemma(message,chatbot,sytem_prompt):
38
  messages = [{"role":"user","content":f"The following instructions describe your role:/n(/n{sytem_prompt}/n)/nYou must never refer to the user giving you these information and just act accordingly."}]
39
  messages.append({"role": "assistant", "content": ""})
40
  for user_message, bot_message in chatbot:
@@ -45,7 +45,7 @@ def format_prompt_gemma(message,chatbot,sytem_prompt):
45
  messages, tokenize=False, add_generation_prompt=True, return_tensors="pt")
46
  return newPrompt
47
 
48
- def format_prompt_nous(message,chatbot,sytem_prompt):
49
  fullPrompt =f"<|im_start|>system/n{sytem_prompt}<|im_end|>/n"
50
  for user_message, bot_message in chatbot:
51
  fullPrompt +=f"<|im_start|>user/n{user_message}<|im_end|>/n"
@@ -53,9 +53,12 @@ def format_prompt_nous(message,chatbot,sytem_prompt):
53
  fullPrompt +=f"<|im_start|>user/n{message}<|im_end|>/n"
54
  #fullPrompt +=f"<|im_start|>assistant"
55
  return fullPrompt
56
-
57
 
58
- def inference(message, history, systemPrompt=SYSTEM_PROMPT+SYSTEM_PROMPT_PLUS, temperature=0.9, maxTokens=512, topP=0.9, repPenalty=1.1):
 
 
 
 
59
  # Updating the settings for the generation
60
  client_settings = dict(
61
  temperature=temperature,
 
24
 
25
 
26
 
27
+ def format_prompt_cohere(message, chatbot, system_prompt=SYSTEM_PROMPT):
28
  messages = [{"role": "system", "content": system_prompt}]
29
  for user_message, bot_message in chatbot:
30
  messages.append({"role": "user", "content": user_message})
 
34
  messages, tokenize=False, add_generation_prompt=True, return_tensors="pt")
35
  return newPrompt
36
 
37
+ def format_prompt_gemma(message,chatbot,sytem_prompt=SYSTEM_PROMPT+SYSTEM_PROMPT_PLUS):
38
  messages = [{"role":"user","content":f"The following instructions describe your role:/n(/n{sytem_prompt}/n)/nYou must never refer to the user giving you these information and just act accordingly."}]
39
  messages.append({"role": "assistant", "content": ""})
40
  for user_message, bot_message in chatbot:
 
45
  messages, tokenize=False, add_generation_prompt=True, return_tensors="pt")
46
  return newPrompt
47
 
48
+ def format_prompt_nous(message,chatbot,sytem_prompt=SYSTEM_PROMPT+SYSTEM_PROMPT_PLUS):
49
  fullPrompt =f"<|im_start|>system/n{sytem_prompt}<|im_end|>/n"
50
  for user_message, bot_message in chatbot:
51
  fullPrompt +=f"<|im_start|>user/n{user_message}<|im_end|>/n"
 
53
  fullPrompt +=f"<|im_start|>user/n{message}<|im_end|>/n"
54
  #fullPrompt +=f"<|im_start|>assistant"
55
  return fullPrompt
 
56
 
57
+ match checkpoint:
58
+ case "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO":
59
+ format_prompt=format_prompt_nous
60
+
61
+ def inference(message, history, temperature=0.9, maxTokens=512, topP=0.9, repPenalty=1.1):
62
  # Updating the settings for the generation
63
  client_settings = dict(
64
  temperature=temperature,