Update app.py
app.py CHANGED
@@ -82,10 +82,12 @@ def count_tokens(text: str) -> int:
 
 def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
     """Truncates the conversation history to fit within the maximum token limit.
+
     Args:
         history: The conversation history (list of user/assistant tuples).
         system_message: The system message.
         max_length: The maximum number of tokens allowed.
+
     Returns:
         The truncated history.
     """
@@ -100,7 +102,7 @@ def truncate_history(history: list[tuple[str, str]], system_message: str, max_le
         turn_tokens = user_tokens + assistant_tokens
 
         if current_length + turn_tokens <= max_length:
-            truncated_history.insert(0, (user_msg, assistant_msg))
+            truncated_history.insert(0, (user_msg, assistant_msg))  # Add to the beginning
             current_length += turn_tokens
         else:
             break  # Stop adding turns if we exceed the limit
@@ -114,40 +116,39 @@ def respond(
     max_tokens,
     temperature,
     top_p,
-    clear_memory  # Added extra parameter to match the 7 inputs provided
 ):
     """Responds to a user message, maintaining conversation history, using special tokens and message list."""
-
-    if message.lower() == "clear memory"
+
+    if message.lower() == "clear memory":  # Check for the clear memory command
         return "", []  # Return empty message and empty history to reset the chat
 
     formatted_system_message = system_message  # Use the system_message argument
-    truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)
+    truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)  # Reserve space for the new message and some generation
 
-    messages = [{"role": "system", "content": formatted_system_message}]
+    messages = [{"role": "system", "content": formatted_system_message}]  # Start with system message as before
     for user_msg, assistant_msg in truncated_history:
         if user_msg:
-            messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})
+            messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})  # Format history user message
         if assistant_msg:
-            messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})
+            messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})  # Format history assistant message
 
-    messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})
+    messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})  # Format current user message
 
     response = ""
     try:
-
-
-
-
-
-
-
-
-
-
+        for chunk in client.chat_completion(
+            messages,  # Send the messages list again, but with formatted content
+            max_tokens=max_tokens,
+            stream=True,
+            temperature=temperature,
+            top_p=top_p,
+        ):
+            token = chunk.choices[0].delta.content
+            response += token
+            yield response
     except Exception as e:
-
-
+        print(f"An error occurred: {e}")  # It's good practice to add a try-except block
+        yield "I'm sorry, I encountered an error. Please try again."
 
 # --- Gradio Interface ---
 demo = gr.ChatInterface(
@@ -157,14 +158,19 @@ demo = gr.ChatInterface(
             value=default_nvc_prompt_template,
             label="System message",
             visible=True,
-            lines=10  # Increased height for more space to read the prompt
+            lines=10,  # Increased height for more space to read the prompt
         ),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
+        ),
     ],
 )
 
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch(share=True)
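Only fragments of truncate_history appear in the hunks above. Below is a minimal sketch of the full pattern they imply, assuming a count_tokens helper along the lines of the one named in the first hunk header; the stand-in tokenizer here is purely illustrative, not the app's real one:

def count_tokens(text: str) -> int:
    # Crude stand-in for illustration; the app's version presumably wraps the model tokenizer.
    return len(text.split())

def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
    """Keeps the most recent turns that fit within max_length tokens (sketch, not the committed code)."""
    truncated_history: list[tuple[str, str]] = []
    current_length = count_tokens(system_message)  # assumption: the budget also covers the system message
    for user_msg, assistant_msg in reversed(history):  # walk from the newest turn backwards
        user_tokens = count_tokens(user_msg)
        assistant_tokens = count_tokens(assistant_msg)
        turn_tokens = user_tokens + assistant_tokens

        if current_length + turn_tokens <= max_length:
            truncated_history.insert(0, (user_msg, assistant_msg))  # re-insert so the result stays chronological
            current_length += turn_tokens
        else:
            break  # Stop adding turns if we exceed the limit
    return truncated_history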
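The streaming loop added inside the try block assumes client exposes chat_completion(..., stream=True). A standalone sketch of that call, assuming client is a huggingface_hub.InferenceClient; the model ID below is a placeholder, not taken from this commit:

from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")  # placeholder model ID; app.py defines its own

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "<|user|>\nHello!</s>"},  # same Zephyr-style tags the diff uses
]

response = ""
for chunk in client.chat_completion(messages, max_tokens=64, stream=True, temperature=0.7, top_p=0.95):
    token = chunk.choices[0].delta.content
    if token:  # delta.content can be None on some chunks, so guard before concatenating
        response += token
print(response)

Note that the committed loop concatenates token without this None guard; a None delta would raise a TypeError, which the except branch would then mask behind the generic apology message.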
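For the last hunk: the textbox and sliders sit inside a bracketed list that gr.ChatInterface receives, presumably as additional_inputs. A minimal sketch of that wiring under that assumption, with stubs standing in for the names the diff takes from elsewhere in app.py:

import gradio as gr

def respond(message, history, system_message, max_tokens, temperature, top_p):
    yield f"echo: {message}"  # stub standing in for the diff's streaming generator

default_nvc_prompt_template = "You are an empathic NVC assistant."  # stub value, not the real prompt

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value=default_nvc_prompt_template, label="System message", visible=True, lines=10),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch(share=True)

This wiring would also explain why the extra clear_memory parameter was dropped: ChatInterface calls respond(message, history, *additional_inputs), so the function signature has to match the input list exactly.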