Build error
Will chat work better with system prompt?
app.py CHANGED
@@ -35,11 +35,11 @@ model_info ={
 }
 
 def format_promt(message, custom_instructions=None):
-
+    messages = []
     if custom_instructions:
-
-
-        return
+        messages.append({"role": "system", "content": custom_instructions})
+    messages.append({"role": "user", "content": message})
+    return {"inputs": {"messages": messages}}
 
 def reset_conversation():
     '''
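The new helper wraps the user message, plus the optional system prompt, into a chat-style payload. A minimal sketch of what it returns, using the function exactly as committed (identifier kept with its `promt` spelling; the example inputs are hypothetical):

    # Hypothetical call, for illustration only.
    payload = format_promt("What is Streamlit?", custom_instructions="Answer briefly.")
    # payload == {"inputs": {"messages": [
    #     {"role": "system", "content": "Answer briefly."},
    #     {"role": "user", "content": "What is Streamlit?"}]}}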
@@ -79,7 +79,7 @@ temp_values = st.sidebar.slider(
 
 custom_instructions = st.sidebar.text_area(
     "Custom Instructions",
-    value="You are helpful assistant, act like a Human in conversation. Keep
+    value="You are helpful assistant, act like a Human in conversation. Keep answers very short and in English only!",
     help="Customize how the AI should behave"
 )
 
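Presumably this sidebar value is what gets threaded into `format_promt` as the system prompt; the call site is not shown in this diff, but under that assumption the wiring would look something like:

    # Assumed call site (not part of this diff): the sidebar text becomes the
    # system prompt for each turn. `formated_text` is the name the diff uses.
    formated_text = format_promt(prompt, custom_instructions=custom_instructions)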
@@ -130,11 +130,13 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"
     client = InferenceClient(
         model=model_links[selected_model],)
 
-    output = client.
-        formated_text,
-
-
-
+    output = client.post(
+        json=formated_text,
+        params={
+            "temperature": temp_values,
+            "max_new_tokens": 1000,
+            "stream": True
+        }
     )
 
     # Create a placeholder for the streaming response
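One caution on this hunk: `huggingface_hub`'s `InferenceClient.post` is documented to take `json`, `data`, `model`, `task`, and `stream` keyword arguments, not `params`, so this call would likely fail with a `TypeError` at runtime. The Inference API convention is to fold sampling options into the JSON body instead. A hedged sketch of that shape, reusing the names from the diff:

    # Sketch, not the committed code: move generation options into the payload,
    # assuming the conventional {"inputs": ..., "parameters": {...}} request shape.
    formated_text["parameters"] = {"temperature": temp_values, "max_new_tokens": 1000}
    output = client.post(json=formated_text, stream=True)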
@@ -143,7 +145,14 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"
 
     # Stream the response and accumulate it
     for chunk in output:
-
+        if isinstance(chunk, dict) and "generated_text" in chunk:
+            text_chunk = chunk["generated_text"]
+        elif isinstance(chunk, str):
+            text_chunk = chunk
+        else:
+            continue
+
+        full_response += text_chunk
         message_placeholder.markdown(full_response + "▌")
 
     # Display final response and store it
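A related caution: with `stream=True`, `InferenceClient.post` yields raw `bytes` (server-sent-event lines), not dicts or strings, so both `isinstance` branches above would be skipped and `full_response` would stay empty. A minimal sketch of decoding such chunks, assuming text-generation-inference-style `data: {"token": {"text": ...}}` events:

    import json

    # Assumes `output` yields SSE byte lines and `message_placeholder` is the
    # st.empty() placeholder created earlier in app.py.
    full_response = ""
    for raw in output:
        line = raw.decode("utf-8").strip()
        if not line.startswith("data:"):
            continue
        event = json.loads(line[len("data:"):])
        full_response += event.get("token", {}).get("text", "")
        message_placeholder.markdown(full_response + "▌")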