Update app.py
app.py
CHANGED
@@ -111,18 +111,19 @@ chunk_embeddings = create_embeddings(cleaned_chunks)
 client= InferenceClient("Qwen/Qwen2.5-7B-Instruct-1M")

 #defining role of AI and user
-def respond(message,history):
+# i moved it to the bottom
+#def respond(message,history):

-    messages = [{"role": "assistant", "content": "You are a friendly chatbot."}]
+# messages = [{"role": "assistant", "content": "You are a friendly chatbot."}]

-    if history:
-        messages.extend(history) #keep adding history
+# if history:
+# messages.extend(history) #keep adding history

-    messages.append({"role":"user", "content": message})
+#messages.append({"role":"user", "content": message})

-    response=client.chat_completion(messages, max_tokens=100) #capping how many tokens the LLM is allowed to generate as a response (100 tokens)
+#response=client.chat_completion(messages, max_tokens=100) #capping how many tokens the LLM is allowed to generate as a response (100 tokens)

-    return response['choices'][0]['message']['content'].strip() #storing value of response in a readable format to display
+# return response['choices'][0]['message']['content'].strip() #storing value of response in a readable format to display

 ### STEP 6
 # Call the preprocess_text function and store the result in a cleaned_chunks variable
@@ -170,12 +171,21 @@ custom_css = """
 """

 def respond(message, history):
+    # Prepare messages for the API
     messages = [{"role": "assistant", "content": "You are a friendly chatbot."}]
     if history:
-        messages.extend(history)
+        # Convert Gradio history into API format
+        for user_msg, bot_msg in history:
+            messages.append({"role": "user", "content": user_msg})
+            messages.append({"role": "assistant", "content": bot_msg})
     messages.append({"role": "user", "content": message})
+
+    # Call the API
     response = client.chat_completion(messages, max_tokens=100)
-    return response['choices'][0]['message']['content'].strip()
+    assistant_reply = response['choices'][0]['message']['content'].strip()
+
+    # Return for Gradio
+    return history + [(message, assistant_reply)], ""

 with gr.Blocks(css=custom_css) as demo:
     gr.HTML("<div id='header'>DivaBot</div>")
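For reference, the call pattern used in both versions of respond() can be exercised on its own. The sketch below is not taken from app.py: it assumes huggingface_hub is installed and that an HF token with Inference API access is configured, and it uses the conventional "system" role for the instruction message (the app sends that message with role "assistant"). Attribute access on the result is the documented form; the app reads the same field with dict-style indexing.

# Minimal sketch (not the app's code): query the same hosted model directly.
# Assumes huggingface_hub is installed and a valid HF token is configured.
from huggingface_hub import InferenceClient

client = InferenceClient("Qwen/Qwen2.5-7B-Instruct-1M")

messages = [
    # "system" is the conventional role for this instruction; app.py sends it
    # with role "assistant".
    {"role": "system", "content": "You are a friendly chatbot."},
    {"role": "user", "content": "Hello!"},
]

# max_tokens caps generated tokens, not words.
response = client.chat_completion(messages, max_tokens=100)

# Documented attribute access; app.py reads the same field dict-style.
print(response.choices[0].message.content.strip())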
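The second hunk changes respond() to return two values: the updated list of (user, bot) tuples for a Chatbot, and an empty string to clear the input box. The diff ends just as the gr.Blocks layout begins, so the wiring is not shown; the sketch below is an assumption chosen to match that two-value return, and the component names (chatbot, msg) are illustrative.

# Hedged sketch of how the rest of the Blocks layout might continue;
# custom_css and respond() are the objects defined earlier in app.py.
import gradio as gr

with gr.Blocks(css=custom_css) as demo:
    gr.HTML("<div id='header'>DivaBot</div>")
    chatbot = gr.Chatbot()  # renders the list of (user_msg, bot_msg) tuples
    msg = gr.Textbox()      # user input box

    # respond(message, history) returns (new_history, ""), so two outputs:
    # the Chatbot receives the appended history and the Textbox is cleared.
    msg.submit(respond, inputs=[msg, chatbot], outputs=[chatbot, msg])

demo.launch()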