Update app.py
app.py CHANGED
@@ -34,9 +34,10 @@ def save_memory(memory):
        json.dump(memory, f, indent=2)

# Append to memory
-def update_memory(
+def update_memory(message, response):
    memory = load_memory()
-    memory.append(
+    memory.append({"role": "user", "content": message})
+    memory.append({"role": "assistant", "content": response})
    # Optionally limit memory size
    if len(memory) > 1000:
        memory = memory[-1000:]
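The new update_memory signature leans on the load_memory and save_memory helpers referenced in this hunk but defined outside it. A minimal sketch of what those helpers presumably look like, assuming memory_file points at a JSON file (the real implementations in app.py are not shown in this diff):

import json
import os

memory_file = "memory.json"  # assumed path

def load_memory():
    # Return the stored chat turns, or an empty list on first run
    if os.path.exists(memory_file):
        with open(memory_file, "r") as f:
            return json.load(f)
    return []

def save_memory(memory):
    # Persist the (possibly trimmed) turn list back to disk
    with open(memory_file, "w") as f:
        json.dump(memory, f, indent=2)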
@@ -81,14 +82,14 @@ def train_model_on_files():

# Chat response function
def respond(message, history, system_message, max_tokens, temperature, top_p):
-    #
+    # Generate response using ML model if possible
    model = load_or_initialize_model()

-    #
+    # Attempt to get a prediction from the ML model
    try:
        pred_label = model.predict([message])[0]
        response = f"Predicted response: {pred_label}"
-        update_memory(
+        update_memory(message, response)
        return response
    except Exception:
        pass  # Continue with GPT model if ML model doesn't have a response
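The ML branch above calls model.predict([message])[0] on whatever load_or_initialize_model() returns, which suggests a scikit-learn-style text classifier. A toy stand-in, purely illustrative since the real load_or_initialize_model is outside this diff:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

# Hypothetical pipeline: vectorize the message, then classify it
model = make_pipeline(TfidfVectorizer(), LogisticRegression())
model.fit(["hello there", "see you later"], ["greeting", "farewell"])  # toy data
print(model.predict(["hello"])[0])  # prints a single predicted label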
@@ -96,10 +97,10 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Generate response using GPT
    messages = [{"role": "system", "content": system_message}]
    for turn in history:
-        if turn["user"
-            messages.append({"role": "user", "content": turn["
-
-            messages.append({"role": "assistant", "content": turn["
+        if turn["role"] == "user":
+            messages.append({"role": "user", "content": turn["content"]})
+        elif turn["role"] == "assistant":
+            messages.append({"role": "assistant", "content": turn["content"]})
    messages.append({"role": "user", "content": message})

    response = ""
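The rewritten loop assumes each history entry is a dict with "role" and "content" keys (Gradio's "messages"-style chat history rather than the older list-of-pairs format), which is also the shape handle_message appends later in this diff. For illustration:

# Assumed shape of `history` that the loop above iterates over
history = [
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi! How can I help?"},
]
# Each turn maps directly onto an OpenAI-style chat message.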
@@ -115,11 +116,11 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
            response += token
    except Exception as e:
        response = f"Error generating response: {str(e)}"
-        update_memory(
+        update_memory(message, response)
        return response

    # Update memory
-    update_memory(
+    update_memory(message, response)
    return response

# Gradio interface
@@ -159,7 +160,8 @@ with gr.Blocks() as demo:

    def handle_message(message, history, system_message, max_tokens, temperature, top_p):
        response = respond(message, history, system_message, max_tokens, temperature, top_p)
-        history.append({"
+        history.append({"role": "user", "content": message})
+        history.append({"role": "assistant", "content": response})
        return history, history

    send_button.click(
@@ -179,7 +181,7 @@ with gr.Blocks() as demo:
            label="Upload CSV File",
            file_types=[".csv"],
            file_count="single",  # Replaced 'multiple=False' with 'file_count="single"'
-            interactive=True
+            # Removed 'interactive=True' as it's not a valid parameter
        )
        upload_output = gr.Textbox(label="Upload Result", interactive=False)
        train_button = gr.Button("📚 Train Model on Knowledge Base")
@@ -221,7 +223,8 @@ with gr.Blocks() as demo:

        def export_memory_func():
            if os.path.exists(memory_file):
-
+                with open(memory_file, "rb") as f:
+                    return f
            return None

        refresh_memory.click(display_memory, inputs=None, outputs=memory_display)
@@ -231,11 +234,12 @@ with gr.Blocks() as demo:
    with gr.Tab("💾 Download Model"):
        gr.Markdown("### Download the Trained Model")
        download_button = gr.Button("📥 Download Model")
-        model_download_output = gr.File(label="Downloadable Model"
+        model_download_output = gr.File(label="Downloadable Model")  # Removed 'interactive=False'

        def download_model():
            if os.path.exists(model_file):
-
+                with open(model_file, "rb") as f:
+                    return f
            return None

        download_button.click(download_model, inputs=None, outputs=model_download_output)
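Note that the "with open(...) as f: return f" pattern in export_memory_func and download_model returns a handle that is already closed when the with block exits. gr.File outputs also accept a plain filesystem path, so an alternative sketch (not what this commit does) would be:

def export_memory_func():
    # Returning the path lets Gradio serve the file itself
    if os.path.exists(memory_file):
        return memory_file
    return None

The same path-returning pattern would apply to download_model with model_file.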
@@ -247,7 +251,7 @@ with gr.Blocks() as demo:
            value="",
            label="Settings Placeholder",
            placeholder="Add settings here...",
-            interactive=False,
+            interactive=False,  # If 'interactive' is not supported, remove it
        )

if __name__ == "__main__":