Update app.py
app.py (CHANGED)
```diff
@@ -5,7 +5,7 @@ from datetime import datetime
 from typing import Literal
 import os
 import importlib
-from llm_handler import send_to_llm, set_local_model_base_url, set_anything_llm_workspace
+from llm_handler import send_to_llm
 from main import generate_data, PROMPT_1
 from topics import TOPICS
 from system_messages import SYSTEM_MESSAGES_VODALUS
```
```diff
@@ -264,20 +264,15 @@ def save_dataset_config(system_messages, prompt_1, topics):
     return "Dataset configuration saved successfully"
 
 # Modify the chat_with_llm function to use Gradio's built-in async capabilities
-def chat_with_llm(message, history, selected_llm, base_url, anything_llm_workspace):
+def chat_with_llm(message, history):
     try:
-        if selected_llm == "local-model":
-            set_local_model_base_url(base_url)
-        elif selected_llm == "anything-llm":
-            set_anything_llm_workspace(anything_llm_workspace)
-
         msg_list = [{"role": "system", "content": "You are an AI assistant helping with dataset annotation and quality checking."}]
         for h in history:
             msg_list.append({"role": "user", "content": h[0]})
             msg_list.append({"role": "assistant", "content": h[1]})
         msg_list.append({"role": "user", "content": message})
 
-        response, _ = send_to_llm(selected_llm, msg_list)
+        response, _ = send_to_llm("llamanet", msg_list)
 
         return history + [[message, response]]
     except Exception as e:
```
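The rewritten `chat_with_llm` drops the per-provider setup and hard-codes `"llamanet"` as the provider passed to `send_to_llm`. Below is a minimal, runnable sketch of the same pair-style-history-to-message-list conversion; the stubbed `send_to_llm` and its echo behavior are assumptions standing in for `llm_handler.send_to_llm`, which the diff shows returning a `(response, extra)` tuple:

```python
# Sketch of chat_with_llm's history handling, with a stubbed backend.
def send_to_llm(provider, msg_list):
    # Stub (assumption): the real llm_handler.send_to_llm calls the chosen
    # backend and returns (response_text, extra_info).
    return f"[{provider}] echo: {msg_list[-1]['content']}", None

def chat_with_llm(message, history):
    # Gradio's pair-style Chatbot history is a list of [user, assistant] pairs.
    msg_list = [{"role": "system", "content": "You are an AI assistant helping with dataset annotation and quality checking."}]
    for user_turn, assistant_turn in history:
        msg_list.append({"role": "user", "content": user_turn})
        msg_list.append({"role": "assistant", "content": assistant_turn})
    msg_list.append({"role": "user", "content": message})

    response, _ = send_to_llm("llamanet", msg_list)
    return history + [[message, response]]

print(chat_with_llm("How should I tag this row?", [["hi", "hello!"]]))
```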
```diff
@@ -297,12 +292,12 @@ def update_chat_context(row_data, index, total, quality, high_quality_tags, low_quality_tags):
     return [[None, context]]  # Return as a list of message pairs
 
 # Add this function to handle dataset generation
-async def run_generate_dataset(num_workers, num_generations, output_file_path, selected_llm, base_url, anything_llm_workspace):
+async def run_generate_dataset(num_workers, num_generations, output_file_path):
     generated_data = []
     for _ in range(num_generations):
         topic_selected = random.choice(TOPICS)
         system_message_selected = random.choice(SYSTEM_MESSAGES_VODALUS)
-        data = await generate_data(topic_selected, PROMPT_1, system_message_selected, output_file_path, selected_llm, base_url, anything_llm_workspace)
+        data = await generate_data(topic_selected, PROMPT_1, system_message_selected, output_file_path)
         if data:
             generated_data.append(json.dumps(data))
 
```
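`run_generate_dataset` now takes only `num_workers`, `num_generations`, and `output_file_path`, awaiting `generate_data` once per generation. A self-contained sketch of that loop; the stub `generate_data` and the two small lists are assumptions standing in for the real implementations in `main.py`, `topics.py`, and `system_messages.py`:

```python
import asyncio
import json
import random

TOPICS = ["biology", "history"]  # stand-in for topics.TOPICS
SYSTEM_MESSAGES_VODALUS = ["You are a helpful assistant."]  # stand-in

async def generate_data(topic, prompt, system_message, output_file_path):
    # Stub (assumption): the real coroutine in main.py queries the LLM
    # and appends the result to output_file_path.
    return {"topic": topic, "system": system_message}

async def run_generate_dataset(num_workers, num_generations, output_file_path):
    # num_workers is accepted but unused in this sequential sketch,
    # mirroring the loop shown in the diff.
    generated_data = []
    for _ in range(num_generations):
        topic_selected = random.choice(TOPICS)
        system_message_selected = random.choice(SYSTEM_MESSAGES_VODALUS)
        data = await generate_data(topic_selected, "PROMPT_1", system_message_selected, output_file_path)
        if data:
            generated_data.append(json.dumps(data))
    return generated_data

print(asyncio.run(run_generate_dataset(1, 2, "dataset.jsonl")))
```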
```diff
@@ -412,9 +407,6 @@ with demo:
 
         with gr.Column(scale=1):
             gr.Markdown("## AI Assistant")
-            selected_llm = gr.Radio(["local-model", "anything-llm", "llamacpp"], label="Select LLM", value="local-model")
-            base_url = gr.Textbox(label="Base URL for local-model", value="http://localhost:11434/v1", visible=False)
-            anything_llm_workspace = gr.Textbox(label="AnythingLLM Workspace", value="<input-workspace-name-here>", visible=False)
             chatbot = gr.Chatbot(height=600)
             msg = gr.Textbox(label="Chat with AI Assistant")
             clear = gr.Button("Clear")
```
```diff
@@ -510,11 +502,11 @@ with demo:
 
     start_generation_btn.click(
         run_generate_dataset,
-        inputs=[num_workers, num_generations, output_file_path, selected_llm, base_url, anything_llm_workspace],
+        inputs=[num_workers, num_generations, output_file_path],
         outputs=[generation_status, generation_output]
     )
 
-    msg.submit(chat_with_llm, [msg, chatbot, selected_llm, base_url, anything_llm_workspace], [chatbot])
+    msg.submit(chat_with_llm, [msg, chatbot], [chatbot])
     clear.click(lambda: None, None, chatbot, queue=False)
 
     # Update chat context when navigating rows or loading dataset
```
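With the provider controls gone, the chat wiring passes only `[msg, chatbot]` into `chat_with_llm`. This is standard Gradio event wiring: `.submit()` and `.click()` take the callback, an inputs list, and an outputs list. A minimal sketch with a placeholder `respond` callback:

```python
import gradio as gr

def respond(message, history):
    # Placeholder callback: append an echo turn to the pair-style history.
    history = history or []
    return history + [[message, f"echo: {message}"]]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=600)
    msg = gr.Textbox(label="Chat with AI Assistant")
    clear = gr.Button("Clear")

    # Pressing Enter in the textbox runs respond(msg, chatbot) -> chatbot.
    msg.submit(respond, [msg, chatbot], [chatbot])
    # Returning None clears the chatbot; queue=False bypasses the event queue.
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch()
```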
```diff
@@ -525,11 +517,6 @@ with demo:
         outputs=[chatbot]
     )
 
-    def toggle_input_visibility(llm):
-        return gr.update(visible=llm == "local-model"), gr.update(visible=llm == "anything-llm")
-
-    selected_llm.change(toggle_input_visibility, inputs=[selected_llm], outputs=[base_url, anything_llm_workspace])
-
 if __name__ == "__main__":
     demo.launch(share=True)
 
```
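The deleted `toggle_input_visibility` shows a common Gradio pattern: a `.change()` listener returns one `gr.update(visible=...)` per output component. A self-contained sketch of that pattern, reusing the component names from the removed code:

```python
import gradio as gr

def toggle_input_visibility(llm):
    # One gr.update per entry in outputs=[...], in order.
    return (
        gr.update(visible=llm == "local-model"),
        gr.update(visible=llm == "anything-llm"),
    )

with gr.Blocks() as demo:
    selected_llm = gr.Radio(
        ["local-model", "anything-llm", "llamacpp"],
        label="Select LLM", value="local-model",
    )
    base_url = gr.Textbox(label="Base URL for local-model",
                          value="http://localhost:11434/v1")
    anything_llm_workspace = gr.Textbox(label="AnythingLLM Workspace",
                                        visible=False)

    selected_llm.change(toggle_input_visibility,
                        inputs=[selected_llm],
                        outputs=[base_url, anything_llm_workspace])

if __name__ == "__main__":
    demo.launch()
```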