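"""Gradio app for the Status Law Assistant.

Wires a chat interface backed by a Hugging Face Inference API model to a
vector-store knowledge base, plus tabs for model settings and fine-tuning.
"""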
import gradio as gr
import os
from huggingface_hub import InferenceClient
from config.constants import DEFAULT_SYSTEM_MESSAGE
from config.settings import HF_TOKEN, MODEL_CONFIG, EMBEDDING_MODEL
from src.knowledge_base.vector_store import create_vector_store
from web.training_interface import (
    get_models_df,
    generate_chat_analysis,
    register_model_action,
    start_finetune_action,
)

if not HF_TOKEN:
    raise ValueError("HUGGINGFACE_TOKEN not found in environment variables")

# Initialize HF client with token
client = InferenceClient(
    MODEL_CONFIG["id"],
    token=HF_TOKEN,
)
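
# Note: generation below goes through client.chat_completion(), so the model
# configured in MODEL_CONFIG["id"] must be served with the chat-completion
# task on the Inference API for these calls to succeed.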
# State for storing context
context_store = {}
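# NOTE: get_context() writes into this dict, but nothing else in this module
# reads it back; it is presumably kept for later inspection or analytics.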


def get_context(message, conversation_id):
    """Get context from knowledge base"""
    vector_store = load_vector_store()
    if vector_store is None:
        print("Knowledge base not found or failed to load")
        return ""

    # Check if vector_store is a string (error message) instead of an actual store
    if isinstance(vector_store, str):
        print(f"Error with vector store: {vector_store}")
        return ""

    try:
        # Extract context
        context_docs = vector_store.similarity_search(message, k=3)
        context_text = "\n\n".join(
            f"From {doc.metadata.get('source', 'unknown')}: {doc.page_content}"
            for doc in context_docs
        )
        # Save context for this conversation
        context_store[conversation_id] = context_text
        return context_text
    except Exception as e:
        print(f"Error getting context: {str(e)}")
        return ""


def load_vector_store():
    """Load knowledge base from dataset"""
    try:
        from src.knowledge_base.dataset import DatasetManager

        print("Debug - Attempting to load vector store...")
        dataset = DatasetManager()
        success, result = dataset.download_vector_store()
        print(f"Debug - Download result: success={success}, result_type={type(result)}")
        if success:
            if isinstance(result, str):
                print(f"Debug - Error message received: {result}")
                return None
            return result
        else:
            print(f"Debug - Failed to load vector store: {result}")
            return None
    except Exception as e:
        import traceback

        print(f"Exception loading knowledge base: {str(e)}")
        print(traceback.format_exc())
        return None
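
# The object returned above is expected to behave like a LangChain-style
# vector store (it must expose similarity_search(query, k=...)), since that
# is how get_context() consumes it.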


def respond(
    message,
    history,
    conversation_id,
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Create ID for new conversation
    if not conversation_id:
        import uuid

        conversation_id = str(uuid.uuid4())

    # Get context from knowledge base
    context = get_context(message, conversation_id)

    # Build the message list in OpenAI chat format, folding the retrieved
    # context into the system message
    messages = [{"role": "system", "content": system_message}]
    if context:
        messages[0]["content"] += f"\n\nContext for response:\n{context}"

    # Debug: print the history format
    print("Debug - Processing history format:", history)

    # Convert history (a list of (user, assistant) pairs) to OpenAI format
    if history:
        try:
            for item in history:
                # Check if we have a pair of messages as expected
                if len(item) == 2:
                    user_msg, assistant_msg = item
                    messages.append({"role": "user", "content": user_msg})
                    messages.append({"role": "assistant", "content": assistant_msg})
        except Exception as e:
            print(f"Error processing history: {str(e)}")
            # Continue with empty history if there was an error

    # Add current user message
    messages.append({"role": "user", "content": message})

    # Debug: print API messages
    print("Debug - API messages:", messages)

    # Send API request and return the complete response in a single yield
    try:
        # Non-streaming version for debugging
        full_response = client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=False,
            temperature=temperature,
            top_p=top_p,
        )
        response = full_response.choices[0].message.content
        print(f"Debug - Full response from API: {response}")

        # Return complete response immediately
        final_history = history.copy() if history else []
        final_history.append((message, response))
        yield final_history, conversation_id
    except Exception as e:
        print(f"Debug - Error during API call: {str(e)}")
        error_history = history.copy() if history else []
        error_history.append((message, f"An error occurred: {str(e)}"))
        yield error_history, conversation_id
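
# A streaming variant would fit the same generator shape; a minimal sketch,
# assuming the served model supports streamed chat completion:
#
#     response = ""
#     for chunk in client.chat_completion(
#         messages, max_tokens=max_tokens, stream=True,
#         temperature=temperature, top_p=top_p,
#     ):
#         response += chunk.choices[0].delta.content or ""
#         yield (history.copy() if history else []) + [(message, response)], conversation_id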


def update_kb():
    """Add new documents to the existing knowledge base"""
    try:
        success, message = create_vector_store(mode="update")
        return message
    except Exception as e:
        return f"Error updating knowledge base: {str(e)}"


def rebuild_kb():
    """Create the knowledge base from scratch"""
    try:
        success, message = create_vector_store(mode="rebuild")
        return message
    except Exception as e:
        return f"Error creating knowledge base: {str(e)}"


def respond_and_clear(message, history, conversation_id):
    """Handle chat message and clear input"""
    # Get model parameters from config
    max_tokens = MODEL_CONFIG['parameters']['max_length']
    temperature = MODEL_CONFIG['parameters']['temperature']
    top_p = MODEL_CONFIG['parameters']['top_p']

    # Print debug information to help diagnose issues
    print("Debug - Message type:", type(message), "Content:", message)
    print("Debug - History type:", type(history), "Content:", history)

    try:
        # Get response generator
        response_generator = respond(
            message=message,
            history=history if history else [],
            conversation_id=conversation_id,
            system_message=DEFAULT_SYSTEM_MESSAGE,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )

        # respond() yields exactly once with the full response, so a single
        # next() is enough
        new_history, conv_id = next(response_generator)

        # Debug the response
        print("Debug - Final history:", new_history)
        return new_history, conv_id, ""  # Clear message input
    except Exception as e:
        print(f"Error in respond_and_clear: {str(e)}")
        # history may be None on the first turn, so guard before appending
        return (history or []) + [(message, f"An error occurred: {str(e)}")], conversation_id, ""


# Create interface
with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.Tab("Chat"):
            gr.Markdown("# ⚖️ Status Law Assistant")
            conversation_id = gr.State(None)

            with gr.Row():
                with gr.Column(scale=3):
                    chatbot = gr.Chatbot(
                        label="Chat",
                        avatar_images=["user.png", "assistant.png"]
                    )
                    with gr.Row():
                        msg = gr.Textbox(
                            label="Your question",
                            placeholder="Enter your question...",
                            scale=4
                        )
                        submit_btn = gr.Button("Send", variant="primary")
                    clear_btn = gr.Button("Clear")  # Add clear button

                with gr.Column(scale=1):
                    gr.Markdown("### Knowledge Base Management")
                    gr.Markdown("""
                    - **Update**: Add new documents to existing base
                    - **Rebuild**: Create new base from scratch
                    """)
                    with gr.Row():
                        update_kb_btn = gr.Button("🔄 Update Base", variant="secondary", scale=1)
                        rebuild_kb_btn = gr.Button("🔄 Rebuild Base", variant="primary", scale=1)
                    kb_status = gr.Textbox(
                        label="Status",
                        placeholder="Knowledge base status will appear here...",
                        interactive=False
                    )

            submit_btn.click(
                respond_and_clear,
                [msg, chatbot, conversation_id],
                [chatbot, conversation_id, msg]
            )
            update_kb_btn.click(update_kb, None, kb_status)
            rebuild_kb_btn.click(rebuild_kb, None, kb_status)
            clear_btn.click(lambda: ([], None), None, [chatbot, conversation_id])
with gr.Tab("Model Settings"):
gr.Markdown("### Model Configuration")
with gr.Row():
with gr.Column(scale=2):
# Model Information
gr.Markdown(f"""
**Current Model:** {MODEL_CONFIG['name']}
**Model ID:** `{MODEL_CONFIG['id']}`
**Description:** {MODEL_CONFIG['description']}
**Type:** {MODEL_CONFIG['type']}
**Embeddings Model:** `{EMBEDDING_MODEL}`
*Used for vector store creation and similarity search*
""")
gr.Markdown("### Model Parameters")
with gr.Row():
max_length = gr.Slider(
minimum=1,
maximum=4096,
value=MODEL_CONFIG['parameters']['max_length'],
step=1,
label="Maximum Length",
interactive=False
)
temperature = gr.Slider(
minimum=0.1,
maximum=2.0,
value=MODEL_CONFIG['parameters']['temperature'],
step=0.1,
label="Temperature",
interactive=False
)
with gr.Row():
top_p = gr.Slider(
minimum=0.1,
maximum=1.0,
value=MODEL_CONFIG['parameters']['top_p'],
step=0.1,
label="Top-p",
interactive=False
)
rep_penalty = gr.Slider(
minimum=1.0,
maximum=2.0,
value=MODEL_CONFIG['parameters']['repetition_penalty'],
step=0.1,
label="Repetition Penalty",
interactive=False
)
gr.Markdown("""
<small>
**Parameters explanation:**
- **Maximum Length**: Maximum number of tokens in the generated response
- **Temperature**: Controls randomness (0.1 = very focused, 2.0 = very creative)
- **Top-p**: Controls diversity via nucleus sampling (lower = more focused)
- **Repetition Penalty**: Prevents word repetition (higher = less repetition)
</small>
""")
with gr.Column(scale=1):
gr.Markdown("### Training Configuration")
gr.Markdown(f"""
**Base Model Path:**
```
{MODEL_CONFIG['training']['base_model_path']}
```
**Fine-tuned Model Path:**
```
{MODEL_CONFIG['training']['fine_tuned_path']}
```
**LoRA Configuration:**
- Rank (r): {MODEL_CONFIG['training']['lora_config']['r']}
- Alpha: {MODEL_CONFIG['training']['lora_config']['lora_alpha']}
- Dropout: {MODEL_CONFIG['training']['lora_config']['lora_dropout']}
""")
with gr.Tab("Model Training"):
gr.Markdown("### Model Training Interface")
with gr.Row():
with gr.Column():
epochs = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Number of Epochs")
batch_size = gr.Slider(minimum=1, maximum=32, value=4, step=1, label="Batch Size")
learning_rate = gr.Slider(minimum=1e-6, maximum=1e-3, value=2e-4, label="Learning Rate")
train_btn = gr.Button("Start Training", variant="primary")
training_output = gr.Textbox(label="Training Status", interactive=False)
with gr.Column():
analysis_btn = gr.Button("Generate Chat Analysis")
analysis_output = gr.Markdown()
train_btn.click(
start_finetune_action,
inputs=[epochs, batch_size, learning_rate],
outputs=[training_output]
)
analysis_btn.click(
generate_chat_analysis,
inputs=[],
outputs=[analysis_output]
)


# Launch application
if __name__ == "__main__":
    # Check knowledge base availability in dataset
    if not load_vector_store():
        print("Knowledge base not found. Please create it through the interface.")
    demo.launch()
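
# To run locally (assuming this file is saved as app.py), a token with
# Inference API access is required, e.g.:
#   HUGGINGFACE_TOKEN=hf_xxx python app.py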