Rulga commited on
Commit
acdfff5
·
1 Parent(s): 7d91871

No code changes made; skipping commit.

Browse files
Files changed (1) hide show
  1. app.py +245 -22
app.py CHANGED
@@ -26,17 +26,112 @@ from web.training_interface import (
26
  if not HF_TOKEN:
27
  raise ValueError("HUGGINGFACE_TOKEN not found in environment variables")
28
 
29
- # Initialize HF client with token
30
- client = InferenceClient(
31
- ACTIVE_MODEL["id"],
32
- token=HF_TOKEN
33
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- # State for storing context and chat history
 
36
  context_store = {}
37
 
38
  print(f"Chat histories will be saved to: {CHAT_HISTORY_PATH}")
39
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  def get_context(message, conversation_id):
41
  """Get context from knowledge base"""
42
  vector_store = load_vector_store()
@@ -220,7 +315,7 @@ def save_chat_history(history, conversation_id):
220
  def respond_and_clear(message, history, conversation_id):
221
  """Handle chat message and clear input"""
222
  # Get model parameters from config
223
- max_tokens = ACTIVE_MODEL['parameters']['max_length'] # используем ACTIVE_MODEL вместо MODEL_CONFIG
224
  temperature = ACTIVE_MODEL['parameters']['temperature']
225
  top_p = ACTIVE_MODEL['parameters']['top_p']
226
 
@@ -261,7 +356,6 @@ def respond_and_clear(message, history, conversation_id):
261
 
262
  return error_history, conversation_id, ""
263
 
264
- # Функции для обновления информации о модели
265
  def update_model_info(model_key):
266
  """Update model information display"""
267
  model = MODELS[model_key]
@@ -275,9 +369,40 @@ def update_model_info(model_key):
275
  **Type:** {model['type']}
276
  """
277
 
278
- # Функция для смены модели
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
279
  def change_model(model_key):
280
- """Change active model"""
281
  global client, ACTIVE_MODEL
282
 
283
  try:
@@ -290,9 +415,79 @@ def change_model(model_key):
290
  token=HF_TOKEN
291
  )
292
 
293
- return update_model_info(model_key)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
294
  except Exception as e:
295
- return f"Error changing model: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
296
 
297
  # Create interface
298
  with gr.Blocks() as demo:
@@ -357,15 +552,23 @@ with gr.Blocks() as demo:
357
  # Add model selector
358
  model_selector = gr.Dropdown(
359
  choices=list(MODELS.keys()),
360
- value=DEFAULT_MODEL,
361
  label="Select Model",
362
  interactive=True
363
  )
364
 
365
  # Current model info display
366
- model_info = gr.Markdown(value=update_model_info(DEFAULT_MODEL))
367
 
368
- # Model Parameters
 
 
 
 
 
 
 
 
369
  with gr.Row():
370
  max_length = gr.Slider(
371
  minimum=1,
@@ -373,7 +576,7 @@ with gr.Blocks() as demo:
373
  value=ACTIVE_MODEL['parameters']['max_length'],
374
  step=1,
375
  label="Maximum Length",
376
- interactive=False
377
  )
378
  temperature = gr.Slider(
379
  minimum=0.1,
@@ -381,7 +584,7 @@ with gr.Blocks() as demo:
381
  value=ACTIVE_MODEL['parameters']['temperature'],
382
  step=0.1,
383
  label="Temperature",
384
- interactive=False
385
  )
386
  with gr.Row():
387
  top_p = gr.Slider(
@@ -390,7 +593,7 @@ with gr.Blocks() as demo:
390
  value=ACTIVE_MODEL['parameters']['top_p'],
391
  step=0.1,
392
  label="Top-p",
393
- interactive=False
394
  )
395
  rep_penalty = gr.Slider(
396
  minimum=1.0,
@@ -398,8 +601,11 @@ with gr.Blocks() as demo:
398
  value=ACTIVE_MODEL['parameters']['repetition_penalty'],
399
  step=0.1,
400
  label="Repetition Penalty",
401
- interactive=False
402
  )
 
 
 
403
 
404
  gr.Markdown("""
405
  <small>
@@ -412,6 +618,9 @@ with gr.Blocks() as demo:
412
  """)
413
 
414
  with gr.Column(scale=1):
 
 
 
415
  gr.Markdown("### Training Configuration")
416
  gr.Markdown(f"""
417
  **Base Model Path:**
@@ -456,11 +665,25 @@ with gr.Blocks() as demo:
456
  outputs=[analysis_output]
457
  )
458
 
459
- # ПЕРЕМЕЩЕНО ВНУТРЬ БЛОКА: Добавляем обработчик события изменения модели
460
  model_selector.change(
461
  fn=change_model,
462
  inputs=[model_selector],
463
- outputs=[model_info]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
464
  )
465
 
466
  # Launch application
@@ -469,4 +692,4 @@ if __name__ == "__main__":
469
  if not load_vector_store():
470
  print("Knowledge base not found. Please create it through the interface.")
471
 
472
- demo.launch()
 
26
  if not HF_TOKEN:
27
  raise ValueError("HUGGINGFACE_TOKEN not found in environment variables")
28
 
29
# Extended model information
# Static, hand-maintained catalogue rendered by get_model_details_html().
# NOTE(review): keys appear intended to mirror the keys of MODELS (the
# model_selector dropdown feeds them straight in) — confirm they stay in sync.
MODEL_DETAILS = {
    "llama-7b": {
        "full_name": "Meta Llama 2 7B Chat",
        "capabilities": [
            "Multilingual (supports Russian, English and other languages)",
            "Good performance on legal texts",
            "Free model with open license",
            "Easy to run on computers with 16GB+ RAM"
        ],
        "limitations": [
            "Limited knowledge of specific legal terminology",
            "May give incorrect answers to complex legal questions",
            "Knowledge limited by training data"
        ],
        "use_cases": [
            "Legal document analysis",
            "Answering general legal questions",
            "Legal knowledge base search",
            "Document drafting assistance"
        ],
        "documentation": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf"
    },
    "zephyr-7b": {
        "full_name": "HuggingFaceH4 Zephyr 7B Beta",
        "capabilities": [
            "High performance on instruction tasks",
            "Good response accuracy",
            "Advanced reasoning",
            "Excellent text generation quality"
        ],
        "limitations": [
            "May require API payment for usage",
            "Limited support for languages other than English",
            "Less optimization for legal topics than specialized models"
        ],
        "use_cases": [
            "Complex legal reasoning",
            "Case law analysis",
            "Legislative research",
            "Structured legal text generation"
        ],
        "documentation": "https://huggingface.co/HuggingFaceH4/zephyr-7b-beta"
    }
}
74
+
75
# Path to the JSON file holding per-user settings (selected model and
# saved generation parameters), stored next to this module.
USER_PREFERENCES_PATH = os.path.join(os.path.dirname(__file__), "user_preferences.json")

# Global Hugging Face inference client; populated by initialize_app()
# at startup and replaced by initialize_client()/change_model().
client = None
80
  context_store = {}
81
 
82
  print(f"Chat histories will be saved to: {CHAT_HISTORY_PATH}")
83
 
84
def load_user_preferences():
    """Load user preferences from USER_PREFERENCES_PATH.

    Returns:
        dict: The stored preferences, or a fresh default of
        ``{"selected_model": DEFAULT_MODEL, "parameters": {}}`` when the
        file is missing or unreadable.
    """
    # Single source of truth for the fallback shape — the original
    # duplicated this literal in two places, inviting drift.
    defaults = {
        "selected_model": DEFAULT_MODEL,
        "parameters": {}
    }
    try:
        if os.path.exists(USER_PREFERENCES_PATH):
            with open(USER_PREFERENCES_PATH, 'r') as f:
                return json.load(f)
        return defaults
    except Exception as e:
        # Best-effort: a corrupt/unreadable file must not crash startup.
        print(f"Error loading user preferences: {str(e)}")
        return defaults
100
+
101
def save_user_preferences(model_key, parameters=None):
    """Persist the selected model (and optionally its parameters) to disk.

    Args:
        model_key: Key identifying the chosen model.
        parameters: Optional dict of generation parameters to store for
            this model; when omitted only the selection is updated.

    Returns:
        bool: True on success, False if the preferences could not be saved.
    """
    try:
        preferences = load_user_preferences()
        preferences["selected_model"] = model_key

        # Update parameters if provided.
        if parameters:
            # setdefault guards against preference files saved without a
            # "parameters" section (would raise KeyError otherwise); the
            # per-model entry is replaced wholesale — the original's
            # empty-dict pre-initialisation was dead code.
            preferences.setdefault("parameters", {})[model_key] = parameters

        with open(USER_PREFERENCES_PATH, 'w') as f:
            json.dump(preferences, f, indent=2)

        print("User preferences saved successfully!")
        return True
    except Exception as e:
        # Best-effort persistence: report and carry on with in-memory state.
        print(f"Error saving user preferences: {str(e)}")
        return False
122
+
123
def initialize_client(model_id=None):
    """(Re)create the global InferenceClient.

    Args:
        model_id: Model repository id to connect to; defaults to the
            currently active model's id when None.

    Returns:
        The newly created InferenceClient (also stored in the global
        ``client``).
    """
    global client

    target = ACTIVE_MODEL["id"] if model_id is None else model_id
    client = InferenceClient(target, token=HF_TOKEN)
    return client
134
+
135
  def get_context(message, conversation_id):
136
  """Get context from knowledge base"""
137
  vector_store = load_vector_store()
 
315
  def respond_and_clear(message, history, conversation_id):
316
  """Handle chat message and clear input"""
317
  # Get model parameters from config
318
+ max_tokens = ACTIVE_MODEL['parameters']['max_length']
319
  temperature = ACTIVE_MODEL['parameters']['temperature']
320
  top_p = ACTIVE_MODEL['parameters']['top_p']
321
 
 
356
 
357
  return error_history, conversation_id, ""
358
 
 
359
  def update_model_info(model_key):
360
  """Update model information display"""
361
  model = MODELS[model_key]
 
369
  **Type:** {model['type']}
370
  """
371
 
372
def get_model_details_html(model_key):
    """Get detailed HTML for model information panel"""
    # Unknown key -> placeholder notice (user-facing text is Russian,
    # consistent with the other strings in this panel).
    if model_key not in MODEL_DETAILS:
        return "<p>Информация о модели недоступна</p>"

    details = MODEL_DETAILS[model_key]

    # One self-contained card: full name, capabilities, limitations,
    # recommended use cases, and a link to the official model card.
    html = f"""
    <div style="padding: 15px; border: 1px solid #ccc; border-radius: 5px; margin-top: 10px;">
        <h3>{details['full_name']}</h3>

        <h4>Возможности:</h4>
        <ul>
            {"".join([f"<li>{cap}</li>" for cap in details['capabilities']])}
        </ul>

        <h4>Ограничения:</h4>
        <ul>
            {"".join([f"<li>{lim}</li>" for lim in details['limitations']])}
        </ul>

        <h4>Рекомендуемое использование:</h4>
        <ul>
            {"".join([f"<li>{use}</li>" for use in details['use_cases']])}
        </ul>

        <p><a href="{details['documentation']}" target="_blank">Документация модели</a></p>
    </div>
    """

    return html
403
+
404
  def change_model(model_key):
405
+ """Change active model and update parameters"""
406
  global client, ACTIVE_MODEL
407
 
408
  try:
 
415
  token=HF_TOKEN
416
  )
417
 
418
+ # Сохраняем выбранную модель в предпочтениях
419
+ save_user_preferences(model_key)
420
+
421
+ # Return both model info and updated parameters
422
+ return (
423
+ update_model_info(model_key),
424
+ ACTIVE_MODEL['parameters']['max_length'],
425
+ ACTIVE_MODEL['parameters']['temperature'],
426
+ ACTIVE_MODEL['parameters']['top_p'],
427
+ ACTIVE_MODEL['parameters']['repetition_penalty'],
428
+ f"Model changed to {ACTIVE_MODEL['name']}"
429
+ )
430
+ except Exception as e:
431
+ return (
432
+ f"Error changing model: {str(e)}",
433
+ 2048, 0.7, 0.9, 1.1,
434
+ f"Error: {str(e)}"
435
+ )
436
+
437
def save_parameters(model_key, max_len, temp, top_p_val, rep_pen):
    """Save user-defined parameters to active model"""
    global ACTIVE_MODEL

    try:
        # Collect the slider values once, then apply them both to the
        # in-memory model config and to the persisted user preferences.
        params = {
            'max_length': max_len,
            'temperature': temp,
            'top_p': top_p_val,
            'repetition_penalty': rep_pen
        }
        ACTIVE_MODEL['parameters'].update(params)
        save_user_preferences(model_key, params)

        return "Parameters saved successfully!"
    except Exception as e:
        return f"Error saving parameters: {str(e)}"
460
+
461
def initialize_app():
    """Initialize the app from saved user preferences.

    Restores the last selected model (falling back to DEFAULT_MODEL),
    applies any saved generation parameters for it, and creates the
    global InferenceClient.

    Returns:
        str: The key of the model that was activated.
    """
    global client, ACTIVE_MODEL

    preferences = load_user_preferences()
    selected_model = preferences.get("selected_model", DEFAULT_MODEL)

    # Make sure the selected model still exists — the preferences file
    # may reference a model that has since been removed from MODELS.
    if selected_model not in MODELS:
        selected_model = DEFAULT_MODEL

    # Set the active model
    ACTIVE_MODEL = MODELS[selected_model]

    # Load saved parameters for this model, if any; merge on top of the
    # model's defaults rather than replacing the whole dict.
    saved_params = preferences.get("parameters", {}).get(selected_model)
    if saved_params:
        ACTIVE_MODEL['parameters'].update(saved_params)

    # Initialize the inference client for the active model
    client = InferenceClient(
        ACTIVE_MODEL["id"],
        token=HF_TOKEN
    )

    print(f"App initialized with model: {ACTIVE_MODEL['name']}")
    return selected_model
488
+
489
# Initialize HF client with token at startup.
# Runs at import time: restores saved preferences, sets ACTIVE_MODEL and
# the global client, and returns the model key used as the dropdown default.
selected_model = initialize_app()
491
 
492
  # Create interface
493
  with gr.Blocks() as demo:
 
552
  # Add model selector
553
  model_selector = gr.Dropdown(
554
  choices=list(MODELS.keys()),
555
+ value=selected_model, # Use loaded model from preferences
556
  label="Select Model",
557
  interactive=True
558
  )
559
 
560
  # Current model info display
561
+ model_info = gr.Markdown(value=update_model_info(selected_model))
562
 
563
+ # Status indicator for model loading
564
+ model_loading = gr.Textbox(
565
+ label="Status",
566
+ placeholder="Model ready",
567
+ interactive=False,
568
+ value="Model ready"
569
+ )
570
+
571
+ # Model Parameters - make them interactive
572
  with gr.Row():
573
  max_length = gr.Slider(
574
  minimum=1,
 
576
  value=ACTIVE_MODEL['parameters']['max_length'],
577
  step=1,
578
  label="Maximum Length",
579
+ interactive=True
580
  )
581
  temperature = gr.Slider(
582
  minimum=0.1,
 
584
  value=ACTIVE_MODEL['parameters']['temperature'],
585
  step=0.1,
586
  label="Temperature",
587
+ interactive=True
588
  )
589
  with gr.Row():
590
  top_p = gr.Slider(
 
593
  value=ACTIVE_MODEL['parameters']['top_p'],
594
  step=0.1,
595
  label="Top-p",
596
+ interactive=True
597
  )
598
  rep_penalty = gr.Slider(
599
  minimum=1.0,
 
601
  value=ACTIVE_MODEL['parameters']['repetition_penalty'],
602
  step=0.1,
603
  label="Repetition Penalty",
604
+ interactive=True
605
  )
606
+
607
+ # Save parameters button
608
+ save_params_btn = gr.Button("Save Parameters", variant="primary")
609
 
610
  gr.Markdown("""
611
  <small>
 
618
  """)
619
 
620
  with gr.Column(scale=1):
621
+ # Model details panel
622
+ model_details = gr.HTML(get_model_details_html(selected_model))
623
+
624
  gr.Markdown("### Training Configuration")
625
  gr.Markdown(f"""
626
  **Base Model Path:**
 
665
  outputs=[analysis_output]
666
  )
667
 
668
+ # Model change handler
669
  model_selector.change(
670
  fn=change_model,
671
  inputs=[model_selector],
672
+ outputs=[model_info, max_length, temperature, top_p, rep_penalty, model_loading]
673
+ )
674
+
675
+ # Update model details panel when model changes
676
+ model_selector.change(
677
+ fn=get_model_details_html,
678
+ inputs=[model_selector],
679
+ outputs=[model_details]
680
+ )
681
+
682
+ # Parameters save handler
683
+ save_params_btn.click(
684
+ fn=save_parameters,
685
+ inputs=[model_selector, max_length, temperature, top_p, rep_penalty],
686
+ outputs=[model_loading]
687
  )
688
 
689
  # Launch application
 
692
  if not load_vector_store():
693
  print("Knowledge base not found. Please create it through the interface.")
694
 
695
+ demo.launch()