Rulga committed
Commit 2a3ccaf · Parent: 8792435

Refactor model configuration to use ACTIVE_MODEL for improved clarity and consistency

Files changed (1): app.py +7 -8
app.py CHANGED
@@ -7,14 +7,13 @@ from huggingface_hub import InferenceClient
 from config.constants import DEFAULT_SYSTEM_MESSAGE
 from config.settings import (
     HF_TOKEN,
-    MODEL_CONFIG,
+    MODELS,
+    ACTIVE_MODEL,
     EMBEDDING_MODEL,
     DATASET_ID,
     CHAT_HISTORY_PATH,
     VECTOR_STORE_PATH,
-    MODELS,
-    DEFAULT_MODEL,
-    ACTIVE_MODEL
+    DEFAULT_MODEL
 )
 from src.knowledge_base.vector_store import create_vector_store, load_vector_store
 from web.training_interface import (
@@ -29,7 +28,7 @@ if not HF_TOKEN:
 
 # Initialize HF client with token
 client = InferenceClient(
-    MODEL_CONFIG["id"],
+    ACTIVE_MODEL["id"],
     token=HF_TOKEN
 )
 
@@ -221,9 +220,9 @@ def save_chat_history(history, conversation_id):
 def respond_and_clear(message, history, conversation_id):
     """Handle chat message and clear input"""
     # Get model parameters from config
-    max_tokens = MODEL_CONFIG['parameters']['max_length']
-    temperature = MODEL_CONFIG['parameters']['temperature']
-    top_p = MODEL_CONFIG['parameters']['top_p']
+    max_tokens = ACTIVE_MODEL['parameters']['max_length']  # use ACTIVE_MODEL instead of MODEL_CONFIG
+    temperature = ACTIVE_MODEL['parameters']['temperature']
+    top_p = ACTIVE_MODEL['parameters']['top_p']
 
     # Print debug information to help diagnose the issue
     print("Debug - Message type:", type(message), "Content:", message)