kofdai commited on
Commit
a959d93
·
1 Parent(s): f8448b0

Add model selection feature: allows switching between different Hugging Face models

Browse files
Files changed (1) hide show
  1. app.py +26 -4
app.py CHANGED
@@ -29,6 +29,15 @@ DOMAINS = {
29
  "general": {"name": "General", "model": "mistralai/Mistral-7B-Instruct-v0.3", "icon": "🌐"},
30
  }
31
 
 
 
 
 
 
 
 
 
 
32
  # 検証マークの状態管理(デモ用)
33
  verification_store = {}
34
 
@@ -69,7 +78,8 @@ def generate_response(
69
  temperature: float = 0.7,
70
  max_tokens: int = 1024,
71
  is_expert: bool = False,
72
- expert_name: str = ""
 
73
  ) -> tuple:
74
  """
75
  質問に対する回答を生成
@@ -81,7 +91,8 @@ def generate_response(
81
  return "Please enter a question.", "", 0.0, "none"
82
 
83
  domain_info = DOMAINS.get(domain, DOMAINS["general"])
84
- model_name = domain_info["model"]
 
85
  system_prompt = get_system_prompt(domain)
86
 
87
  # プロンプト構築
@@ -186,6 +197,13 @@ with gr.Blocks(
186
  )
187
 
188
  with gr.Accordion("Advanced Settings", open=False):
 
 
 
 
 
 
 
189
  temperature_slider = gr.Slider(
190
  minimum=0.0,
191
  maximum=1.0,
@@ -238,9 +256,12 @@ with gr.Blocks(
238
  )
239
 
240
  # Event handlers
241
- def process_question(question, domain, temp, max_tok, is_expert, expert_name):
 
 
 
242
  answer, thinking, confidence, status = generate_response(
243
- question, domain, temp, max_tok, is_expert, expert_name
244
  )
245
  badge_html = format_verification_badge(status, expert_name if is_expert else "")
246
  return answer, thinking, confidence, badge_html
@@ -250,6 +271,7 @@ with gr.Blocks(
250
  inputs=[
251
  question_input,
252
  domain_dropdown,
 
253
  temperature_slider,
254
  max_tokens_slider,
255
  is_expert_checkbox,
 
29
  "general": {"name": "General", "model": "mistralai/Mistral-7B-Instruct-v0.3", "icon": "🌐"},
30
  }
31
 
32
+ # 利用可能なモデル
33
+ MODELS = {
34
+ "DeepSeek R1 7B": "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
35
+ "Qwen2.5 Coder 7B": "Qwen/Qwen2.5-Coder-7B-Instruct",
36
+ "Mistral 7B": "mistralai/Mistral-7B-Instruct-v0.3",
37
+ "Llama 3.1 8B": "meta-llama/Llama-3.1-8B-Instruct",
38
+ "Gemma 2 9B": "google/gemma-2-9b-it",
39
+ }
40
+
41
  # 検証マークの状態管理(デモ用)
42
  verification_store = {}
43
 
 
78
  temperature: float = 0.7,
79
  max_tokens: int = 1024,
80
  is_expert: bool = False,
81
+ expert_name: str = "",
82
+ custom_model: str = None
83
  ) -> tuple:
84
  """
85
  質問に対する回答を生成
 
91
  return "Please enter a question.", "", 0.0, "none"
92
 
93
  domain_info = DOMAINS.get(domain, DOMAINS["general"])
94
+ # カスタムモデルが指定されていればそれを使用
95
+ model_name = MODELS.get(custom_model, domain_info["model"]) if custom_model else domain_info["model"]
96
  system_prompt = get_system_prompt(domain)
97
 
98
  # プロンプト構築
 
197
  )
198
 
199
  with gr.Accordion("Advanced Settings", open=False):
200
+ model_dropdown = gr.Dropdown(
201
+ choices=["Auto (Best for Domain)"] + list(MODELS.keys()),
202
+ value="Auto (Best for Domain)",
203
+ label="Model Selection",
204
+ info="Choose a specific model or use Auto for domain-optimized selection"
205
+ )
206
+
207
  temperature_slider = gr.Slider(
208
  minimum=0.0,
209
  maximum=1.0,
 
256
  )
257
 
258
  # Event handlers
259
+ def process_question(question, domain, model_choice, temp, max_tok, is_expert, expert_name):
260
+ # "Auto"が選択された場合はNoneを渡す
261
+ custom_model = None if model_choice == "Auto (Best for Domain)" else model_choice
262
+
263
  answer, thinking, confidence, status = generate_response(
264
+ question, domain, temp, max_tok, is_expert, expert_name, custom_model
265
  )
266
  badge_html = format_verification_badge(status, expert_name if is_expert else "")
267
  return answer, thinking, confidence, badge_html
 
271
  inputs=[
272
  question_input,
273
  domain_dropdown,
274
+ model_dropdown,
275
  temperature_slider,
276
  max_tokens_slider,
277
  is_expert_checkbox,