BtB-ExpC committed on
Commit
4599cef
·
1 Parent(s): a11582b

Fix SyntaxError: add the missing colon to the `async def run_diagnoser(...)` signature

Browse files
Files changed (2) hide show
  1. app.py +1 -1
  2. config/llm_config.py +33 -0
app.py CHANGED
@@ -22,7 +22,7 @@ def update_exercise_format(selected_model: str):
22
 
23
 
24
  # Async wrappers for each chain.
25
- async def run_diagnoser(user_query: str, model_choice_validate: str, exercise_format_validate: str, sampling_count_validate: str) -> tuple
26
  """
27
  Diagnose exercise(s) in parallel using a configured DiagnoserChain.
28
 
 
22
 
23
 
24
  # Async wrappers for each chain.
25
+ async def run_diagnoser(user_query: str, model_choice_validate: str, exercise_format_validate: str, sampling_count_validate: str) -> tuple:
26
  """
27
  Diagnose exercise(s) in parallel using a configured DiagnoserChain.
28
 
config/llm_config.py CHANGED
@@ -32,6 +32,7 @@ def create_anthropic_llm(model_name: str, temperature: float):
32
  def create_deepseek_llm(model_name: str, temperature: float):
      """Factory for a DeepSeek chat model with the given model name and temperature.

      NOTE(review): despite the name, this returns a ChatAnthropic client
      authenticated with ANTHROPIC_API_KEY — it is byte-for-byte the body of
      create_deepseek_llm's sibling create_anthropic_llm, which looks like a
      copy-paste placeholder. DeepSeek models presumably need their own client
      (e.g. an OpenAI-compatible endpoint) and a DEEPSEEK_API_KEY — TODO confirm
      before relying on the "Deepseek R1 🚧" entries that call this.
      """
      return ChatAnthropic(api_key=ANTHROPIC_API_KEY, model_name=model_name, temperature=temperature)
34
 
 
35
  llms = {
36
  # OpenAI models with temperature
37
 
@@ -46,8 +47,40 @@ llms = {
46
 
47
  # OpenAI reasoning models (no temperature)
48
  "o1": create_openai_reasoning_llm("o1-2024-12-17"),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  "o3-mini (high-reasoning version)": create_openai_reasoning_llm("o3-mini", reasoning_effort="high"),
50
 
 
 
51
  # Anthropic models (Claude)
52
  "Claude 3.5 (low temp)": create_anthropic_llm("claude-3-5-sonnet-latest", LOW),
53
  "Claude 3.5 (mid temp)": create_anthropic_llm("claude-3-5-sonnet-latest", MID),
 
32
  def create_deepseek_llm(model_name: str, temperature: float):
33
  return ChatAnthropic(api_key=ANTHROPIC_API_KEY, model_name=model_name, temperature=temperature)
34
 
35
+ # all of them in one dictionary
36
  llms = {
37
  # OpenAI models with temperature
38
 
 
47
 
48
  # OpenAI reasoning models (no temperature)
49
  "o1": create_openai_reasoning_llm("o1-2024-12-17"),
50
+ "o3-mini (low-reasoning effort version)": create_openai_reasoning_llm("o3-mini", reasoning_effort="low"),
51
+ "o3-mini (medium-reasoning effort version)": create_openai_reasoning_llm("o3-mini", reasoning_effort="medium"),
52
+ "o3-mini (high-reasoning effort version)": create_openai_reasoning_llm("o3-mini", reasoning_effort="high"),
53
+
54
+ # Anthropic models (Claude)
55
+ "Claude 3.5 (low temp)": create_anthropic_llm("claude-3-5-sonnet-latest", LOW),
56
+ "Claude 3.5 (mid temp)": create_anthropic_llm("claude-3-5-sonnet-latest", MID),
57
+ "Claude 3.5 (high temp)": create_anthropic_llm("claude-3-5-sonnet-latest", HIGH),
58
+
59
+ # DeepSeek
60
+ "Deepseek R1 (low temp)🚧": create_anthropic_llm("deepseek-reasoner", LOW),
61
+ }
62
+
63
+ # specific for Diagnosis tab
64
+ llms_diagnosis_tab = {
65
+ # OpenAI models with temperature
66
+
67
+ "GPT-4o (low temp)": create_openai_llm("gpt-4o", LOW),
68
+ "GPT-4o (mid temp)": create_openai_llm("gpt-4o", MID),
69
+ "GPT-4o (high temp)": create_openai_llm("gpt-4o", HIGH),
70
+ "GPT-4o-mini-zero": create_openai_llm("gpt-4o-mini", ZERO),
71
+ "GPT-4o-mini": create_openai_llm("gpt-4o-mini", LOW),
72
+ "GPT-4o_high_temp": create_openai_llm("gpt-4o", HIGH),
73
+ "GPT-4o-mini_high_temp": create_openai_llm("gpt-4o-mini", HIGH),
74
+ "GPT-4 Turbo": create_openai_llm("gpt-4-turbo-2024-04-09", LOW),
75
+
76
+ # OpenAI reasoning models (no temperature)
77
+ "o1": create_openai_reasoning_llm("o1-2024-12-17"),
78
+ "o3-mini (low-reasoning version)": create_openai_reasoning_llm("o3-mini", reasoning_effort="low"),
79
+ "o3-mini (medium-reasoning version)": create_openai_reasoning_llm("o3-mini", reasoning_effort="medium"),
80
  "o3-mini (high-reasoning version)": create_openai_reasoning_llm("o3-mini", reasoning_effort="high"),
81
 
82
+
83
+
84
  # Anthropic models (Claude)
85
  "Claude 3.5 (low temp)": create_anthropic_llm("claude-3-5-sonnet-latest", LOW),
86
  "Claude 3.5 (mid temp)": create_anthropic_llm("claude-3-5-sonnet-latest", MID),