File size: 3,124 Bytes
c2ea5ed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
/**
 * Metadata for a selectable chat model, as displayed in the model picker.
 */
export interface ModelConfig {
  /** API identifier sent to the backend (e.g. "gpt-4o-mini"). */
  id: string;
  /** Human-readable display name. */
  name: string;
  /** Short blurb shown next to the model in the picker. */
  description: string;
  /** Which picker section the model belongs to. */
  category: "standard" | "reasoning";
  /** Marks the model suggested as the default choice. */
  recommended?: boolean;
  /** Relative latency bucket. */
  speed: "fast" | "medium" | "slow";
  /** Context-window size as a display string (e.g. "128K", "1M"). */
  contextWindow: string;
  /** Relative pricing bucket. */
  costLevel: "low" | "medium" | "high";
}

/**
 * Catalog of models offered in the UI, grouped by picker category.
 *
 * `satisfies` checks every entry against ModelConfig at compile time
 * (catching a typo'd field or a bad literal value) while preserving the
 * inferred literal types — which makes the per-field `as const`
 * assertions of the previous version unnecessary.
 */
export const AVAILABLE_MODELS = {
  standard: [
    {
      id: "gpt-4o-mini",
      name: "GPT-4o Mini",
      description: "Cost-effective model for most tasks",
      category: "standard",
      recommended: true,
      speed: "fast",
      contextWindow: "128K",
      costLevel: "low",
    },
    {
      id: "gpt-4o",
      name: "GPT-4o",
      description: "Balanced performance and capabilities",
      category: "standard",
      speed: "medium",
      contextWindow: "128K",
      costLevel: "medium",
    },
    {
      id: "gpt-4.1-nano",
      name: "GPT-4.1 Nano",
      description: "Fastest and most cost-effective latest model",
      category: "standard",
      speed: "fast",
      contextWindow: "1M",
      costLevel: "low",
    },
    {
      id: "gpt-4.1-mini",
      name: "GPT-4.1 Mini",
      description: "Balanced performance with large context",
      category: "standard",
      speed: "medium",
      contextWindow: "1M",
      costLevel: "medium",
    },
    {
      id: "gpt-4.1",
      name: "GPT-4.1",
      description: "Latest flagship model with 1M token context",
      category: "standard",
      speed: "medium",
      contextWindow: "1M",
      costLevel: "high",
    },
  ],
  reasoning: [
    {
      id: "o4-mini",
      name: "o4-mini",
      description: "Latest reasoning model for complex analysis",
      category: "reasoning",
      speed: "slow",
      contextWindow: "200K",
      costLevel: "medium",
    },
    {
      id: "o3",
      name: "o3",
      description: "Advanced reasoning for complex problems",
      category: "reasoning",
      speed: "slow",
      contextWindow: "128K",
      costLevel: "high",
    },
    {
      id: "o1",
      name: "o1",
      description: "Original reasoning model",
      category: "reasoning",
      speed: "slow",
      contextWindow: "128K",
      costLevel: "high",
    },
  ],
} satisfies Record<"standard" | "reasoning", ModelConfig[]>;

export const getModelById = (modelId: string): ModelConfig | undefined => {
  const allModels = [
    ...AVAILABLE_MODELS.standard,
    ...AVAILABLE_MODELS.reasoning,
  ];
  return allModels.find((model) => model.id === modelId);
};

/**
 * Identifier of the model used when no preference has been saved.
 *
 * @returns The default model id.
 */
export const getDefaultModel = (): string => {
  return "gpt-4o-mini";
};

/**
 * Resolve the display name of the user's currently selected model.
 *
 * Reads the saved preference from localStorage and falls back to the
 * default model when no valid preference exists, or when storage access
 * throws (e.g. storage disabled, or a non-browser environment).
 *
 * @returns The model's display name, or "Unknown Model" if even the
 *          default id cannot be resolved.
 */
export function getCurrentModelName(): string {
  // Single helper so the lookup-and-name chain is written once instead of
  // three times, and the saved model is looked up only once (the previous
  // version called getModelById twice for the same id).
  const nameOf = (modelId: string): string | undefined =>
    getModelById(modelId)?.name;

  try {
    const savedModel = localStorage.getItem("agentgraph_model_preference");
    if (savedModel) {
      const savedName = nameOf(savedModel);
      if (savedName) {
        return savedName;
      }
      // Unknown saved id: fall through to the default model below.
    }
  } catch (error) {
    // localStorage can throw (privacy mode, SSR); log and use the default.
    console.warn("Failed to get current model name:", error);
  }
  return nameOf(getDefaultModel()) ?? "Unknown Model";
}