icebear0828 Claude Opus 4.6 committed on
Commit
7824fcd
·
1 Parent(s): 5b8472d

feat: add gpt-5.4 + codex-spark support, matrix model selector, extended reasoning efforts

Browse files

- Add gpt-5.4 (4 efforts: minimal/low/medium/high) and gpt-5.3-codex-spark (minimal/low) to models.yaml
- Update codex alias to gpt-5.4
- Generalize isCodexModelId → isCodexCompatibleId to match bare gpt-X.Y models
- Add /v1/models/catalog endpoint returning full CodexModelInfo[] with reasoning efforts
- Replace flat model dropdown with family × reasoning effort matrix selector in dashboard
- Add reasoning_effort to code examples when non-default effort is selected
- Bump client to v26.305.950 (build 863)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

CHANGELOG.md CHANGED
@@ -8,6 +8,11 @@
8
 
9
  ### Added
10
 
 
 
 
 
 
11
  - Reasoning/Thinking 输出支持:始终向 Codex API 发送 `summary: "auto"` 以获取推理摘要事件;OpenAI 路由在客户端发送 `reasoning_effort` 时以 `reasoning_content` 输出;Anthropic 路由在客户端发送 `thinking.type: enabled/adaptive` 时以 thinking block 输出;未知 SSE 事件记录到 debug 日志以便发现新事件类型
12
  - 图片输入支持:OpenAI、Anthropic、Gemini 三种格式的图片内容现在可以正确透传到 Codex 后端(`input_image` + data URI),此前图片被静默丢弃
13
  - 每窗口使用量计数器:Dashboard 主显示当前窗口内的请求数和 Token 用量,累计总量降为次要灰色小字;窗口过期时自动归零(时间驱动,零 API 开销),后端同步作为双保险校正
 
8
 
9
  ### Added
10
 
11
+ - GPT-5.4 + Codex Spark 模型支持:新增 `gpt-5.4`(4 种 effort: minimal/low/medium/high)和 `gpt-5.3-codex-spark`(minimal/low),`codex` 别名更新为 `gpt-5.4`
12
+ - 扩展推理等级:支持 `minimal`、`xhigh` 等新 effort 值,客户端发送的任意 `reasoning_effort` 均透传到后端
13
+ - 模型家族矩阵选择器:Dashboard 模型选择从平面下拉改为家族列表 + 推理等级按钮组,通过 `/v1/models/catalog` 端点获取完整目录
14
+ - 泛化模型识别:`isCodexCompatibleId()` 同时匹配 `gpt-X.Y-codex-*` 和裸 `gpt-X.Y` 格式,确保新模型命名规范变化时自动接入
15
+ - 代码示例动态 reasoning_effort:CodeExamples 组件根据选中的推理等级自动插入 `reasoning_effort` 参数
16
  - Reasoning/Thinking 输出支持:始终向 Codex API 发送 `summary: "auto"` 以获取推理摘要事件;OpenAI 路由在客户端发送 `reasoning_effort` 时以 `reasoning_content` 输出;Anthropic 路由在客户端发送 `thinking.type: enabled/adaptive` 时以 thinking block 输出;未知 SSE 事件记录到 debug 日志以便发现新事件类型
17
  - 图片输入支持:OpenAI、Anthropic、Gemini 三种格式的图片内容现在可以正确透传到 Codex 后端(`input_image` + data URI),此前图片被静默丢弃
18
  - 每窗口使用量计数器:Dashboard 主显示当前窗口内的请求数和 Token 用量,累计总量降为次要灰色小字;窗口过期时自动归零(时间驱动,零 API 开销),后端同步作为双保险校正
config/default.yaml CHANGED
@@ -3,8 +3,8 @@ api:
3
  timeout_seconds: 60
4
  client:
5
  originator: Codex Desktop
6
- app_version: 26.303.1606
7
- build_number: "806"
8
  platform: darwin
9
  arch: arm64
10
  chromium_version: "144"
 
3
  timeout_seconds: 60
4
  client:
5
  originator: Codex Desktop
6
+ app_version: 26.305.950
7
+ build_number: "863"
8
  platform: darwin
9
  arch: arm64
10
  chromium_version: "144"
config/models.yaml CHANGED
@@ -7,6 +7,21 @@
7
  # Dynamic fetch merges with static; backend entries win for shared IDs.
8
 
9
  models:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  # ── GPT-5.3 Codex family ──────────────────────────────────────────
11
  - id: gpt-5.3-codex
12
  displayName: GPT-5.3 Codex
@@ -79,6 +94,18 @@ models:
79
  supportsPersonality: false
80
  upgrade: null
81
 
 
 
 
 
 
 
 
 
 
 
 
 
82
  # ── GPT-5.2 Codex family ──────────────────────────────────────────
83
  - id: gpt-5.2-codex
84
  displayName: GPT-5.2 Codex
@@ -298,4 +325,4 @@ models:
298
  upgrade: null
299
 
300
  aliases:
301
- codex: "gpt-5.2-codex"
 
7
  # Dynamic fetch merges with static; backend entries win for shared IDs.
8
 
9
  models:
10
+ # ── GPT-5.4 ──────────────────────────────────────────────────────
11
+ - id: gpt-5.4
12
+ displayName: GPT-5.4
13
+ description: Latest Codex flagship model
14
+ isDefault: false
15
+ supportedReasoningEfforts:
16
+ - { reasoningEffort: minimal, description: "Minimal reasoning" }
17
+ - { reasoningEffort: low, description: "Fastest responses" }
18
+ - { reasoningEffort: medium, description: "Balanced speed and quality" }
19
+ - { reasoningEffort: high, description: "Deepest reasoning" }
20
+ defaultReasoningEffort: medium
21
+ inputModalities: [text, image]
22
+ supportsPersonality: true
23
+ upgrade: null
24
+
25
  # ── GPT-5.3 Codex family ──────────────────────────────────────────
26
  - id: gpt-5.3-codex
27
  displayName: GPT-5.3 Codex
 
94
  supportsPersonality: false
95
  upgrade: null
96
 
97
+ - id: gpt-5.3-codex-spark
98
+ displayName: GPT-5.3 Codex Spark
99
+ description: GPT-5.3 Codex — ultra-lightweight, lowest latency
100
+ isDefault: false
101
+ supportedReasoningEfforts:
102
+ - { reasoningEffort: minimal, description: "Minimal reasoning" }
103
+ - { reasoningEffort: low, description: "Fastest responses" }
104
+ defaultReasoningEffort: low
105
+ inputModalities: [text]
106
+ supportsPersonality: false
107
+ upgrade: null
108
+
109
  # ── GPT-5.2 Codex family ──────────────────────────────────────────
110
  - id: gpt-5.2-codex
111
  displayName: GPT-5.2 Codex
 
325
  upgrade: null
326
 
327
  aliases:
328
+ codex: "gpt-5.4"
shared/hooks/use-status.ts CHANGED
@@ -1,13 +1,62 @@
1
- import { useState, useEffect, useCallback } from "preact/hooks";
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
  export function useStatus(accountCount: number) {
4
  const [baseUrl, setBaseUrl] = useState("Loading...");
5
  const [apiKey, setApiKey] = useState("Loading...");
6
  const [models, setModels] = useState<string[]>(["codex"]);
7
  const [selectedModel, setSelectedModel] = useState("codex");
 
 
8
 
9
  const loadModels = useCallback(async () => {
10
  try {
 
 
 
 
 
 
11
  const resp = await fetch("/v1/models");
12
  const data = await resp.json();
13
  const ids: string[] = data.data.map((m: { id: string }) => m.id);
@@ -37,5 +86,35 @@ export function useStatus(accountCount: number) {
37
  loadStatus();
38
  }, [loadModels, accountCount]);
39
 
40
- return { baseUrl, apiKey, models, selectedModel, setSelectedModel };
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  }
 
1
+ import { useState, useEffect, useCallback, useMemo } from "preact/hooks";
2
+
3
+ export interface CatalogModel {
4
+ id: string;
5
+ displayName: string;
6
+ supportedReasoningEfforts: { reasoningEffort: string; description: string }[];
7
+ defaultReasoningEffort: string;
8
+ }
9
+
10
+ export interface ModelFamily {
11
+ id: string;
12
+ displayName: string;
13
+ efforts: { reasoningEffort: string; description: string }[];
14
+ defaultEffort: string;
15
+ }
16
+
17
+ /**
18
+ * Extract model family ID from a model ID.
19
+ * gpt-5.3-codex-high → gpt-5.3-codex
20
+ * gpt-5.3-codex-spark → gpt-5.3-codex-spark (spark is a distinct family)
21
+ * gpt-5.4 → gpt-5.4
22
+ */
23
+ function getFamilyId(id: string): string {
24
+ // Bare model: gpt-5.4
25
+ if (/^gpt-\d+(?:\.\d+)?$/.test(id)) return id;
26
+ // Spark family: gpt-X.Y-codex-spark
27
+ if (/^gpt-\d+(?:\.\d+)?-codex-spark$/.test(id)) return id;
28
+ // Mini family: gpt-X.Y-codex-mini
29
+ if (/^gpt-\d+(?:\.\d+)?-codex-mini$/.test(id)) return id;
30
+ // Codex base or tier variant (high/mid/low/max): family = gpt-X.Y-codex
31
+ const m = id.match(/^(gpt-\d+(?:\.\d+)?-codex)(?:-(?:high|mid|low|max))?$/);
32
+ if (m) return m[1];
33
+ // Legacy: gpt-5-codex, gpt-5-codex-mini
34
+ const legacy = id.match(/^(gpt-\d+-codex)(?:-(?:high|mid|low|max|mini))?$/);
35
+ if (legacy) return legacy[1];
36
+ return id;
37
+ }
38
+
39
+ /** Check if a model ID is a tier variant (not the base family model). */
40
+ function isTierVariant(id: string): boolean {
41
+ return /^gpt-\d+(?:\.\d+)?-codex-(?:high|mid|low|max)$/.test(id);
42
+ }
43
 
44
  export function useStatus(accountCount: number) {
45
  const [baseUrl, setBaseUrl] = useState("Loading...");
46
  const [apiKey, setApiKey] = useState("Loading...");
47
  const [models, setModels] = useState<string[]>(["codex"]);
48
  const [selectedModel, setSelectedModel] = useState("codex");
49
+ const [modelCatalog, setModelCatalog] = useState<CatalogModel[]>([]);
50
+ const [selectedEffort, setSelectedEffort] = useState("medium");
51
 
52
  const loadModels = useCallback(async () => {
53
  try {
54
+ // Fetch full catalog for effort info
55
+ const catalogResp = await fetch("/v1/models/catalog");
56
+ const catalogData: CatalogModel[] = await catalogResp.json();
57
+ setModelCatalog(catalogData);
58
+
59
+ // Also fetch model list (includes aliases)
60
  const resp = await fetch("/v1/models");
61
  const data = await resp.json();
62
  const ids: string[] = data.data.map((m: { id: string }) => m.id);
 
86
  loadStatus();
87
  }, [loadModels, accountCount]);
88
 
89
+ // Build model families: group the catalog by family, excluding tier variants
90
+ const modelFamilies = useMemo((): ModelFamily[] => {
91
+ if (modelCatalog.length === 0) return [];
92
+
93
+ const familyMap = new Map<string, ModelFamily>();
94
+ for (const m of modelCatalog) {
95
+ const fid = getFamilyId(m.id);
96
+ // Only use the base family model (not tier variants) to define the family
97
+ if (isTierVariant(m.id)) continue;
98
+ if (familyMap.has(fid)) continue;
99
+ familyMap.set(fid, {
100
+ id: fid,
101
+ displayName: m.displayName,
102
+ efforts: m.supportedReasoningEfforts,
103
+ defaultEffort: m.defaultReasoningEffort,
104
+ });
105
+ }
106
+ return [...familyMap.values()];
107
+ }, [modelCatalog]);
108
+
109
+ return {
110
+ baseUrl,
111
+ apiKey,
112
+ models,
113
+ selectedModel,
114
+ setSelectedModel,
115
+ selectedEffort,
116
+ setSelectedEffort,
117
+ modelFamilies,
118
+ modelCatalog,
119
+ };
120
  }
src/models/model-store.ts CHANGED
@@ -115,9 +115,11 @@ function normalizeBackendModel(raw: BackendModelEntry): NormalizedModelWithMeta
115
  };
116
  }
117
 
118
- /** Check if a model ID is a Codex model (gpt-X.Y-codex or gpt-X.Y-codex-tier). */
119
- function isCodexModelId(id: string): boolean {
120
- return /^gpt-\d+(\.\d+)?-codex/.test(id);
 
 
121
  }
122
 
123
  /**
@@ -137,7 +139,7 @@ export function applyBackendModels(backendModels: BackendModelEntry[]): void {
137
  const staticIds = new Set(_catalog.map((m) => m.id));
138
  const filtered = backendModels.filter((raw) => {
139
  const id = raw.slug ?? raw.id ?? raw.name ?? "";
140
- return staticIds.has(id) || isCodexModelId(id);
141
  });
142
 
143
  const staticMap = new Map(_catalog.map((m) => [m.id, m]));
 
115
  };
116
  }
117
 
118
+ /** Check if a model ID is Codex-compatible (gpt-X.Y-codex-* or bare gpt-X.Y). */
119
+ function isCodexCompatibleId(id: string): boolean {
120
+ if (/^gpt-\d+(\.\d+)?-codex/.test(id)) return true;
121
+ if (/^gpt-\d+(\.\d+)?$/.test(id)) return true;
122
+ return false;
123
  }
124
 
125
  /**
 
139
  const staticIds = new Set(_catalog.map((m) => m.id));
140
  const filtered = backendModels.filter((raw) => {
141
  const id = raw.slug ?? raw.id ?? raw.name ?? "";
142
+ return staticIds.has(id) || isCodexCompatibleId(id);
143
  });
144
 
145
  const staticMap = new Map(_catalog.map((m) => [m.id, m]));
src/routes/models.ts CHANGED
@@ -78,6 +78,11 @@ export function createModelRoutes(): Hono {
78
  });
79
  });
80
 
 
 
 
 
 
81
  // Extended endpoint: model details with reasoning efforts
82
  app.get("/v1/models/:modelId/info", (c) => {
83
  const modelId = c.req.param("modelId");
 
78
  });
79
  });
80
 
81
+ // Full catalog with reasoning efforts (for dashboard UI)
82
+ app.get("/v1/models/catalog", (c) => {
83
+ return c.json(getModelCatalog());
84
+ });
85
+
86
  // Extended endpoint: model details with reasoning efforts
87
  app.get("/v1/models/:modelId/info", (c) => {
88
  const modelId = c.req.param("modelId");
web/src/App.tsx CHANGED
@@ -39,6 +39,9 @@ function Dashboard() {
39
  models={status.models}
40
  selectedModel={status.selectedModel}
41
  onModelChange={status.setSelectedModel}
 
 
 
42
  />
43
  <AnthropicSetup
44
  apiKey={status.apiKey}
@@ -48,6 +51,7 @@ function Dashboard() {
48
  baseUrl={status.baseUrl}
49
  apiKey={status.apiKey}
50
  model={status.selectedModel}
 
51
  />
52
  </div>
53
  </main>
 
39
  models={status.models}
40
  selectedModel={status.selectedModel}
41
  onModelChange={status.setSelectedModel}
42
+ modelFamilies={status.modelFamilies}
43
+ selectedEffort={status.selectedEffort}
44
+ onEffortChange={status.setSelectedEffort}
45
  />
46
  <AnthropicSetup
47
  apiKey={status.apiKey}
 
51
  baseUrl={status.baseUrl}
52
  apiKey={status.apiKey}
53
  model={status.selectedModel}
54
+ reasoningEffort={status.selectedEffort}
55
  />
56
  </div>
57
  </main>
web/src/components/ApiConfig.tsx CHANGED
@@ -1,6 +1,7 @@
1
  import { useT } from "../../../shared/i18n/context";
2
  import { CopyButton } from "./CopyButton";
3
  import { useCallback } from "preact/hooks";
 
4
 
5
  interface ApiConfigProps {
6
  baseUrl: string;
@@ -8,14 +9,53 @@ interface ApiConfigProps {
8
  models: string[];
9
  selectedModel: string;
10
  onModelChange: (model: string) => void;
 
 
 
11
  }
12
 
13
- export function ApiConfig({ baseUrl, apiKey, models, selectedModel, onModelChange }: ApiConfigProps) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  const t = useT();
15
 
16
  const getBaseUrl = useCallback(() => baseUrl, [baseUrl]);
17
  const getApiKey = useCallback(() => apiKey, [apiKey]);
18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  return (
20
  <section class="bg-white dark:bg-card-dark border border-gray-200 dark:border-border-dark rounded-xl p-5 shadow-sm transition-colors">
21
  <div class="flex items-center justify-between mb-6 border-b border-slate-100 dark:border-border-dark pb-4">
@@ -41,25 +81,65 @@ export function ApiConfig({ baseUrl, apiKey, models, selectedModel, onModelChang
41
  <CopyButton getText={getBaseUrl} class="absolute right-2" titleKey="copyUrl" />
42
  </div>
43
  </div>
44
- {/* Default Model */}
45
  <div class="space-y-1.5">
46
  <label class="text-xs font-semibold text-slate-700 dark:text-text-main">{t("defaultModel")}</label>
47
- <div class="relative">
48
- <select
49
- class="w-full appearance-none pl-3 pr-10 py-2.5 bg-white dark:bg-bg-dark border border-gray-200 dark:border-border-dark rounded-lg text-[0.78rem] text-slate-700 dark:text-text-main font-medium focus:ring-1 focus:ring-primary focus:border-primary outline-none cursor-pointer transition-colors"
50
- value={selectedModel}
51
- onChange={(e) => onModelChange((e.target as HTMLSelectElement).value)}
52
- >
53
- {models.map((m) => (
54
- <option key={m} value={m}>{m}</option>
55
- ))}
56
- </select>
57
- <div class="pointer-events-none absolute inset-y-0 right-0 flex items-center px-2 text-slate-500 dark:text-text-dim">
58
- <svg class="size-[18px]" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
59
- <path stroke-linecap="round" stroke-linejoin="round" d="M19.5 8.25l-7.5 7.5-7.5-7.5" />
60
- </svg>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  </div>
62
- </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  </div>
64
  {/* API Key */}
65
  <div class="space-y-1.5 md:col-span-2">
 
1
  import { useT } from "../../../shared/i18n/context";
2
  import { CopyButton } from "./CopyButton";
3
  import { useCallback } from "preact/hooks";
4
+ import type { ModelFamily } from "../../../shared/hooks/use-status";
5
 
6
  interface ApiConfigProps {
7
  baseUrl: string;
 
9
  models: string[];
10
  selectedModel: string;
11
  onModelChange: (model: string) => void;
12
+ modelFamilies: ModelFamily[];
13
+ selectedEffort: string;
14
+ onEffortChange: (effort: string) => void;
15
  }
16
 
17
+ const EFFORT_LABELS: Record<string, string> = {
18
+ none: "None",
19
+ minimal: "Minimal",
20
+ low: "Low",
21
+ medium: "Medium",
22
+ high: "High",
23
+ xhigh: "XHigh",
24
+ };
25
+
26
+ export function ApiConfig({
27
+ baseUrl,
28
+ apiKey,
29
+ models,
30
+ selectedModel,
31
+ onModelChange,
32
+ modelFamilies,
33
+ selectedEffort,
34
+ onEffortChange,
35
+ }: ApiConfigProps) {
36
  const t = useT();
37
 
38
  const getBaseUrl = useCallback(() => baseUrl, [baseUrl]);
39
  const getApiKey = useCallback(() => apiKey, [apiKey]);
40
 
41
+ // When a family is selected, update model + snap effort to default if current effort is unsupported
42
+ const handleFamilySelect = useCallback(
43
+ (family: ModelFamily) => {
44
+ onModelChange(family.id);
45
+ const supportedEfforts = family.efforts.map((e) => e.reasoningEffort);
46
+ if (!supportedEfforts.includes(selectedEffort)) {
47
+ onEffortChange(family.defaultEffort);
48
+ }
49
+ },
50
+ [onModelChange, onEffortChange, selectedEffort],
51
+ );
52
+
53
+ // Find the currently selected family's supported efforts
54
+ const currentFamily = modelFamilies.find((f) => f.id === selectedModel);
55
+ const currentEfforts = currentFamily?.efforts ?? [];
56
+
57
+ const showMatrix = modelFamilies.length > 0;
58
+
59
  return (
60
  <section class="bg-white dark:bg-card-dark border border-gray-200 dark:border-border-dark rounded-xl p-5 shadow-sm transition-colors">
61
  <div class="flex items-center justify-between mb-6 border-b border-slate-100 dark:border-border-dark pb-4">
 
81
  <CopyButton getText={getBaseUrl} class="absolute right-2" titleKey="copyUrl" />
82
  </div>
83
  </div>
84
+ {/* Model selector — matrix or flat fallback */}
85
  <div class="space-y-1.5">
86
  <label class="text-xs font-semibold text-slate-700 dark:text-text-main">{t("defaultModel")}</label>
87
+ {showMatrix ? (
88
+ <div class="border border-gray-200 dark:border-border-dark rounded-lg overflow-hidden">
89
+ {/* Model family list */}
90
+ <div class="max-h-[200px] overflow-y-auto">
91
+ {modelFamilies.map((f) => (
92
+ <button
93
+ key={f.id}
94
+ onClick={() => handleFamilySelect(f)}
95
+ class={`w-full text-left px-3 py-2 text-[0.78rem] font-medium border-b border-gray-100 dark:border-border-dark last:border-b-0 transition-colors ${
96
+ selectedModel === f.id
97
+ ? "bg-primary/10 text-primary dark:bg-primary/20"
98
+ : "text-slate-700 dark:text-text-main hover:bg-slate-50 dark:hover:bg-[#21262d]"
99
+ }`}
100
+ >
101
+ {f.displayName}
102
+ </button>
103
+ ))}
104
+ </div>
105
+ {/* Reasoning effort buttons for selected family */}
106
+ {currentEfforts.length > 1 && (
107
+ <div class="flex gap-1.5 p-2 bg-slate-50 dark:bg-bg-dark/50 border-t border-gray-200 dark:border-border-dark flex-wrap">
108
+ {currentEfforts.map((e) => (
109
+ <button
110
+ key={e.reasoningEffort}
111
+ onClick={() => onEffortChange(e.reasoningEffort)}
112
+ title={e.description}
113
+ class={`px-2.5 py-1 text-[0.7rem] font-semibold rounded transition-all ${
114
+ selectedEffort === e.reasoningEffort
115
+ ? "bg-primary text-white shadow-sm"
116
+ : "bg-white dark:bg-[#21262d] text-slate-600 dark:text-text-dim border border-gray-200 dark:border-border-dark hover:border-primary/50"
117
+ }`}
118
+ >
119
+ {EFFORT_LABELS[e.reasoningEffort] ?? e.reasoningEffort}
120
+ </button>
121
+ ))}
122
+ </div>
123
+ )}
124
  </div>
125
+ ) : (
126
+ <div class="relative">
127
+ <select
128
+ class="w-full appearance-none pl-3 pr-10 py-2.5 bg-white dark:bg-bg-dark border border-gray-200 dark:border-border-dark rounded-lg text-[0.78rem] text-slate-700 dark:text-text-main font-medium focus:ring-1 focus:ring-primary focus:border-primary outline-none cursor-pointer transition-colors"
129
+ value={selectedModel}
130
+ onChange={(e) => onModelChange((e.target as HTMLSelectElement).value)}
131
+ >
132
+ {models.map((m) => (
133
+ <option key={m} value={m}>{m}</option>
134
+ ))}
135
+ </select>
136
+ <div class="pointer-events-none absolute inset-y-0 right-0 flex items-center px-2 text-slate-500 dark:text-text-dim">
137
+ <svg class="size-[18px]" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
138
+ <path stroke-linecap="round" stroke-linejoin="round" d="M19.5 8.25l-7.5 7.5-7.5-7.5" />
139
+ </svg>
140
+ </div>
141
+ </div>
142
+ )}
143
  </div>
144
  {/* API Key */}
145
  <div class="space-y-1.5 md:col-span-2">
web/src/components/CodeExamples.tsx CHANGED
@@ -21,8 +21,18 @@ function buildExamples(
21
  baseUrl: string,
22
  apiKey: string,
23
  model: string,
24
- origin: string
 
25
  ): Record<string, string> {
 
 
 
 
 
 
 
 
 
26
  return {
27
  "openai-python": `from openai import OpenAI
28
 
@@ -33,7 +43,7 @@ client = OpenAI(
33
 
34
  response = client.chat.completions.create(
35
  model="${model}",
36
- messages=[{"role": "user", "content": "Hello"}],
37
  )
38
  print(response.choices[0].message.content)`,
39
 
@@ -42,7 +52,7 @@ print(response.choices[0].message.content)`,
42
  -H "Authorization: Bearer ${apiKey}" \\
43
  -d '{
44
  "model": "${model}",
45
- "messages": [{"role": "user", "content": "Hello"}]
46
  }'`,
47
 
48
  "openai-node": `import OpenAI from "openai";
@@ -54,7 +64,7 @@ const client = new OpenAI({
54
 
55
  const stream = await client.chat.completions.create({
56
  model: "${model}",
57
- messages: [{ role: "user", content: "Hello" }],
58
  stream: true,
59
  });
60
  for await (const chunk of stream) {
@@ -137,17 +147,18 @@ interface CodeExamplesProps {
137
  baseUrl: string;
138
  apiKey: string;
139
  model: string;
 
140
  }
141
 
142
- export function CodeExamples({ baseUrl, apiKey, model }: CodeExamplesProps) {
143
  const t = useT();
144
  const [protocol, setProtocol] = useState<Protocol>("openai");
145
  const [codeLang, setCodeLang] = useState<CodeLang>("python");
146
 
147
  const origin = typeof window !== "undefined" ? window.location.origin : "";
148
  const examples = useMemo(
149
- () => buildExamples(baseUrl, apiKey, model, origin),
150
- [baseUrl, apiKey, model, origin]
151
  );
152
 
153
  const currentCode = examples[`${protocol}-${codeLang}`] || "Loading...";
 
21
  baseUrl: string,
22
  apiKey: string,
23
  model: string,
24
+ origin: string,
25
+ reasoningEffort: string
26
  ): Record<string, string> {
27
+ const effortLine = reasoningEffort && reasoningEffort !== "medium"
28
+ ? `\n reasoning_effort="${reasoningEffort}",`
29
+ : "";
30
+ const effortJson = reasoningEffort && reasoningEffort !== "medium"
31
+ ? `,\n "reasoning_effort": "${reasoningEffort}"`
32
+ : "";
33
+ const effortJs = reasoningEffort && reasoningEffort !== "medium"
34
+ ? `\n reasoning_effort: "${reasoningEffort}",`
35
+ : "";
36
  return {
37
  "openai-python": `from openai import OpenAI
38
 
 
43
 
44
  response = client.chat.completions.create(
45
  model="${model}",
46
+ messages=[{"role": "user", "content": "Hello"}],${effortLine}
47
  )
48
  print(response.choices[0].message.content)`,
49
 
 
52
  -H "Authorization: Bearer ${apiKey}" \\
53
  -d '{
54
  "model": "${model}",
55
+ "messages": [{"role": "user", "content": "Hello"}]${effortJson}
56
  }'`,
57
 
58
  "openai-node": `import OpenAI from "openai";
 
64
 
65
  const stream = await client.chat.completions.create({
66
  model: "${model}",
67
+ messages: [{ role: "user", content: "Hello" }],${effortJs}
68
  stream: true,
69
  });
70
  for await (const chunk of stream) {
 
147
  baseUrl: string;
148
  apiKey: string;
149
  model: string;
150
+ reasoningEffort: string;
151
  }
152
 
153
+ export function CodeExamples({ baseUrl, apiKey, model, reasoningEffort }: CodeExamplesProps) {
154
  const t = useT();
155
  const [protocol, setProtocol] = useState<Protocol>("openai");
156
  const [codeLang, setCodeLang] = useState<CodeLang>("python");
157
 
158
  const origin = typeof window !== "undefined" ? window.location.origin : "";
159
  const examples = useMemo(
160
+ () => buildExamples(baseUrl, apiKey, model, origin, reasoningEffort),
161
+ [baseUrl, apiKey, model, origin, reasoningEffort]
162
  );
163
 
164
  const currentCode = examples[`${protocol}-${codeLang}`] || "Loading...";