3v324v23 committed on
Commit
1c098c5
·
1 Parent(s): c4523a3

fix bug: OpenAI client initialization

Browse files
api/services/ai.service.ts CHANGED
@@ -14,14 +14,27 @@ import axios from 'axios';
14
 
15
  dotenv.config();
16
 
17
- const openai = new OpenAI({
18
- apiKey: process.env.OPENAI_API_KEY,
19
- baseURL: process.env.OPENAI_API_BASE_URL || 'https://api.siliconflow.cn/v1',
20
- });
21
-
22
  const MODEL = process.env.MODEL_NAME || 'Qwen/Qwen2.5-7B-Instruct';
23
 
24
  export class AIService {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  /**
26
  * 增强型 RAG 检索对话 (Enterprise Grade: Hybrid Search + Rerank + Agentic Flow)
27
  */
@@ -110,7 +123,7 @@ export class AIService {
110
  // ... (保持不变)
111
 
112
  // 5. 流式生成响应
113
- const stream = await openai.chat.completions.create({
114
  model: model || MODEL,
115
  messages: [
116
  {
@@ -158,7 +171,7 @@ ${context || '无本地知识库匹配。'}
158
 
159
  static async generateTitle(query: string, response: string): Promise<string> {
160
  try {
161
- const completion = await openai.chat.completions.create({
162
  model: MODEL,
163
  messages: [
164
  {
@@ -319,7 +332,7 @@ ${context || '无本地知识库匹配。'}
319
  }
320
 
321
  static async getEmbedding(text: string): Promise<number[]> {
322
- const response = await openai.embeddings.create({
323
  model: 'BAAI/bge-m3',
324
  input: text.replace(/\n/g, ' ').slice(0, 8192),
325
  });
 
14
 
15
  dotenv.config();
16
 
 
 
 
 
 
17
  const MODEL = process.env.MODEL_NAME || 'Qwen/Qwen2.5-7B-Instruct';
18
 
19
  export class AIService {
20
+ private static _openai: OpenAI | null = null;
21
+
22
+ static get openai() {
23
+ if (!this._openai) {
24
+ const apiKey = process.env.OPENAI_API_KEY || 'sk-no-key-provided';
25
+
26
+ if (!process.env.OPENAI_API_KEY) {
27
+ console.warn('[AIService] 未检测到 OPENAI_API_KEY,AI 功能将不可用。请在环境变量中设置。');
28
+ }
29
+
30
+ this._openai = new OpenAI({
31
+ apiKey: apiKey,
32
+ baseURL: process.env.OPENAI_API_BASE_URL || 'https://api.siliconflow.cn/v1',
33
+ });
34
+ }
35
+ return this._openai;
36
+ }
37
+
38
  /**
39
  * 增强型 RAG 检索对话 (Enterprise Grade: Hybrid Search + Rerank + Agentic Flow)
40
  */
 
123
  // ... (保持不变)
124
 
125
  // 5. 流式生成响应
126
+ const stream = await AIService.openai.chat.completions.create({
127
  model: model || MODEL,
128
  messages: [
129
  {
 
171
 
172
  static async generateTitle(query: string, response: string): Promise<string> {
173
  try {
174
+ const completion = await AIService.openai.chat.completions.create({
175
  model: MODEL,
176
  messages: [
177
  {
 
332
  }
333
 
334
  static async getEmbedding(text: string): Promise<number[]> {
335
+ const response = await AIService.openai.embeddings.create({
336
  model: 'BAAI/bge-m3',
337
  input: text.replace(/\n/g, ' ').slice(0, 8192),
338
  });
api/services/workflow.service.ts CHANGED
@@ -63,10 +63,7 @@ export class WorkflowService {
63
  try {
64
  // 构造类似 Chat 的请求,直接利用 AIService 调用 OpenAI
65
  // 由于 AIService.chatWithKnowledge 是为 RAG 设计的,我们这里手动调用 openai 以获得更好的控制
66
- const completion = await new OpenAI({
67
- apiKey: process.env.OPENAI_API_KEY,
68
- baseURL: process.env.OPENAI_API_BASE_URL || 'https://api.siliconflow.cn/v1',
69
- }).chat.completions.create({
70
  model: 'Qwen/Qwen2.5-7B-Instruct',
71
  messages: [
72
  { role: 'system', content: systemPrompt },
 
63
  try {
64
  // 构造类似 Chat 的请求,直接利用 AIService 调用 OpenAI
65
  // 由于 AIService.chatWithKnowledge 是为 RAG 设计的,我们这里手动调用 openai 以获得更好的控制
66
+ const completion = await AIService.openai.chat.completions.create({
 
 
 
67
  model: 'Qwen/Qwen2.5-7B-Instruct',
68
  messages: [
69
  { role: 'system', content: systemPrompt },