File size: 3,328 Bytes
60fde3b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
# src/llm_generation/api_client.py

import asyncio
from email import message
import requests
import json
import sys
import os
import logging
from typing import Dict, Any, Optional, List
from dotenv import load_dotenv
import httpx
from pathlib import Path

# FileHandler is assumed to live here; if the file-upload feature is not used,
# this import can remain commented out.
# from src.utils.file_handler import FileHandler 


logger = logging.getLogger(__name__)

class AIAPIClient:
    """Thin synchronous/async-aware client for OpenAI-compatible chat APIs.

    Holds the endpoint configuration plus an ``asyncio.Semaphore`` that
    async callers can use to bound the number of in-flight requests.
    """

    def __init__(self, api_key: str, base_url: str, api_type: str = "openai", concurrency_limit: int = 5):
        """
        Args:
            api_key: Bearer token sent in the ``Authorization`` header.
            base_url: Full endpoint URL; assumed to already point at
                ``.../chat/completions`` (see ``call_chat``).
            api_type: API flavor tag; currently informational only.
            concurrency_limit: Initial value of the shared semaphore.
        """
        self.api_key = api_key
        self.base_url = base_url
        self.api_type = api_type
        # Shared by async callers to cap concurrent requests.
        self.semaphore = asyncio.Semaphore(concurrency_limit)

    def get_headers(self) -> Dict[str, str]:
        """Return the JSON content-type and Bearer-auth headers."""
        return {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}"
        }

    def build_request_body(self, messages: List[Dict], model: str, **kwargs) -> Dict[str, Any]:
        """Build an OpenAI-format chat-completion request body.

        Accepts an OpenAI-style ``messages`` list directly. ``temperature``
        (default 0.7) and ``max_tokens`` (default 1000) may be overridden
        via keyword arguments.
        """
        return {
            "model": model,
            "messages": messages,
            "temperature": kwargs.get("temperature", 0.7),
            "max_tokens": kwargs.get("max_tokens", 1000)
        }

    def _print_error_details(self, response) -> None:
        """Print the HTTP status code and as much error detail as available."""
        print(f"❌ HTTP 状态码: {response.status_code}")
        try:
            error_json = response.json()
            print(f"❌ API 错误信息 (JSON):")
            print(json.dumps(error_json, indent=2, ensure_ascii=False))
        except ValueError:
            # requests raises a ValueError subclass when the body is not
            # JSON; catching ValueError (rather than json.JSONDecodeError)
            # also covers older requests/simplejson decode errors.
            text = response.text
            print(f"❌ 错误详情 (Text/HTML): {text[:500]}...")

    def call_chat(self, messages: List[Dict[str, str]], model: str, **kwargs) -> str:
        """Synchronous chat endpoint used by generator.py.

        Args:
            messages: OpenAI-format messages list (not split into prompts).
            model: Model identifier to request.
            **kwargs: Optional ``temperature`` (default 0.7) and
                ``max_tokens`` (default 65536).

        Returns:
            The assistant's plain-text reply, or ``""`` on any failure so
            the caller can decide whether to retry or skip.
        """
        headers = self.get_headers()
        # Reuse the shared body builder instead of duplicating the dict
        # literal; call_chat keeps its own, much larger max_tokens default.
        body = self.build_request_body(
            messages,
            model,
            temperature=kwargs.get("temperature", 0.7),
            max_tokens=kwargs.get("max_tokens", 65536),
        )

        # base_url is assumed to be the complete chat/completions endpoint
        # (e.g. https://host/v1/chat/completions), not just a domain.
        endpoint = self.base_url

        try:
            response = requests.post(
                endpoint,
                headers=headers,
                json=body,
                timeout=600
            )
            response.raise_for_status()
            resp_json = response.json()
            return resp_json["choices"][0]["message"]["content"]

        except requests.exceptions.RequestException as e:
            print(f"❌ 请求异常: {e}")
            # .response is None for transport-level errors (no HTTP reply).
            if getattr(e, "response", None) is not None:
                self._print_error_details(e.response)
            return ""  # empty string signals failure; caller retries or skips
        except (KeyError, IndexError, ValueError) as e:
            # A 2xx response whose payload is not the expected
            # {"choices": [{"message": {"content": ...}}]} shape previously
            # escaped as an uncaught exception; treat it as a failure too.
            print(f"❌ 响应解析失败: {e}")
            return ""