import os
import uuid
import logging
from datetime import datetime
from fastapi import FastAPI, HTTPException, Depends, Request
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.responses import HTMLResponse
from pydantic import BaseModel, Field
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from typing import List, Optional

# ------------------- 1. Logging configuration -------------------
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S"
)
logger = logging.getLogger("cross-encoder-api")

# ------------------- 2. Basic configuration (cache + environment variables) -------------------
os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface_cache"
os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/huggingface_cache"

# Read the API key from an environment variable (OpenAI-style)
API_KEY = os.getenv("OPENAI_API_KEY")
if not API_KEY:
    logger.error("环境变量 OPENAI_API_KEY 未设置")
    raise ValueError("请设置环境变量 OPENAI_API_KEY")
logger.info("API Key 加载成功")

# ------------------- 3. Initialize the FastAPI application -------------------
app = FastAPI(
    title="OpenAI 兼容的 Cross-Encoder 重排序 API",
    description="基于 cross-encoder/ms-marco-MiniLM-L-6-v2 的文本相关性排序接口",
    version="1.0.0"
)

# ------------------- 4. OpenAI-style authentication (Bearer token) -------------------
oauth2_scheme = HTTPBearer(auto_error=False)

def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(oauth2_scheme)):
    """Validate the API key: it must be passed as 'Authorization: Bearer YOUR_API_KEY'."""
    request_id = str(uuid.uuid4())[:8]  # short request ID for log tracing
    if not credentials:
        logger.warning(f"请求 {request_id}:缺少认证信息")
        raise HTTPException(
            status_code=401,
            detail="缺少认证信息(请使用 'Authorization: Bearer YOUR_API_KEY')",
            headers={"WWW-Authenticate": "Bearer"}
        )
    if credentials.scheme != "Bearer":
        logger.warning(f"请求 {request_id}:认证方案错误,应为 Bearer,实际为 {credentials.scheme}")
        raise HTTPException(
            status_code=401,
            detail="认证方案错误(请使用 'Bearer' 方案)",
            headers={"WWW-Authenticate": "Bearer"}
        )
    if credentials.credentials != API_KEY:
        logger.warning(f"请求 {request_id}:无效的 API Key")
        raise HTTPException(
            status_code=401,
            detail="无效的 API Key",
            headers={"WWW-Authenticate": "Bearer"}
        )
    logger.info(f"请求 {request_id}:API Key 验证通过")
    return (credentials.credentials, request_id)  # return the API key and the request ID
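
# Clients authenticate in the standard OpenAI style, i.e. with the HTTP header
#   Authorization: Bearer YOUR_API_KEY
# Requests without this header, with a non-Bearer scheme, or with a wrong key receive a 401.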

# ------------------- 5. Data model definitions -------------------
class RerankRequest(BaseModel):
    query: str
    documents: List[str]
    top_k: Optional[int] = 3
    truncation: Optional[bool] = True

class DocumentScore(BaseModel):
    document: str
    relevance_score: float
    index: int

class RerankResponse(BaseModel):
    request_id: str
    query: str
    top_k: int
    results: List[DocumentScore]
    model: str = "cross-encoder/ms-marco-MiniLM-L-6-v2"
    # default_factory ensures the timestamp is generated per response, not once at import time
    timestamp: str = Field(default_factory=lambda: datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3])

# GPT-compatible request/response models
class GPTMessage(BaseModel):
    role: str
    content: str

class GPTRequest(BaseModel):
    model: str
    messages: List[GPTMessage]
    top_k: Optional[int] = 3

class Choice(BaseModel):
    index: int
    message: GPTMessage
    finish_reason: str = "stop"

class GPTResponse(BaseModel):
    # default_factory so a fresh id and timestamp are generated per response,
    # instead of being fixed once when the class is defined
    id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4().hex}")
    object: str = "chat.completion"
    created: int = Field(default_factory=lambda: int(datetime.now().timestamp()))
    model: str
    choices: List[Choice]
    usage: dict = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
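
# Illustrative shape of a GPT-compatible request to /v1/chat/completions (values are
# examples only; the "content" format mirrors what gpt_compatible_rerank parses below):
#   {
#     "model": "cross-encoder/ms-marco-MiniLM-L-6-v2",
#     "messages": [{"role": "user",
#                   "content": "query: 什么是机器学习?; documents: 机器学习是AI的分支; Python是编程语言;"}],
#     "top_k": 2
#   }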

# ------------------- 6. Load the Cross-Encoder model -------------------
class CrossEncoderModel:
    def __init__(self, model_name: str = "cross-encoder/ms-marco-MiniLM-L-6-v2"):
        self.model_name = model_name
        logger.info(f"开始加载模型:{model_name}")
        
        # Verify that the cache directory exists and is writable
        cache_dir = os.environ.get("TRANSFORMERS_CACHE", "/tmp/huggingface_cache")
        try:
            os.makedirs(cache_dir, exist_ok=True)
            test_file = os.path.join(cache_dir, "test.txt")
            with open(test_file, "w") as f:
                f.write("test")
            os.remove(test_file)
            logger.info(f"缓存目录可写:{cache_dir}")
        except Exception as e:
            logger.error(f"缓存目录不可写:{str(e)}")
            raise RuntimeError(f"缓存目录不可写:{str(e)}")
        
        # Load the tokenizer and model weights
        try:
            logger.info("开始加载分词器...")
            self.tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
            logger.info("分词器加载完成")
            
            logger.info("开始加载模型权重...")
            self.model = AutoModelForSequenceClassification.from_pretrained(model_name, cache_dir=cache_dir)
            logger.info("模型权重加载完成")
            
            self.device = "cuda" if torch.cuda.is_available() else "cpu"
            self.model.to(self.device)
            self.model.eval()
            logger.info(f"模型加载完成,使用设备:{self.device}")
        except Exception as e:
            logger.error(f"模型加载失败:{str(e)}")
            raise

    def rerank(self, query: str, documents: List[str], top_k: int, truncation: bool, request_id: str) -> List[DocumentScore]:
        """Core reranking logic with detailed per-request logging."""
        logger.info(f"请求 {request_id}:开始重排序处理,查询长度: {len(query)}, 文档数量: {len(documents)}, top_k: {top_k}")
        
        # Parameter validation
        if not documents:
            logger.warning(f"请求 {request_id}:候选文档列表为空")
            raise ValueError("候选文档不能为空")
        if top_k <= 0:
            logger.warning(f"请求 {request_id}:无效的 top_k 值: {top_k}")
            raise ValueError("top_k 必须为正整数")
        
        # Clamp top_k to the number of documents so it never exceeds the candidate count
        adjusted_top_k = min(top_k, len(documents))
        if adjusted_top_k != top_k:
            logger.info(f"请求 {request_id}:top_k 从 {top_k} 调整为 {adjusted_top_k}(文档数量限制)")
        
        # Compute a relevance score for each document
        doc_scores = []
        try:
            for i, doc in enumerate(documents):
                if i % 5 == 0:  # log progress every 5 documents
                    logger.info(f"请求 {request_id}:正在处理第 {i+1}/{len(documents)} 个文档")
                
                # Encode query and document as a sentence pair, which is the input
                # format the cross-encoder expects (rather than manually inserting [SEP])
                inputs = self.tokenizer(
                    query,
                    doc,
                    return_tensors="pt",
                    padding=True,
                    truncation=truncation,
                    max_length=512
                ).to(self.device)
                
                with torch.no_grad():
                    outputs = self.model(**inputs)
                
                score = outputs.logits.item()
                doc_scores.append((doc, score))
                logger.debug(f"请求 {request_id}:文档 {i+1} 分数: {score:.4f}")
            
            # Sort by score and return the top results
            sorted_docs = sorted(doc_scores, key=lambda x: x[1], reverse=True)[:adjusted_top_k]
            logger.info(f"请求 {request_id}:重排序完成,返回 {len(sorted_docs)} 个结果")
            
            return [
                DocumentScore(document=doc, relevance_score=round(score, 4), index=i)
                for i, (doc, score) in enumerate(sorted_docs)
            ]
        except Exception as e:
            logger.error(f"请求 {request_id}:重排序过程出错: {str(e)}")
            raise

# Initialize the model (single global instance)
try:
    reranker = CrossEncoderModel()
except Exception as e:
    logger.critical(f"模型初始化失败,服务无法启动: {str(e)}")
    raise
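
# A minimal local sanity check of the reranker, independent of the HTTP layer
# (a sketch for interactive use only; the values are placeholders):
#
#   scores = reranker.rerank(
#       query="什么是机器学习?",
#       documents=["机器学习是AI的分支", "Python是编程语言"],
#       top_k=2,
#       truncation=True,
#       request_id="local-test",
#   )
#   # -> a list of DocumentScore(document=..., relevance_score=..., index=...)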

# ------------------- 7. API endpoints (OpenAI-style paths) -------------------
# 7.1 Root path landing page
@app.get("/", response_class=HTMLResponse)
async def home_page(request: Request):
    client_ip = request.client.host
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    logger.info(f"首页访问来自 {client_ip}")
    return f"""
<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <title>OpenAI 兼容重排序 API</title>
    <style>
        body {{ font-family: Arial, sans-serif; max-width: 1200px; margin: 0 auto; padding: 20px; }}
        h1 {{ color: #2c3e50; border-bottom: 2px solid #3498db; padding-bottom: 10px; }}
        h2 {{ color: #34495e; margin-top: 30px; }}
        pre {{ background: #f8f9fa; padding: 15px; border-radius: 5px; border: 1px solid #e9ecef; overflow-x: auto; }}
        table {{ border-collapse: collapse; width: 100%; margin: 20px 0; }}
        th, td {{ border: 1px solid #e9ecef; padding: 12px; text-align: left; }}
        th {{ background-color: #f1f5f9; }}
    </style>
</head>
<body>
    <h1>OpenAI 兼容的 Cross-Encoder 重排序 API</h1>
    <p>基于 <code>cross-encoder/ms-marco-MiniLM-L-6-v2</code> 模型,支持 OpenAI 风格 API 调用。</p>

    <h2>接口列表</h2>
    <table>
        <tr>
            <th>接口</th>
            <th>URL</th>
            <th>方法</th>
            <th>认证</th>
        </tr>
        <tr>
            <td>基础重排序</td>
            <td class="api-url">/v1/rerank</td>
            <td>POST</td>
            <td>Authorization: Bearer API_KEY</td>
        </tr>
        <tr>
            <td>GPT 兼容重排序</td>
            <td class="api-url">/v1/chat/completions</td>
            <td>POST</td>
            <td>Authorization: Bearer API_KEY</td>
        </tr>
        <tr>
            <td>健康检查</td>
            <td class="api-url">/v1/health</td>
            <td>GET</td>
            <td>无需认证</td>
        </tr>
    </table>

    <h2>调用示例(Python)</h2>
    <pre><code>import openai

client = openai.OpenAI(
    api_key="YOUR_API_KEY",
    base_url="https://your-space.hf.space/v1"  # 替换为你的 Space URL
)

response = client.chat.completions.create(
    model="cross-encoder/ms-marco-MiniLM-L-6-v2",
    messages=[
        {{
            "role": "user",
            "content": "query: 什么是机器学习?; documents: 机器学习是AI的分支; Python是编程语言;"
        }}
    ],
    top_k=2
)

print(response.choices[0].message.content)</code></pre>
</body>
</html>
"""

# 7.2 Basic rerank endpoint (/v1/rerank)
@app.post("/v1/rerank", response_model=RerankResponse)
async def base_rerank(
    request: RerankRequest,
    auth_result: tuple = Depends(verify_api_key)
):
    api_key, request_id = auth_result
    try:
        logger.info(f"请求 {request_id}:收到 /v1/rerank 请求,query: {request.query[:50]}...(截断显示)")
        
        # Run the reranker
        results = reranker.rerank(
            query=request.query,
            documents=request.documents,
            top_k=request.top_k,
            truncation=request.truncation,
            request_id=request_id
        )
        
        # Build the response
        response = RerankResponse(
            request_id=request_id,
            query=request.query,
            top_k=min(request.top_k, len(request.documents)),
            results=results
        )
        
        logger.info(f"请求 {request_id}:处理完成,返回 {len(results)} 个结果")
        return response
        
    except ValueError as e:
        logger.warning(f"请求 {request_id}:参数错误 - {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"请求 {request_id}:服务器错误 - {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"服务器错误:{str(e)}")

# 7.3 GPT-compatible endpoint (/v1/chat/completions)
@app.post("/v1/chat/completions", response_model=GPTResponse)
async def gpt_compatible_rerank(
    request: GPTRequest,
    auth_result: tuple = Depends(verify_api_key)
):
    api_key, request_id = auth_result
    try:
        logger.info(f"请求 {request_id}:收到 /v1/chat/completions 请求,模型: {request.model}")
        
        # Validate the model name
        if request.model != reranker.model_name:
            error_msg = f"仅支持模型:{reranker.model_name},实际请求:{request.model}"
            logger.warning(f"请求 {request_id}:{error_msg}")
            raise ValueError(error_msg)
        
        # Validate the message format
        if not request.messages:
            logger.warning(f"请求 {request_id}:消息列表为空")
            raise ValueError("消息列表不能为空")
        if request.messages[-1].role != "user":
            error_msg = f"最后一条消息必须是 'user' 角色,实际为:{request.messages[-1].role}"
            logger.warning(f"请求 {request_id}:{error_msg}")
            raise ValueError(error_msg)
        
        # Parse the input content
        content = request.messages[-1].content
        logger.info(f"请求 {request_id}:用户输入: {content[:100]}...(截断显示)")
        
        if "; documents: " not in content:
            error_msg = "输入格式需为 'query: [查询]; documents: [文档1]; [文档2]; ...'"
            logger.warning(f"请求 {request_id}:{error_msg}")
            raise ValueError(error_msg)
        
        query_part, docs_part = content.split("; documents: ", 1)  # split only on the first separator
        query = query_part.replace("query: ", "").strip()
        documents = [doc.strip() for doc in docs_part.split(";") if doc.strip()]
        
        logger.info(f"请求 {request_id}:解析完成,query: {query[:50]}..., 文档数量: {len(documents)}")
        
        # Run the reranker
        results = reranker.rerank(
            query=query,
            documents=documents,
            top_k=request.top_k,
            truncation=True,
            request_id=request_id
        )
        
        # Build a GPT-style response
        response = GPTResponse(
            model=request.model,
            choices=[
                Choice(
                    index=0,
                    message=GPTMessage(
                        role="assistant",
                        content=f"重排序结果:{results}"
                    )
                )
            ]
        )
        
        logger.info(f"请求 {request_id}:处理完成,返回 {len(results)} 个结果")
        return response
        
    except ValueError as e:
        logger.warning(f"请求 {request_id}:参数错误 - {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"请求 {request_id}:服务器错误 - {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"服务器错误:{str(e)}")

# 7.4 Health check endpoint (/v1/health)
@app.get("/v1/health")
async def health_check(request: Request):
    client_ip = request.client.host
    status = {
        "status": "healthy",
        "model": reranker.model_name,
        "device": reranker.device,
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "uptime": datetime.now().strftime("%Y-%m-%d %H:%M:%S")  # simplified "uptime" (just the current time)
    }
    logger.info(f"健康检查来自 {client_ip}:{status['status']}")
    return status

# ------------------- 8. Local run entry point -------------------
if __name__ == "__main__":
    import uvicorn
    logger.info("启动本地开发服务器...")
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=7860,
        log_config=None  # use the custom logging configuration above
    )
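
# A minimal client-side sketch for calling /v1/rerank, assuming the server runs
# locally on port 7860 (adjust the URL and API key for your deployment):
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/v1/rerank",
#       headers={"Authorization": "Bearer YOUR_API_KEY"},
#       json={
#           "query": "什么是机器学习?",
#           "documents": ["机器学习是AI的分支", "Python是编程语言"],
#           "top_k": 2,
#       },
#       timeout=60,
#   )
#   print(resp.json()["results"])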