# NOTE: "Spaces: Paused" text removed — it was hosting-page (HF Spaces) status
# residue from page extraction, not part of the program.
import random

from openai import OpenAI
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import StreamingResponse
# SECURITY NOTE(review): API keys are hard-coded in source control. These are
# effectively leaked credentials — rotate them and load the list from an
# environment variable or a secrets manager instead.
api_key_list = ["sk-u2QYG3NlYTQ6eE9uNoWFhUCSyS71oY5K43rtKexk9f4XZ7Zv", "sk-viTcmovp3lqZSNVaTWEmnFbSfzb6Uo9QdCeZ3nfI5KmHO8Zz"]

app = FastAPI()

# One OpenAI client per configured API key, all pointed at the same proxy base URL.
clients = [
    OpenAI(api_key=api_key, base_url="https://api.chatanywhere.tech/v1")
    for api_key in api_key_list
]
def get_random_client():
    """Return one of the pre-built OpenAI clients chosen uniformly at random.

    Acts as naive load balancing across the configured API keys.
    """
    picked = random.choice(clients)
    return picked
async def root_http(request: Request):
    """Entry-point handler for the service root.

    NOTE(review): no route decorators or `app.add_api_route` calls are visible
    in this file for any handler — confirm the routes are registered elsewhere,
    otherwise FastAPI never serves them.

    Returns:
        dict: {"result": <banner string identifying this GPT API space>}
    """
    banner = {"result": 'gpt api空间'}
    return banner
async def gpt_35_api_http(request: Request):
    """Handle the /gpt-35-api POST request: non-streaming chat completion.

    Reads {"messages": [...]} from the JSON request body, forwards it to a
    randomly chosen OpenAI client, and returns the model's reply.

    Returns:
        dict: {"result": <reply text of the first completion choice>}

    Raises:
        HTTPException: 400 when the body has no "messages"; 500 when the
            upstream GPT-3.5-Turbo call fails.
    """
    try:
        json_data = await request.json()
        messages = json_data.get("messages")
        # Fail fast with a client error instead of forwarding None/empty
        # messages upstream, which previously surfaced as an opaque 500.
        if not messages:
            raise HTTPException(status_code=400, detail="请求体缺少messages字段")
        client = get_random_client()
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=0.5,
            top_p=0.9,
            frequency_penalty=0.3,
            presence_penalty=0.2,
        )
        # First choice is the model's primary answer.
        return {"result": completion.choices[0].message.content}
    except HTTPException:
        # Preserve deliberate status codes (e.g. the 400 above) instead of
        # letting the broad handler below rewrap them as 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"调用GPT-3.5-Turbo时出错: {e}")
async def gpt_35_api_stream_http(request: Request):
    """Handle the /gpt-35-api-stream POST request: streaming chat completion.

    Reads {"messages": [...]} from the JSON request body and relays the
    model's reply token-by-token as a plain-text stream.

    Returns:
        StreamingResponse: incremental text chunks of the model's reply.

    Raises:
        HTTPException: 500 when the upstream GPT-3.5-Turbo call fails.
    """
    try:
        json_data = await request.json()
        messages = json_data.get("messages")
        client = get_random_client()
        stream = client.chat.completions.create(
            model='gpt-3.5-turbo',
            messages=messages,
            stream=True,
            temperature=0.5,
            top_p=0.9,
            frequency_penalty=0.3,
            presence_penalty=0.2,
        )

        async def generate():
            # BUG FIX: the original iterated the undefined name `chunk_stream`
            # (the variable is `stream`), so every request died with NameError.
            for chunk in stream:
                if chunk.choices[0].delta.content is not None:
                    yield chunk.choices[0].delta.content

        return StreamingResponse(generate(), media_type="text/plain")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"调用GPT-3.5-Turbo时出错: {e}")
async def call_gpt_4_mini_http(request: Request):
    """Handle the /call-gpt-4-mini POST request: reply via the gpt-4o-mini model.

    Reads {"messages": [...]} from the JSON request body, forwards it to a
    randomly chosen OpenAI client, and returns the stripped reply text.

    Returns:
        dict: {"result": <stripped reply text>}

    Raises:
        HTTPException: 500 when the upstream GPT-4 Mini call fails.
    """
    try:
        json_data = await request.json()
        messages = json_data.get("messages")
        client = get_random_client()
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            temperature=0.5,
            top_p=0.9,
            frequency_penalty=0.3,
            presence_penalty=0.2,
        )
        # Removed leftover debug print() calls that dumped the full API
        # response (including user content) to stdout on every request.
        # Guard against a None content (e.g. refusal/tool responses), which
        # previously crashed on .strip() and surfaced as a 500.
        message_content = response.choices[0].message.content or ""
        return {"result": message_content.strip()}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"调用GPT-4 Mini时出错: {e}")
if __name__ == "__main__":
    # Serve the ASGI app directly when this file is executed as a script.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)