Spaces:
Paused
Paused
yuanjiajun committed on
Commit ·
4592957
1
Parent(s): 478f94c
feat: gpt接口
Browse files- .gitattributes +2 -35
- Dockerfile +16 -0
- LICENSE +21 -0
- README.md +0 -11
- demo.py +107 -0
- requirements.txt +5 -0
.gitattributes
CHANGED
|
@@ -1,35 +1,2 @@
|
|
| 1 |
-
|
| 2 |
-
*
|
| 3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 1 |
+
# Auto detect text files and perform LF normalization
|
| 2 |
+
* text=auto
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Dockerfile
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
|
| 2 |
+
# you will also find guides on how best to write your Dockerfile
|
| 3 |
+
|
| 4 |
+
FROM python:3.9
|
| 5 |
+
|
| 6 |
+
RUN useradd -m -u 1000 user
|
| 7 |
+
USER user
|
| 8 |
+
ENV PATH="/home/user/.local/bin:$PATH"
|
| 9 |
+
|
| 10 |
+
WORKDIR /app
|
| 11 |
+
|
| 12 |
+
COPY --chown=user ./requirements.txt requirements.txt
|
| 13 |
+
RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
| 14 |
+
|
| 15 |
+
COPY --chown=user . /app
|
| 16 |
+
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2023-2024 牛爷爷信息技术(南京)有限公司
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
DELETED
|
@@ -1,11 +0,0 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: Joe Llm Api
|
| 3 |
-
emoji: 🌍
|
| 4 |
-
colorFrom: yellow
|
| 5 |
-
colorTo: pink
|
| 6 |
-
sdk: docker
|
| 7 |
-
pinned: false
|
| 8 |
-
license: mit
|
| 9 |
-
---
|
| 10 |
-
|
| 11 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
demo.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from openai import OpenAI
|
| 2 |
+
from fastapi import FastAPI, HTTPException, Request
|
| 3 |
+
from fastapi.responses import StreamingResponse
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
import os

# OpenAI-compatible client pointed at the ChatAnywhere proxy endpoint.
# SECURITY(review): the API key below is committed in plain text and must be
# treated as leaked — rotate it, then supply the replacement via the
# OPENAI_API_KEY environment variable. The hard-coded value remains only as
# a backward-compatible fallback so existing deployments keep working.
client = OpenAI(
    api_key=os.environ.get(
        "OPENAI_API_KEY",
        "sk-u2QYG3NlYTQ6eE9uNoWFhUCSyS71oY5K43rtKexk9f4XZ7Zv",
    ),
    base_url=os.environ.get("OPENAI_BASE_URL", "https://api.chatanywhere.tech/v1"),
)

# FastAPI application object; uvicorn serves this (see Dockerfile CMD / __main__).
app = FastAPI()
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@app.post("/gpt-35-api")
async def gpt_35_api_http(request: Request):
    """Handle POST /gpt-35-api.

    Sends the conversation messages from the JSON request body to the
    gpt-3.5-turbo model in a single, non-streaming call and returns the
    generated answer.

    Returns:
        dict: ``{"result": <answer text>}``.

    Raises:
        HTTPException: 500 when parsing the body or calling the model fails.
    """
    try:
        payload = await request.json()
        # Forward the caller-supplied message list directly to the model.
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=payload.get("messages"),
            temperature=0.8,
            top_p=0.9,
            frequency_penalty=0.3,
            presence_penalty=0.2,
        )
        # The first choice carries the model's answer.
        answer = completion.choices[0].message.content
        return {"result": answer}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"调用GPT-3.5-Turbo时出错: {e}")
|
| 41 |
+
|
| 42 |
+
@app.post("/gpt-35-api-stream")
async def gpt_35_api_stream_http(request: Request):
    """Handle POST /gpt-35-api-stream.

    Sends the conversation messages from the JSON request body to the
    gpt-3.5-turbo model with ``stream=True`` and relays the answer to the
    caller chunk by chunk as plain text.

    Returns:
        StreamingResponse: incremental plain-text stream of the answer.

    Raises:
        HTTPException: 500 when parsing the body or creating the stream fails.
        Errors raised while iterating the stream occur after the response has
        started and are NOT converted to a 500 by this handler.
    """
    try:
        json_data = await request.json()
        messages = json_data.get("messages")

        stream = client.chat.completions.create(
            model='gpt-3.5-turbo',
            messages=messages,
            stream=True,
            temperature=0.8,
            top_p=0.9,
            frequency_penalty=0.3,
            presence_penalty=0.2,
        )

        async def generate():
            # BUG FIX: the original iterated the undefined name `chunk_stream`
            # (the create() result is bound to `stream`), which raised a
            # NameError as soon as the response body started streaming.
            for chunk in stream:
                if chunk.choices[0].delta.content is not None:
                    yield chunk.choices[0].delta.content

        return StreamingResponse(generate(), media_type="text/plain")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"调用GPT-3.5-Turbo时出错: {e}")
|
| 74 |
+
|
| 75 |
+
@app.post("/gpt-4o-mini")
async def call_gpt_4_mini_http(request: Request):
    """Handle POST /gpt-4o-mini.

    Sends the conversation messages from the JSON request body to the
    gpt-4o-mini model and returns the generated reply.

    Returns:
        dict: ``{"result": <answer text, whitespace-stripped>}``.

    Raises:
        HTTPException: 500 when parsing the body or calling the model fails.
    """
    try:
        json_data = await request.json()
        messages = json_data.get("messages")

        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            temperature=0.8,
            top_p=0.9,
            frequency_penalty=0.3,
            presence_penalty=0.2,
        )
        # Leftover debug prints of the raw response object were removed here —
        # they dumped full upstream payloads (including message content) to
        # the server log on every request.
        message_content = response.choices[0].message.content
        return {"result": message_content.strip()}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"调用GPT-4 Mini时出错: {e}")
|
| 103 |
+
|
| 104 |
+
if __name__ == "__main__":
    # Local development entry point; in the Docker image the server is
    # launched by the image's CMD instead, on the same port.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
openai
|
| 2 |
+
requests
|
| 3 |
+
flask
|
| 4 |
+
uvicorn
|
| 5 |
+
fastapi
|