AndrewKapok committed on
Commit
83ec0f7
·
verified ·
1 Parent(s): 1893a7e

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +32 -0
  2. dockerfile.dockerfile +22 -0
  3. start.sh +16 -0
app.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import requests
import json  # NOTE(review): appears unused in this file — confirm before removing

# Local Ollama chat endpoint (Ollama listens on port 11434 by default).
OLLAMA_URL = "http://localhost:11434/api/chat"
7
def chat(message, history):
    """Forward the user's message plus chat history to a local Ollama server.

    Args:
        message: The new user message (str).
        history: Prior turns supplied by gr.ChatInterface. Both formats are
            supported: the legacy list of (user_text, assistant_text) tuples,
            and the newer "messages" format (list of {"role", "content"}
            dicts). The original code only handled tuples and would silently
            unpack a dict's *keys* when given the messages format.

    Returns:
        The assistant's reply text on success, otherwise a human-readable
        error string (the UI shows errors inline instead of crashing).
    """
    # Rebuild the conversation in the OpenAI-style message list that
    # Ollama's /api/chat endpoint expects.
    messages = []
    for turn in history:
        if isinstance(turn, dict):
            # gr.ChatInterface(type="messages") already yields role/content dicts.
            messages.append({"role": turn["role"], "content": turn["content"]})
        else:
            # Legacy tuple format: (user_text, assistant_text).
            human, assistant = turn
            messages.append({"role": "user", "content": human})
            messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    # Call Ollama's chat endpoint (non-streaming).
    payload = {
        "model": "huihui_ai/deepseek-r1-abliterated:1.5b",
        "messages": messages,
        "stream": False  # set to True for streaming output
    }
    try:
        response = requests.post(OLLAMA_URL, json=payload, timeout=60)
        if response.status_code == 200:
            result = response.json()
            return result["message"]["content"]
        else:
            return f"Ollama 返回错误:{response.status_code} - {response.text}"
    except Exception as e:
        # Best-effort error reporting back into the chat UI
        # (connection refused, timeout, malformed JSON, missing keys).
        return f"请求失败:{str(e)}"
30
# Launch the Gradio chat UI on all interfaces at port 7860 (the port
# Hugging Face Spaces expects). Guarded so that importing this module
# (e.g. from a test) does not start a web server as a side effect.
if __name__ == "__main__":
    gr.ChatInterface(chat).launch(server_name="0.0.0.0", server_port=7860)
dockerfile.dockerfile ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use the official Ollama image as the base.
FROM ollama/ollama:latest

# Install Python 3 and pip.
RUN apt-get update && apt-get install -y python3 python3-pip && rm -rf /var/lib/apt/lists/*

# Install Gradio and requests.
RUN pip3 install gradio requests

# Pull the model at build time (avoids re-downloading on every start).
# `ollama pull` needs a running server, so start one temporarily within
# this RUN layer — a bare `RUN ollama pull ...` fails during build
# because no daemon is running.
RUN ollama serve & \
    sleep 5 && \
    ollama pull huihui_ai/deepseek-r1-abliterated:1.5b

# Copy the application script and the startup script.
COPY app.py /app.py
COPY start.sh /start.sh
RUN chmod +x /start.sh

# Expose the Gradio port (Hugging Face Spaces defaults to 7860).
EXPOSE 7860

# The base image sets ENTRYPOINT ["/bin/ollama"]; with that in place,
# CMD ["/start.sh"] would execute `ollama /start.sh` and fail. Override
# the entrypoint so the startup script actually runs.
ENTRYPOINT ["/start.sh"]
start.sh ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Start Ollama in the background, wait for its API, then run the Gradio app.
set -u

# Start the Ollama server in the background.
ollama serve &

# Wait for the Ollama API to become ready (try for at most 30 seconds).
# Best-effort: if it never comes up we still fall through and let the
# app report request failures in the UI.
for i in {1..30}; do
    if curl -s http://localhost:11434/api/tags > /dev/null; then
        echo "Ollama is ready."
        break
    fi
    echo "Waiting for Ollama... ($i/30)"
    sleep 1
done

# Start the Gradio app. `exec` replaces this shell so python3 receives
# container stop signals directly instead of being orphaned behind bash.
exec python3 /app.py