Upload 3 files

Files changed:
- README.md +41 -1
- app.py +21 -17
- requirements.txt +4 -2
README.md
CHANGED
```diff
@@ -7,6 +7,46 @@ sdk: gradio
 sdk_version: 6.3.0
 app_file: app.py
 pinned: false
+license: apache-2.0
 ---
 
+# 🚀 BigData Ops Copilot (Qwen2.5-Coder-7B)
+
+A lightweight AI assistant built for **big data operations (O&M)** scenarios. It runs Alibaba's open-source `Qwen2.5-Coder-7B-Instruct-GGUF` model on the free compute tier of Hugging Face Spaces for zero-cost deployment.
+
+## 🌟 Core Capabilities
+- **Component diagnostics**: covers mainstream big data components such as Hadoop, Spark, Flink, Kafka, Hive, and HBase.
+- **Performance tuning**: offers advice on Spark memory tuning, Hive skew optimization, and ClickHouse query acceleration.
+- **Ops scripting**: generates Shell, Python, and SQL operations scripts and monitoring logic.
+- **Log analysis**: paste an error stacktrace to quickly pinpoint common failures such as OOM, timeouts, and permission errors.
+
+## 🛠 Deployment Architecture
+- **Model format**: GGUF (Q4_K_M quantization).
+- **Backend framework**: llama-cpp-python + FastAPI.
+- **Frontend UI**: Gradio.
+- **Hardware**: Hugging Face CPU Basic (2 vCPU / 16 GB RAM).
+
+---
+
+## 🔌 Dify Integration Guide (API Mode)
+
+This Space exposes an OpenAI-compatible interface, so it plugs directly into LLM application platforms such as Dify.
+
+### 1. Get the API details
+- **API Endpoint**: `https://coco1990-bigdata-ops-copilot.hf.space/v1`
+- **API Key**: your Hugging Face Access Token
+- **Model name**: `qwen2.5-coder-7b`
+
+### 2. Configure Dify
+1. In Dify, open **Settings -> Model Providers**.
+2. Add a provider of type **OpenAI-API-compatible**.
+3. Enter the Endpoint and Key above to complete the connection.
+
+> **⚠️ Notes**:
+> - **Auto-sleep**: the free Space tier goes to sleep after a period of inactivity. If a Dify call fails, open this page manually to wake it up.
+> - **Inference speed**: the model runs on CPU, so the first token may take 5-10 seconds; raise the request timeout in Dify accordingly.
+
+---
+
+## 📜 Disclaimer
+The model's suggestions are for reference only. Always validate any script or configuration change in a test environment before applying it in production.
```
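
To sanity-check the endpoint outside Dify, the sketch below calls it with the `openai` Python client. The base URL, model name, and token role come straight from the README above; the `HF_TOKEN` environment variable and the sample prompt are illustrative assumptions.

```python
import os
from openai import OpenAI

# Endpoint and model name as documented in the README; HF_TOKEN is an
# assumed environment variable holding your Hugging Face Access Token.
client = OpenAI(
    base_url="https://coco1990-bigdata-ops-copilot.hf.space/v1",
    api_key=os.environ["HF_TOKEN"],
)

# Stream a chat completion, mirroring what Dify sends over the wire.
stream = client.chat.completions.create(
    model="qwen2.5-coder-7b",
    messages=[{"role": "user", "content": "A Spark executor died with an OOM. What should I check first?"}],
    stream=True,
)
for chunk in stream:
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)
```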
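Because the free tier auto-sleeps, the first scripted call can fail or hang while the Space cold-starts and reloads the model. A small wake-up helper is sketched below; the URL is from the README, while the retry count and delays are arbitrary assumptions.

```python
import time
import requests

SPACE_URL = "https://coco1990-bigdata-ops-copilot.hf.space/"

# Poll the Space root until it responds; a cold start has to reload the
# quantized GGUF file, so allow several minutes before giving up.
for attempt in range(20):
    try:
        if requests.get(SPACE_URL, timeout=30).ok:
            print("Space is awake")
            break
    except requests.RequestException:
        pass
    time.sleep(15)
```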
app.py
CHANGED
```diff
@@ -1,32 +1,36 @@
+import os
 import gradio as gr
 from llama_cpp import Llama
+from llama_cpp.server.app import create_app
 from huggingface_hub import hf_hub_download
+import uvicorn
 
-# 1.
-# The Q4_K_M build is recommended: it balances performance against the 16 GB RAM limit
+# 1. Download the model
 model_path = hf_hub_download(
     repo_id="Qwen/Qwen2.5-Coder-7B-Instruct-GGUF",
     filename="qwen2.5-coder-7b-instruct-q4_k_m.gguf"
 )
 
-# 2.
+# 2. Initialize the model core
+# Note: n_ctx is kept at 4096 for API performance
 llm = Llama(
     model_path=model_path,
     n_ctx=4096,
     n_threads=2
 )
 
+# 3. Create the OpenAI-compatible API app (built on FastAPI)
+app = create_app(llm)
+
+# 4. Define the Gradio UI logic (kept so the Space can still be used directly as an ops assistant)
+def respond(message, history):
+    system_prompt = "You are a senior big data operations (O&M) expert. Provide the user with code-tuning and troubleshooting advice for components such as Hadoop, Spark, and Flink."
     messages = [{"role": "system", "content": system_prompt}]
     for user_msg, assistant_msg in history:
         messages.append({"role": "user", "content": user_msg})
         messages.append({"role": "assistant", "content": assistant_msg})
     messages.append({"role": "user", "content": message})
 
-    # Streaming generation
     response_text = ""
     for chunk in llm.create_chat_completion(messages=messages, stream=True):
         delta = chunk['choices'][0]['delta']
@@ -34,12 +38,12 @@ def respond(message, history):
         if 'content' in delta:
             response_text += delta['content']
             yield response_text
 
+demo = gr.ChatInterface(fn=respond, title="BigData Ops Copilot (API Mode)")
+
+# 5. Key step: mount Gradio onto the FastAPI routes,
+# so that visiting the root URL shows the UI while Dify calls the API under /v1
+app = gr.mount_gradio_app(app, demo, path="/")
 
 if __name__ == "__main__":
+    # Hugging Face Spaces requires the app to listen on port 7860
+    uvicorn.run(app, host="0.0.0.0", port=7860)
```
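One caveat on the diff above: in the llama-cpp-python releases I know of, `create_app` is configured through the server's `Settings` model rather than an already-constructed `Llama` object, so `create_app(llm)` may fail with a validation error at startup. Below is a minimal sketch of the Settings-based wiring; the import path and field names match recent releases but should be verified against the installed version.

```python
# Sketch (assumption): configure the OpenAI-compatible server via its
# Settings model instead of passing a live Llama object. Verify the import
# path and field names against your installed llama-cpp-python version.
from llama_cpp.server.app import create_app
from llama_cpp.server.settings import Settings

settings = Settings(
    model=model_path,  # GGUF path returned by hf_hub_download above
    n_ctx=4096,
    n_threads=2,
)
app = create_app(settings=settings)
```

Note that the server builds its own `Llama` instance from these settings, so keeping a separate module-level `llm` for the Gradio callback would load the 7B model twice; on a 16 GB instance it may be safer to have the UI call the same HTTP API instead.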
requirements.txt
CHANGED
```diff
@@ -1,3 +1,5 @@
-llama-cpp-python
+llama-cpp-python[server]
 gradio
 huggingface_hub
+fastapi
+uvicorn
```