import gradio as gr
import json
import base64
from io import BytesIO
import requests
import os

# HF token & model
HF_TOKEN = os.environ.get("HF_CV_ROBOT_TOKEN")
MODEL = "Qwen/Qwen2.5-VL-7B-Instruct"  # confirm this model supports vision-language input (it currently does)

if not HF_TOKEN:
    print("ERROR: HF_CV_ROBOT_TOKEN environment variable not set.")

# -------------------------------
# Main processing function
# -------------------------------
def process(payload: dict):
    try:
        if not HF_TOKEN:
            return {"error": "Hugging Face token is missing. Please check Space secrets."}

        robot_id = payload.get("robot_id", "unknown")
        image_b64 = payload["image_b64"]

        # ------------------------------------------------
        # ⭐ 1) Decode base64 and save it as temp.jpg
        #       (kept for debugging; the request below embeds the base64 string directly)
        # ------------------------------------------------
        img_bytes = base64.b64decode(image_b64)
        temp_path = "temp.jpg"
        with open(temp_path, "wb") as f:
            f.write(img_bytes)
        # ------------------------------------------------
        # ⭐ 2) Build the chat-completions payload (text prompt + image as a data URL)
        # ------------------------------------------------
        data = {
            "model": MODEL,
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Describe this image in detail."},
                        {
                            "type": "image_url",
                            "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}
                        }
                    ]
                }
            ]
        }

        # ------------------------------------------------
        # ⭐ 3) POST the JSON payload to the HF router (OpenAI-compatible chat completions API)
        # ------------------------------------------------
        resp = requests.post(
            "https://router.huggingface.co/v1/chat/completions",
            headers={"Authorization": f"Bearer {HF_TOKEN}"},
            json=data,
            timeout=60
        )
        # ------------------------------------------------
        # ⭐ 4) Handle the response
        # ------------------------------------------------
        if resp.status_code != 200:
            print(f"VLM API error: {resp.status_code}, {resp.text}")
            return {"error": f"VLM API error: {resp.status_code}, {resp.text}"}

        # Parse the content: it is usually a plain string, but some providers
        # return a list of parts, so handle both cases
        try:
            content = resp.json()["choices"][0]["message"]["content"]
            if isinstance(content, str):
                vlm_text = content
            else:
                vlm_text = "".join(
                    part["text"] for part in content if part.get("type") == "text"
                )
        except Exception as e:
            return {"error": f"Failed to parse VLM response: {e}, Response text: {resp.text}"}

        return {
            "received": True,
            "robot_id": robot_id,
            "vllm_analysis": vlm_text.strip()
        }
    except Exception as e:
        print(f"Unexpected error: {e}")
        return {"error": str(e)}

# -------------------------------
# Gradio MCP Server
# -------------------------------
demo = gr.Interface(
    fn=process,
    inputs=gr.JSON(label="Input Payload (Dict format)"),
    outputs=gr.JSON(label="Reply to Jetson"),
    api_name="predict"
)
if __name__ == "__main__":
    demo.launch(mcp_server=True)
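
# -------------------------------
# Example client call (sketch)
# -------------------------------
# A minimal sketch of how a client such as the Jetson could call this Space's
# /predict endpoint with gradio_client. The Space id "OppaAI/<space-name>",
# the robot_id value, and the image path "frame.jpg" are placeholders, not
# part of this repo.
#
# from gradio_client import Client
# import base64
#
# client = Client("OppaAI/<space-name>")  # placeholder Space id
# with open("frame.jpg", "rb") as f:
#     img_b64 = base64.b64encode(f.read()).decode()
#
# result = client.predict(
#     {"robot_id": "jetson-01", "image_b64": img_b64},
#     api_name="/predict",
# )
# print(result)  # -> {"received": True, "robot_id": "jetson-01", "vllm_analysis": "..."}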