# app.py
import os
import asyncio
import base64
import inspect
import json
import traceback
from datetime import datetime
from typing import Optional, Dict, Any

import gradio as gr
from huggingface_hub import HfApi, InferenceClient
from fastmcp import FastMCP
# --- Configuration ---
HF_DATASET_REPO = os.environ.get("HF_DATASET_REPO", "OppaAI/Robot_MCP")
HF_VLM_MODEL = os.environ.get("HF_VLM_MODEL", "Qwen/Qwen2.5-VL-7B-Instruct")
mcp = FastMCP("Robot_MCP")
# -----------------------------------------------------
# Register Robot Tools (MCP)
# -----------------------------------------------------
@mcp.tool()
def speak(text: str, emotion: str = "neutral"):
    """Makes the robot speak a given text with an emotion."""
    return {"status": "success", "action_executed": "speak", "payload": {"text": text, "emotion": emotion}}
@mcp.tool()
def navigate(direction: str, distance_meters: float):
    """Moves the robot a specified distance in a direction (max 5m)."""
    if distance_meters > 5.0:
        return {"status": "error", "message": "Safety limit exceeded"}
    return {"status": "success", "action_executed": "navigate", "payload": {"direction": direction, "distance": distance_meters}}
@mcp.tool()
def scan_hazard(hazard_type: str, severity: str):
    """Logs a potential hazard detected by the robot."""
    timestamp = datetime.now().isoformat()
    return {"status": "warning_logged", "log": f"[{timestamp}] HAZARD: {hazard_type} (Severity: {severity})"}
@mcp.tool()
def analyze_human(clothing_color: str, estimated_action: str):
    """Tracks human activity based on visual input."""
    return {"status": "human_tracked", "details": f"Human wearing {clothing_color} is {estimated_action}"}
# -----------------------------------------------------
# Save and upload image to HF
# -----------------------------------------------------
def save_and_upload_image(image_b64: str, hf_token: str):
    """Decodes a base64 image, saves it locally, and uploads it to the Hugging Face Hub."""
    try:
        image_bytes = base64.b64decode(image_b64)
        size_bytes = len(image_bytes)
        # Ensure the /tmp directory exists
        os.makedirs("/tmp", exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
        local_path = f"/tmp/robot_img_{timestamp}.jpg"
        with open(local_path, "wb") as f:
            f.write(image_bytes)
        filename = f"robot_{timestamp}.jpg"
        # Upload into the dataset repo under tmp/
        api = HfApi()
        api.upload_file(
            path_or_fileobj=local_path,
            path_in_repo=f"tmp/{filename}",
            repo_id=HF_DATASET_REPO,
            repo_type="dataset",
            token=hf_token
        )
        # The file lives under tmp/ in the repo, so the resolve URL must include it
        url = f"https://huggingface.co/datasets/{HF_DATASET_REPO}/resolve/main/tmp/{filename}"
        return local_path, url, filename, size_bytes
    except Exception as e:
        print(f"[Error] during image upload: {e}")
        traceback.print_exc()
        return None, None, None, 0
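# Illustrative return values on success (timestamp and size are made up):
#   ("/tmp/robot_img_20250101_120000_000000.jpg",
#    "https://huggingface.co/datasets/OppaAI/Robot_MCP/resolve/main/tmp/robot_20250101_120000_000000.jpg",
#    "robot_20250101_120000_000000.jpg",
#    48213)
# On failure the function returns (None, None, None, 0).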
# -----------------------------------------------------
# JSON parsing helper
# -----------------------------------------------------
def safe_parse_json_from_text(text: str) -> Optional[Dict[str, Any]]:
    """Safely extract JSON from messy VLM output."""
    if not text:
        return None
    try:
        return json.loads(text)
    except (json.JSONDecodeError, TypeError):
        pass
    # Strip Markdown code fences and an optional leading "json" language tag
    cleaned = text.strip().strip("`").strip()
    if cleaned.lower().startswith("json"):
        cleaned = cleaned[4:].strip()
    # Fall back to the outermost {...} object in the remaining text
    try:
        start = cleaned.find("{")
        end = cleaned.rfind("}")
        if start >= 0 and end > start:
            return json.loads(cleaned[start:end + 1])
    except json.JSONDecodeError:
        return None
    return None
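# Illustrative example (not executed): a fenced reply such as
#   ```json
#   {"description": "a person in a red shirt", "tool_name": "speak",
#    "arguments": {"text": "Hello", "emotion": "happy"}}
#   ```
# has its fences and "json" tag stripped and is parsed into the corresponding
# dict; input without a recoverable {...} object yields None.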
# -----------------------------------------------------
# Call MCP tool safely using public API
# -----------------------------------------------------
def validate_and_call_tool(tool_name: str, tool_args: dict) -> Dict[str, Any]:
    """Dispatch a tool call through FastMCP's public API instead of private attributes."""
    try:
        # FastMCP v2.11.2 provides call_tool
        if hasattr(mcp, "call_tool"):
            result = mcp.call_tool(tool_name, tool_args)
            # Depending on the FastMCP version, call_tool may be async; if we
            # got a coroutine back, run it to completion before returning.
            if inspect.iscoroutine(result):
                result = asyncio.run(result)
            return result
        # Fallback: call the registered function directly if it is exposed as an attribute
        if hasattr(mcp, tool_name):
            tool_fn = getattr(mcp, tool_name)
            return tool_fn(**tool_args)
        return {"error": f"Unknown tool '{tool_name}'"}
    except Exception as e:
        traceback.print_exc()
        return {"error": f"Tool execution error: {str(e)}"}
# -----------------------------------------------------
# Main pipeline: image → VLM → tool
# -----------------------------------------------------
def process_and_describe(payload: Dict[str, Any]) -> Dict[str, Any]:
    if isinstance(payload, str):
        try:
            payload = json.loads(payload)
        except json.JSONDecodeError:
            return {"error": "Invalid JSON payload"}
    hf_token = payload.get("hf_token")
    if not hf_token:
        return {"error": "hf_token missing"}
    robot_id = payload.get("robot_id", "unknown")
    image_b64 = payload.get("image_b64")
    if not image_b64:
        return {"error": "image_b64 missing"}
    # Save + upload the frame to the dataset repo
    _, hf_url, _, size_bytes = save_and_upload_image(image_b64, hf_token)
    if not hf_url:
        return {"error": "Image upload failed"}
    # VLM system prompt
    system_prompt = """
Respond in STRICT JSON ONLY:
{
  "description": "short visual description",
  "tool_name": "speak | navigate | scan_hazard | analyze_human",
  "arguments": { ... }
}
"""
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": [
            {"type": "text", "text": "Analyze the image and choose ONE tool."},
            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}}
        ]}
    ]
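    # Illustrative example of the reply shape we hope to get back from the VLM
    # (assumed, not guaranteed by the model):
    #   {"description": "person in a red shirt walking left",
    #    "tool_name": "analyze_human",
    #    "arguments": {"clothing_color": "red", "estimated_action": "walking"}}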
    client = InferenceClient(token=hf_token)
    try:
        response = client.chat.completions.create(
            model=HF_VLM_MODEL,
            messages=messages,
            max_tokens=300,
            temperature=0.1,
        )
    except Exception as e:
        return {"status": "error", "message": f"Inference API call failed: {e}"}
    vlm_output = response.choices[0].message.content.strip()
    parsed = safe_parse_json_from_text(vlm_output)
    if parsed is None:
        return {"status": "model_no_json", "robot_id": robot_id, "image_url": hf_url, "vlm_raw": vlm_output, "message": "VLM returned invalid JSON"}
    tool_name = parsed.get("tool_name")
    tool_args = parsed.get("arguments") or {}
    tool_result = validate_and_call_tool(tool_name, tool_args)
    return {
        "status": "success",
        "robot_id": robot_id,
        # "image_url": hf_url,
        "file_size_bytes": size_bytes,
        "vlm_description": parsed.get("description"),
        "chosen_tool": tool_name,
        "tool_arguments": tool_args,
        "tool_execution_result": tool_result,
        "vlm_raw": vlm_output
    }
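# Illustrative JSON payload for process_and_describe / the Gradio endpoint
# (token and image data are placeholders):
#   {
#     "hf_token": "hf_xxx",
#     "robot_id": "robot_01",
#     "image_b64": "/9j/4AAQSkZJRg..."
#   }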
# ------------------------------
# Gradio Interface
# ------------------------------
iface = gr.Interface(
    fn=process_and_describe,
    inputs=gr.JSON(label="Input JSON Payload (must include hf_token & image_b64)"),
    outputs=gr.JSON(label="Output JSON Result"),
    api_name="predict",
    flagging_mode="never"
)
# ------------------------------
# Main Entry
# ------------------------------
if __name__ == "__main__":
    print(f"[Config] HF_DATASET_REPO: {HF_DATASET_REPO}")
    print(f"[Config] HF_VLM_MODEL: {HF_VLM_MODEL}")
    print("[Gradio] Launching interface...")
    iface.launch(server_name="0.0.0.0", server_port=7860)
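# Minimal client-side sketch (assumptions: the app runs as a Hugging Face Space,
# `gradio_client` is installed, and the Space id below is only a placeholder):
#
#   from gradio_client import Client
#   import base64
#
#   with open("frame.jpg", "rb") as f:
#       image_b64 = base64.b64encode(f.read()).decode()
#   client = Client("OppaAI/Robot_MCP")  # placeholder Space id
#   result = client.predict(
#       {"hf_token": "hf_xxx", "robot_id": "robot_01", "image_b64": image_b64},
#       api_name="/predict",
#   )
#   print(result)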