Update app.py
app.py CHANGED
@@ -3,17 +3,11 @@ import base64
 import gradio as gr
 from huggingface_hub import upload_file, InferenceClient
 import json
-from fastmcp import FastMCP
 
 # --- Config ---
 HF_DATASET_REPO = "OppaAI/Robot_MCP"
 HF_VLM_MODEL = "Qwen/Qwen2.5-VL-7B-Instruct"
 
-# --- MCP server instance ---
-mcp = FastMCP(name="Robot MCP")
-
-# --- MCP Tool ---
-@mcp.tool()
 def say_hi(greeting_text: str = "Hi there!") -> dict:
     """Return a greeting command in JSON."""
     return {"command": "say_hi", "text": greeting_text}
@@ -55,7 +49,6 @@ def process_and_describe(payload: dict):
     # Initialize HF client
     hf_client = InferenceClient(token=hf_token)
 
-    # System prompt (without stio.describe_tools because not using STIO here)
     system_prompt = """
 You are a helpful robot assistant.
 When you receive an image, you must:
@@ -93,12 +86,6 @@ def process_and_describe(payload: dict):
     except json.JSONDecodeError:
         action_data = {"description": vlm_text, "action": None, "greeting_text": None}
 
-    # Call the tool if action == say_hi
-    tool_result = None
-    if action_data.get("action") == "say_hi":
-        greeting = action_data.get("greeting_text") or "Hi!"
-        tool_result = say_hi(greeting_text=greeting)
-
     return {
         "saved_to_hf_hub": True,
         "repo_id": HF_DATASET_REPO,
@@ -124,13 +111,4 @@ demo = gr.Interface(
 )
 
 if __name__ == "__main__":
-
-    import threading
-
-    def run_mcp():
-        mcp.run(transport="stdio")
-
-    t = threading.Thread(target=run_mcp, daemon=True)
-    t.start()
-
-    demo.launch(mcp_server=True)
+    demo.launch()