Patrick Rathje committed on
Commit 28e48f7 · 1 Parent(s): ff3f7ee
Files changed (3)
  1. app.py +29 -29
  2. gradio_mcp_server.py +1 -2
  3. requirements.txt +3 -1
app.py CHANGED
@@ -8,39 +8,39 @@ from threading import Timer
 from functools import partial
 import time
 
+from huggingface_hub import InferenceClient
+from smolagents import ToolCollection, CodeAgent, InferenceClientModel
+
 MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"
 
-if os.environ.get("HF_TOKEN"):
-    from huggingface_hub import InferenceClient
-
-    client = InferenceClient(
-        provider="hf-inference",
-        api_key=os.environ["HF_TOKEN"],
-    )
-
-    def generate(promt, history, code, logs):
-        print(promt, history, code)
-        completion = client.chat.completions.create(
-            model=MODEL,
-            messages=[
-                {
-                    "role": "user",
-                    "content": promt
-                }
-            ],
-        )
-        return completion.choices[0].message
-else:
-    # we try to run on a ZERO GPU space
-    import spaces
-    from diffusers import DiffusionPipeline
-
-    pipe = DiffusionPipeline.from_pretrained(MODEL)
-    pipe.to('cuda')
-
-    @spaces.GPU
-    def generate(promt, history, code, logs):
-        pass
+BUILD_SERVER_MCP_CONFIG = {"url": "https://agents-mcp-hackathon-gradio-motioncanvas-mcp-server.hf.space/gradio_api/mcp/sse", "transport": "sse"}
+DOCS_SERVER_MCP_CONFIG = {"url": "https://prathje-gradio-motioncanvas-docs-mcp-server.hf.space/gradio_api/mcp/sse", "transport": "sse"}
+
+all_tools = []
+with ToolCollection.from_mcp(BUILD_SERVER_MCP_CONFIG, trust_remote_code=True) as build_tool_collection:
+    with ToolCollection.from_mcp(DOCS_SERVER_MCP_CONFIG, trust_remote_code=True) as docs_tool_collection:
+        all_tools = build_tool_collection.tools + docs_tool_collection.tools
+
+if os.environ.get("HF_TOKEN"):
+
+    model = InferenceClientModel(model_id=MODEL)
+
+    agent = CodeAgent(tools=[*all_tools], model=model)
+
+    def generate(message, history, code, logs):
+        try:
+            res = agent.run(
+                "From the following prompt, generate code for a standalone motion canvas scene.tsx and build. You can browse the docs to help you.",
+                additional_args={'prompt': 'Please animate the formula for the area of a circle'}
+            )
+            print(res)
+        except Exception as e:
+            print(e)
+            return "An error occurred while generating the code", "", ""
+else:
+    print("No HF_TOKEN found, Zero GPU space not implemented")
+    def generate(message, history, code, logs):
+        return "LLM not available", "", ""
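Note that the committed handler still ignores the incoming chat message: agent.run is always called with the fixed example prompt ('Please animate the formula for the area of a circle') and returns nothing on success. A minimal sketch of how the handler could forward the user's message instead, assuming the same three-value return shape (response text, code, logs) used by the fallback branch; how the agent's output maps back onto code and logs is not shown in this commit, so it is left untouched here:

# Sketch (assumption): forward the user's message to the agent instead of the fixed example.
def generate(message, history, code, logs):
    try:
        res = agent.run(
            "From the following prompt, generate code for a standalone motion canvas "
            "scene.tsx and build. You can browse the docs to help you.",
            additional_args={"prompt": message},  # user input rather than the hard-coded prompt
        )
        # Return the agent's answer as the chat response; keep code/logs unchanged for now.
        return str(res), code, logs
    except Exception as e:
        print(e)
        return "An error occurred while generating the code", "", ""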
 
gradio_mcp_server.py CHANGED
@@ -14,8 +14,7 @@ mcp = FastMCP("Gradio MCP Server")
 
 clients = {}
 
-BUILD_SERVER_SPACE_ID = "https://agents-mcp-hackathon-gradio-motioncanvas-mcp-server.hf.space/"
-DOCS_SERVER_SPACE_ID = "https://agents-mcp-hackathon-gradio-motioncanvas-docs-mcp-server.hf.space/"
+
 
 def get_client(space_id: str) -> Client:
     """Get or create a Gradio client for the specified space."""
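The hunk shows only the signature and docstring of get_client; together with the clients dict above it, it implies a per-space cache of Gradio clients. A minimal sketch under that assumption (the actual body is not part of this hunk), using gradio_client's Client:

from gradio_client import Client

clients = {}

def get_client(space_id: str) -> Client:
    """Get or create a Gradio client for the specified space."""
    # Assumption: memoize one Client per space so repeated calls reuse the same connection.
    if space_id not in clients:
        clients[space_id] = Client(space_id)
    return clients[space_id]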
requirements.txt CHANGED
@@ -1,2 +1,4 @@
 gradio[mcp]
-gradio_motioncanvasplayer
+gradio_motioncanvasplayer
+huggingface_hub
+smolagents[mcp]