Patrick Rathje committed on
Commit
0160fe0
·
1 Parent(s): 1735827
Files changed (5) hide show
  1. app.py +48 -23
  2. gradio_mcp_server.py +2 -0
  3. llm.py +0 -0
  4. modal_run.py +0 -0
  5. modal_setup.py +0 -0
app.py CHANGED
@@ -7,7 +7,42 @@ import subprocess
7
  from threading import Timer
8
  from functools import partial
9
  import time
10
- from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  from gradio_motioncanvasplayer import MotionCanvasPlayer
13
 
@@ -15,41 +50,31 @@ from gradio_motioncanvasplayer import MotionCanvasPlayer
15
  example_project_path = "https://prathje-gradio-motioncanvasplayer.hf.space/gradio_api/file=/home/user/app/public/project-3.17.2.js"
16
 
17
 
18
- client = InferenceClient(
19
- provider="hf-inference",
20
- api_key=os.environ["HF_TOKEN"],
21
- )
22
-
23
- def get_completion(prompt, history):
24
- completion = client.chat.completions.create(
25
- model="Qwen/Qwen2.5-Coder-32B-Instruct",
26
- messages=[
27
- {
28
- "role": "user",
29
- "content": "What is the capital of France?"
30
- }
31
- ],
32
- )
33
- return completion.choices[0].message
34
-
35
  def load_example(example):
36
  return example['project_path'], example['code'], ""
37
 
38
  with gr.Blocks(theme=gr.themes.Monochrome()) as app:
39
  gr.Markdown("# Motion Canvas Agent")
40
  gr.Markdown("Leverage the power of AI and Motion Canvas to create animations using TypeScript.")
 
 
 
 
 
 
41
  with gr.Row():
42
  with gr.Column():
43
  gr.Markdown("## Chat")
44
- chat = gr.ChatInterface(fn=get_completion, type="messages")
45
-
46
  gr.Markdown("### TypeScript Code for Your Scene")
47
- code = gr.Code(value="", language="typescript")
48
- logs = gr.Textbox(value="", label="Build Logs", interactive=False)
49
 
50
  with gr.Column():
51
  gr.Markdown("## Preview")
52
- player = MotionCanvasPlayer(example_project_path, auto=True, quality=0.5, width=1920, height=1080, variables="{}")
 
53
 
54
  if __name__ == "__main__":
55
  # Todo: In the future we could allow to use this as an MCP server, but right now, we need the preview to be available.
 
7
  from threading import Timer
8
  from functools import partial
9
  import time
10
+
11
+ MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"
12
+
13
+ if os.environ.get("HF_TOKEN"):
14
+ from huggingface_hub import InferenceClient
15
+
16
+ client = InferenceClient(
17
+ provider="hf-inference",
18
+ api_key=os.environ["HF_TOKEN"],
19
+ )
20
+
21
+ def generate(promt, history, code):
22
+ print(promt, history, code)
23
+ completion = client.chat.completions.create(
24
+ model=MODEL,
25
+ messages=[
26
+ {
27
+ "role": "user",
28
+ "content": promt
29
+ }
30
+ ],
31
+ )
32
+ return completion.choices[0].message
33
+ else:
34
+ # we try to run on a ZERO GPU space
35
+ import spaces
36
+ from diffusers import DiffusionPipeline
37
+
38
+ pipe = DiffusionPipeline.from_pretrained(MODEL)
39
+ pipe.to('cuda')
40
+
41
+ @spaces.GPU
42
+ def generate(promt, history, code):
43
+ pass
44
+
45
+
46
 
47
  from gradio_motioncanvasplayer import MotionCanvasPlayer
48
 
 
50
  example_project_path = "https://prathje-gradio-motioncanvasplayer.hf.space/gradio_api/file=/home/user/app/public/project-3.17.2.js"
51
 
52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  def load_example(example):
54
  return example['project_path'], example['code'], ""
55
 
56
  with gr.Blocks(theme=gr.themes.Monochrome()) as app:
57
  gr.Markdown("# Motion Canvas Agent")
58
  gr.Markdown("Leverage the power of AI and Motion Canvas to create animations using TypeScript.")
59
+
60
+ player = MotionCanvasPlayer(example_project_path, auto=True, quality=0.5, width=1920, height=1080, variables="{}", render=False)
61
+
62
+ code = gr.Code(value="", language="typescript", render=False)
63
+ logs = gr.Textbox(value="", label="Build Logs", interactive=False, render=False)
64
+
65
  with gr.Row():
66
  with gr.Column():
67
  gr.Markdown("## Chat")
68
+ chat = gr.ChatInterface(fn=generate, type="messages", additional_inputs=[code, logs], additional_outputs=[player, code, logs])
69
+
70
  gr.Markdown("### TypeScript Code for Your Scene")
71
+ code.render()
72
+ logs.render()
73
 
74
  with gr.Column():
75
  gr.Markdown("## Preview")
76
+ player.render()
77
+
78
 
79
  if __name__ == "__main__":
80
  # Todo: In the future we could allow to use this as an MCP server, but right now, we need the preview to be available.
gradio_mcp_server.py CHANGED
@@ -5,6 +5,8 @@ import io
5
  import time
6
  from gradio_client import Client
7
 
 
 
8
  sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
9
  sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace')
10
 
 
5
  import time
6
  from gradio_client import Client
7
 
8
+
9
+
10
  sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
11
  sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace')
12
 
llm.py DELETED
File without changes
modal_run.py DELETED
File without changes
modal_setup.py DELETED
File without changes