# Motion Canvas Agent — HuggingFace Space by Patrick Rathje
# (working HuggingFace inference endpoint)
import gradio as gr
import os
import shutil
import uuid
import subprocess
from threading import Timer
from functools import partial
import time
from huggingface_hub import InferenceClient
from gradio_motioncanvasplayer import MotionCanvasPlayer
# Example project that serves as a placeholder in the preview until the user builds their own scene.
example_project_path = "https://prathje-gradio-motioncanvasplayer.hf.space/gradio_api/file=/home/user/app/public/project-3.17.2.js"
# Inference client for the hf-inference provider; requires the HF_TOKEN
# secret to be configured on the Space (raises KeyError at import if missing).
client = InferenceClient(
provider="hf-inference",
api_key=os.environ["HF_TOKEN"],
)
def get_completion(prompt, history):
    """Send the chat history plus the latest user prompt to the model.

    Args:
        prompt: The newest user message from the Gradio chat box.
        history: Prior turns as ``{"role", "content"}`` dicts — the shape
            supplied by ``gr.ChatInterface(type="messages")``.

    Returns:
        The assistant's reply text.
    """
    # Bug fix: the original ignored both arguments and always asked the
    # hard-coded question "What is the capital of France?". Forward the
    # real conversation instead. Only role/content are extracted because
    # Gradio message dicts may carry extra keys (e.g. metadata) that the
    # chat-completions API does not accept.
    messages = [{"role": m["role"], "content": m["content"]} for m in history]
    messages.append({"role": "user", "content": prompt})
    completion = client.chat.completions.create(
        model="Qwen/Qwen2.5-Coder-32B-Instruct",
        messages=messages,
    )
    # Return the text content so ChatInterface renders a plain string
    # (the original returned the whole message object).
    return completion.choices[0].message.content
def load_example(example):
    """Unpack an example dict for the UI.

    Returns the example's project path and scene code, plus an empty
    string that clears the build-log textbox.
    """
    project_path = example["project_path"]
    scene_code = example["code"]
    return project_path, scene_code, ""
# UI layout: two-column page — chat/code/logs on the left, the Motion
# Canvas preview player on the right. Statement order defines the layout,
# so the structure is kept declarative and linear.
with gr.Blocks(theme=gr.themes.Monochrome()) as app:
    gr.Markdown("# Motion Canvas Agent")
    gr.Markdown("Leverage the power of AI and Motion Canvas to create animations using TypeScript.")
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Chat")
            # type="messages" makes the chat fn receive history as
            # {"role", "content"} dicts (OpenAI-style).
            chat = gr.ChatInterface(fn=get_completion, type="messages")
            gr.Markdown("### TypeScript Code for Your Scene")
            code = gr.Code(value="", language="typescript")
            logs = gr.Textbox(value="", label="Build Logs", interactive=False)
        with gr.Column():
            gr.Markdown("## Preview")
            # Starts with the placeholder project; auto=True begins playback
            # immediately, quality=0.5 halves render resolution for speed.
            player = MotionCanvasPlayer(example_project_path, auto=True, quality=0.5, width=1920, height=1080, variables="{}")
if __name__ == "__main__":
    # TODO: In the future we could expose this as an MCP server, but right
    # now the preview must stay available, so MCP mode is disabled.
    # strict_cors=False lets the hosted player iframe load cross-origin assets.
    app.launch(mcp_server=False, strict_cors=False)