File size: 4,337 Bytes
84e3a3d
 
 
 
a76d728
 
 
573431c
 
 
 
 
 
84e3a3d
a76d728
 
 
 
84e3a3d
a76d728
573431c
a76d728
571fa01
573431c
 
 
 
 
 
 
a67e8a4
2bdb618
573431c
84e3a3d
a76d728
573431c
 
 
a76d728
573431c
84e3a3d
a76d728
571fa01
 
 
 
 
 
 
2bdb618
571fa01
2bdb618
573431c
 
 
 
571fa01
a76d728
571fa01
573431c
571fa01
a76d728
573431c
a76d728
 
 
 
 
63bfb4e
a76d728
 
 
fd5a88f
a76d728
63bfb4e
a76d728
 
 
fd5a88f
63bfb4e
a76d728
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
573431c
a76d728
 
 
571fa01
a76d728
 
 
a67e8a4
 
 
 
 
a76d728
a67e8a4
a76d728
a67e8a4
 
 
84e3a3d
a76d728
 
 
84e3a3d
573431c
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
import gradio as gr
from groq import Groq
import os

# -------------------------
# Groq Client
# -------------------------
# The API key is read from the environment so the secret never lives in
# source control. Failing fast here (at import time) beats a confusing
# auth error on the first chat request.
API_KEY = os.getenv("GROQ_API_KEY")

if not API_KEY:
    raise RuntimeError("❌ Please set GROQ_API_KEY in your environment variables.")

# Single module-level client, reused by every call to respond().
client = Groq(api_key=API_KEY)

# System prompt prepended to every conversation sent to the model.
# NOTE: this is a runtime string consumed by the API — edit with care.
SYSTEM_PROMPT = """You are an expert in storyboarding.
Provide structured, creative, and insightful responses about creating and refining storyboards.
Use clear sections, shots, and visual descriptions when possible.
"""

# -------------------------
# Chat Function (UPDATED)
# -------------------------
def respond(message, history, model, temperature, max_tokens):
    """Send the user's message to the Groq chat API and return the updated history.

    Parameters
    ----------
    message : str | None
        Raw text from the input box; None or whitespace-only input is ignored.
    history : list[dict] | None
        Prior conversation in Gradio "messages" format
        (``{"role": ..., "content": ...}`` dicts). May be None on the first turn.
    model : str
        Groq model identifier to query.
    temperature : float
        Sampling temperature forwarded to the API.
    max_tokens : int
        Completion-token cap, forwarded as ``max_completion_tokens``.

    Returns
    -------
    list[dict]
        Conversation history including the new user turn and either the
        assistant's reply or an inline error message. Never None, so the
        Chatbot output component always receives a list.
    """
    # Gradio can hand us None for an empty textbox — normalize first.
    message = (message or "").strip()

    # Nothing to send: hand back the existing transcript unchanged.
    if not message:
        return history if history is not None else []

    # Guard against history=None (first turn) and copy so we never mutate
    # the caller's list in place.
    history = list(history) if history else []

    # Build the API payload: system prompt, then the prior turns verbatim,
    # then the new user message. The stored history format matches the API
    # format, so previous turns can be replayed as-is.
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    # Record the user's turn up front so it survives an API failure
    # (previously the user message was silently dropped on error).
    history.append({"role": "user", "content": message})

    try:
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_completion_tokens=max_tokens,
        )
        assistant_reply = response.choices[0].message.content
        history.append({"role": "assistant", "content": assistant_reply})
    except Exception as e:
        # Surface API failures inside the chat instead of crashing the UI.
        history.append({"role": "assistant", "content": f"❌ Error: {str(e)}"})

    return history


# -------------------------
# Custom CSS
# -------------------------
# Injected into the page via gr.Blocks(css=custom_css). The #title and
# #subtitle selectors target the elem_id values set on the Markdown widgets
# below. This is a runtime string — the CSS text itself must not change.
custom_css = """
body {
    background: linear-gradient(135deg, #ffd966, #1e293b);
}
#title {
    text-align: center;
    font-size: 40px;
    font-weight: bold;
    color: #990000;
}
#subtitle {
    text-align: center;
    font-size: 24px;
    color: #660000;
}
"""

# -------------------------
# UI
# -------------------------
with gr.Blocks(css=custom_css) as demo:

    # Header — elem_ids hook into the #title / #subtitle rules in custom_css.
    gr.Markdown(" 🎨 ⚑ The Magical Storyboard ⚑ 🎬 ^O^", elem_id="title")
    gr.Markdown(
        "Turn your imagination into **cinematic storyboards** using AI β€” one prompt at a time.",
        elem_id="subtitle",
    )

    with gr.Row():
        # Left column: chat transcript and input controls.
        with gr.Column(scale=3):
            # type="messages" is required: respond() stores history as
            # {"role", "content"} dicts, not the legacy (user, bot) tuples.
            chatbot = gr.Chatbot(type="messages", height=420)
            msg = gr.Textbox(
                placeholder="Describe your scene, commercial, or movie moment...",
                label="Your Idea"
            )

            send = gr.Button("πŸŽ₯ Create Storyboard")
            clear = gr.Button("🧹 Clear Chat")

        # Right column: model/generation controls and example prompts.
        with gr.Column(scale=1):
            gr.Markdown("### βš™οΈ Controls")

            model = gr.Dropdown(
                choices=[
                    "llama-3.3-70b-versatile",
                    "llama-3.1-8b-instant",
                ],
                value="llama-3.3-70b-versatile",
                label="Model",
            )

            temperature = gr.Slider(
                0, 2, value=0.9, step=0.1,
                label="Creativity (Temperature)"
            )

            max_tokens = gr.Slider(
                256, 8192, value=2048, step=256,
                label="Max Tokens"
            )

            gr.Markdown("### ✨ Examples")
            ex1 = gr.Button("β˜• 30s Coffee Commercial")
            ex2 = gr.Button("πŸ‘» Horror Movie Opening")
            ex3 = gr.Button("πŸ“š Romantic Bookstore Meet-Cute")

    # -------------------------
    # Interactions
    # -------------------------
    # Button click and textbox Enter both route through respond().
    send.click(
        respond,
        inputs=[msg, chatbot, model, temperature, max_tokens],
        outputs=chatbot,
    )

    msg.submit(
        respond,
        inputs=[msg, chatbot, model, temperature, max_tokens],
        outputs=chatbot,
    )

    # Reset the transcript to an empty message list.
    clear.click(lambda: [], None, chatbot)

    # Example buttons only pre-fill the textbox; the user still hits send.
    ex1.click(lambda: "Create a storyboard for a 30-second coffee commercial", None, msg)
    ex2.click(lambda: "Generate a horror movie opening scene storyboard", None, msg)
    ex3.click(lambda: "Design a storyboard for a romantic comedy meet-cute at a bookstore", None, msg)

# -------------------------
# Launch
# -------------------------
# queue() enables request queuing so concurrent users share the backend
# without blocking each other; guard keeps imports side-effect-free.
if __name__ == "__main__":
    demo.queue().launch()