yhyoo commited on
Commit
1f7de60
·
verified ·
1 Parent(s): c382da3

Upload folder using huggingface_hub

Browse files
Files changed (10) hide show
  1. .gitattributes +3 -0
  2. .gitignore +3 -0
  3. README.md +3 -9
  4. app.py +410 -0
  5. image.jpg +3 -0
  6. locustfile.py +92 -0
  7. nova.mp4 +3 -0
  8. nova2.mp4 +3 -0
  9. requirements.txt +4 -0
  10. test/test.http +101 -0
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ image.jpg filter=lfs diff=lfs merge=lfs -text
37
+ nova.mp4 filter=lfs diff=lfs merge=lfs -text
38
+ nova2.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ .env
2
+ .venv
3
+ temp_video.mp4
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
- title: Persona Chatbot
3
- emoji: 🐠
4
- colorFrom: green
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 5.16.0
8
  app_file: app.py
9
- pinned: false
 
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: persona-chatbot
 
 
 
 
 
3
  app_file: app.py
4
+ sdk: gradio
5
+ sdk_version: 5.15.0
6
  ---
 
 
app.py ADDED
@@ -0,0 +1,410 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import requests
3
+ import os
4
+ from dotenv import load_dotenv
5
+ from PIL import Image
6
+ from io import BytesIO
7
+ import uuid # Add this import at the top
8
+ import hashlib # Add this import at the top
9
+ import websockets
10
+ import asyncio
11
+ import json
12
+ import time
13
+
14
+ TEMP_VIDEO_PATH = "temp_video.mp4" # Temporary file to save the video
15
+
16
+ # Load environment variables
17
+ load_dotenv()
18
+ HTTP_HOST = os.getenv('http_host')
19
+ WEBSOCKET_HOST = os.getenv('websocket_host')
20
+ # WEBSOCKET_HOST = "wss://e69rkwy4l4.execute-api.us-east-1.amazonaws.com/prod"
21
+ DEMO_SECRET = os.getenv('x-demo-secret')
22
+ character = ""
23
+ session_id = None # Add this to track the current session
24
+
25
+ # Add preset prompts from test.http as examples
26
+ PRESET_IMAGE_PROMPTS = [
27
+ "A friendly cartoon bear with expressive eyes, front view portrait",
28
+ "A wise old wizard with a long white beard, looking directly at viewer",
29
+ "A cheerful robot with glowing blue eyes, head and shoulders portrait",
30
+ "A mystical elf warrior with pointed ears and silver hair, 3/4 view",
31
+ "A cute animated fox with orange fur and big eyes, front facing",
32
+ "A steampunk inventor with brass goggles and wild hair, portrait",
33
+ "A gentle dragon with iridescent scales, close-up face shot"
34
+ ]
35
+
36
+ PRESET_CHAT_QUESTIONS = [
37
+ "This character is friendly and playful, always eager to make new friends. They're optimistic and have a warm, welcoming personality.",
38
+ "This character is wise and patient, known for their thoughtful nature. They have a calm demeanor but can be quite witty when the situation calls for it.",
39
+ "This character is curious and analytical, always eager to learn new things. Despite their unique nature, they show surprising warmth and empathy.",
40
+ "This character is noble and protective of others, with plenty of life experience. While reserved at first, they become fiercely loyal to their friends.",
41
+ "This character is energetic and adventurous, always seeking out new experiences. They have a contagious enthusiasm that brightens any room.",
42
+ "This character is gentle and nurturing, with a deep connection to those around them. They're known for their patience and understanding nature.",
43
+ "This character is clever and resourceful, able to think quickly on their feet. They approach problems with creativity and determination.",
44
+ "This character is mysterious and intriguing, with hidden depths to their personality. They reveal their true nature only to those they trust."
45
+ ]
46
+
47
+ PRESET_ANIMATIONS = [
48
+ "Casting a spell with glowing magical effects radiating from his fingers. Focus on realistic hand gestures, including opening, closing, and raising hands, with smooth, fluid motion. Subtle head nods add realism, while magical lighting highlights his hands and face, enhancing the mystical atmosphere",
49
+ "The character waves hello to the viewer",
50
+ "The character nods thoughtfully",
51
+ "The character looks around curiously",
52
+ "The character laughs cheerfully",
53
+ "The character tilts their head and thinks",
54
+ "The character gives a friendly greeting",
55
+ "The character makes a surprised expression"
56
+ ]
57
+
58
+ # WebSocket 연결 상태를 저장할 변수
59
+ ws_connection = None
60
+
61
async def connect_websocket():
    """Return the module-wide WebSocket connection, opening it on first use.

    The connection is cached in the global ``ws_connection``; callers that
    detect a dead socket reset that global to force a reconnect here.
    """
    global ws_connection
    if ws_connection:
        return ws_connection
    ws_connection = await websockets.connect(WEBSOCKET_HOST)
    return ws_connection
66
+
67
async def chat_with_model(message, history):
    """Stream a chat reply for *message* over the WebSocket connection.

    Async generator used as a Gradio event handler: yields ("", history)
    pairs so the input box is cleared while the assistant message grows
    token-by-token. Uses the global `character` / `session_id` set by the
    image-prompt handlers, and resets `ws_connection` on failure so the
    next call reconnects.
    """
    global character, session_id, ws_connection

    try:
        ws = await connect_websocket()
        await ws.send(json.dumps({
            "action": "message",
            "message": message,
            "character": character,
            "session_id": session_id
        }))

        # Append the user message only once
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": ""})
        current_response = ""

        while True:
            response = await ws.recv()
            data = json.loads(response)

            if data.get('type') == 'stream':
                current_response += data['message']
                # Update only the content of the last (assistant) message
                history[-1]["content"] = current_response.strip()
                yield "", history

            elif data.get('type') == 'end':
                yield "", history
                break

    except Exception as e:
        # Best-effort: log, drop the (possibly dead) connection, and return
        # whatever history we have so the UI stays responsive.
        print(f"WebSocket error: {str(e)}")
        ws_connection = None
        yield "", history
102
+
103
def make_api_request(endpoint, payload, timeout=60):
    """POST *payload* as JSON to ``{HTTP_HOST}/{endpoint}`` and return the decoded reply.

    Args:
        endpoint: Path segment under HTTP_HOST (e.g. "draw", "animate").
        payload: JSON-serializable request body.
        timeout: Seconds before the request is aborted. New keyword with a
            default, so existing callers are unchanged — previously there was
            no timeout at all and a stuck backend could hang the UI forever.

    Returns:
        The parsed JSON response body (a dict for this API).

    Raises:
        requests.RequestException: on network failure or timeout.
        ValueError: if the response body is not valid JSON.
    """
    headers = {
        'Content-Type': 'application/json',
        'x-demo-secret': DEMO_SECRET
    }
    response = requests.post(
        f"{HTTP_HOST}/{endpoint}", json=payload, headers=headers, timeout=timeout
    )
    return response.json()
110
+
111
def generate_image(prompt):
    """Generate a character portrait for *prompt* and key the chat session to it.

    Side effects: stores *prompt* in the global ``character`` and sets the
    global ``session_id`` to its MD5 digest, so chat history is consistent
    per character description.

    NOTE(review): a second ``generate_image`` defined inside the gr.Blocks
    context later rebinds this name (without these session side effects);
    the click handler compensates by also calling
    ``update_character_and_load_history``.

    Returns:
        PIL.Image.Image: the downloaded portrait.
    """
    global session_id, character
    response = make_api_request("draw", {"prompt": prompt})
    image_url = response.get('image_url')

    # Derive a deterministic session ID from the character description.
    character = prompt
    session_id = hashlib.md5(character.encode()).hexdigest()

    # Bounded timeout so a slow CDN cannot hang the UI (the original had none).
    image_response = requests.get(image_url, timeout=60)
    return Image.open(BytesIO(image_response.content))
123
+
124
def animate_clip(text, image, optimize_prompt):
    """Request a video clip of the current character performing *text*.

    Args:
        text: Animation description appended to the character description.
        image: Optional reference image (numpy array or PIL image) from the
            Gradio image component; letterboxed to 1280x720 and sent as PNG.
        optimize_prompt: Whether the backend should rewrite the prompt with AI.

    Returns:
        (invocation_id, optimized_prompt) from the /animate endpoint.

    Raises:
        gr.Error: if the backend reports an error.
    """
    global character
    animation_prompt = "Generate a video of " + character + ". " + text

    # Empty list when no reference image was supplied.
    image_payload = _build_image_payload(image) if image is not None else []

    response = make_api_request("animate", {
        "text": animation_prompt,
        "images": image_payload,
        "optimize_prompt": optimize_prompt
    })

    if 'error' in response:
        raise gr.Error(response['error'])

    return response.get('invocationId'), response.get('optimized_prompt')


def _build_image_payload(image):
    """Letterbox *image* to 1280x720 and wrap it as a base64-PNG API payload.

    Accepts a numpy array (as delivered by gr.Image) or a PIL image. The
    aspect ratio is preserved; black bars pad the remainder.
    """
    from base64 import b64encode
    import numpy as np

    pil_image = Image.fromarray(image) if isinstance(image, np.ndarray) else image

    target_w, target_h = 1280, 720
    # Uniform scale so the whole image fits inside the target frame.
    scale = min(target_w / pil_image.width, target_h / pil_image.height)
    new_w = int(pil_image.width * scale)
    new_h = int(pil_image.height * scale)
    resized = pil_image.resize((new_w, new_h), Image.Resampling.LANCZOS)

    # Center the resized image on a black 1280x720 canvas.
    canvas = Image.new('RGB', (target_w, target_h), (0, 0, 0))
    canvas.paste(resized, ((target_w - new_w) // 2, (target_h - new_h) // 2))

    buffer = BytesIO()
    canvas.save(buffer, format='PNG')
    return [{
        "format": "png",
        "source": {
            "bytes": b64encode(buffer.getvalue()).decode('utf-8')
        }
    }]
185
+
186
def fetch_animation_status(invocation_id):
    """Poll the clip-status endpoint for *invocation_id*.

    Returns:
        The presigned video URL once the backend reports status
        'Completed'; otherwise None (also for a falsy invocation_id).
    """
    if not invocation_id:
        return None

    status_response = requests.get(
        f"{HTTP_HOST}/check-clip-status/{invocation_id}",
        headers={'x-demo-secret': DEMO_SECRET},
        timeout=30,  # the original had no timeout and could block the UI thread
    )
    result = status_response.json()
    if result.get('status') == 'Completed':
        return result.get('presignedUrl')
    return None
199
+
200
def load_chat_history(session_id):
    """Fetch the stored chat history for *session_id* from the backend (DynamoDB)."""
    try:
        response = make_api_request("get-history", {"session_id": session_id})
    except Exception as e:
        # Best-effort: a missing/failed history simply starts an empty chat.
        print(f"Error loading chat history: {str(e)}")
        return []
    return response.get('history', [])
210
+
211
def update_character_and_load_history(text_value):
    """Refresh the session when the character changes and load its chat history.

    Sets the globals ``character`` and ``session_id`` (MD5 of the character
    text, so the same description always maps to the same session) and
    returns the stored history, or an empty list for blank input.
    """
    global character, session_id
    if not text_value:
        return []
    character = text_value
    session_id = hashlib.md5(character.encode()).hexdigest()
    history = load_chat_history(session_id)
    print(history)
    return history
222
+
223
+ # Create the Gradio interface
224
+ with gr.Blocks(css="""
225
+ .gradio-container {
226
+ width: 600px !important;
227
+ margin: 0 auto !important;
228
+ }
229
+ #component-0 {
230
+ width: 550px !important;
231
+ margin: 0 auto !important;
232
+ padding: 20px !important;
233
+ }
234
+
235
+ .contain {
236
+ width: 550px !important;
237
+ }
238
+ /* Add circular mask to the image and remove container borders */
239
+ .image-container img {
240
+ border-radius: 50% !important;
241
+ aspect-ratio: 1 !important;
242
+ object-fit: cover !important;
243
+ width: 200px !important;
244
+ height: 200px !important;
245
+ display: block !important;
246
+ margin: 0 auto !important;
247
+ }
248
+ .image-container {
249
+ border: none !important;
250
+ background: none !important;
251
+ }
252
+ /* Remove the dotted upload area */
253
+ .image-container > div {
254
+ border: none !important;
255
+ }
256
+ """) as demo:
257
+ chat_history = gr.State([])
258
+
259
+ gr.Markdown("<h1 style='text-align: center;'>AnyCompany AI</h1>")
260
+
261
+ # Image Section
262
+ image_prompt = gr.Textbox(
263
+ label="Imagine you are a...",
264
+ placeholder="Describe the image you want to generate..."
265
+ )
266
+ image_dropdown = gr.Dropdown(
267
+ choices=["--- Select a preset prompt or type your own ---"] + PRESET_IMAGE_PROMPTS,
268
+ label="Or choose from preset prompts",
269
+ type="value",
270
+ allow_custom_value=False
271
+ )
272
+ image_btn = gr.Button("Show Face")
273
+ image_output = gr.Image(
274
+ height=300,
275
+ container=True,
276
+ elem_classes=["image-container"]
277
+ )
278
+
279
+ # Divider
280
+ gr.Markdown("---")
281
+
282
+ # Chat Section
283
+ chatbot = gr.Chatbot(label="Chat History", height=400, type="messages")
284
+ chat_input = gr.Textbox(
285
+ label="Chat with your character",
286
+ placeholder="How are you?"
287
+ )
288
+
289
+ # Divider
290
+ gr.Markdown("---")
291
+
292
+ # Animation Section
293
+ animation_prompt = gr.Textbox(
294
+ label="Enter animation description",
295
+ placeholder="Describe the animation sequence you want to create..."
296
+ )
297
+ animation_dropdown = gr.Dropdown(
298
+ choices=["--- Select a preset animation or type your own ---"] + PRESET_ANIMATIONS,
299
+ label="Or choose from preset animations",
300
+ type="value",
301
+ allow_custom_value=False
302
+ )
303
+ optimize_prompt_checkbox = gr.Checkbox(label="Optimize prompt using AI", value=True)
304
+ optimized_prompt = gr.Textbox(
305
+ label="Final Prompt",
306
+ placeholder="The final version of your prompt to the video generation",
307
+ interactive=False
308
+ )
309
+ animation_status = gr.Textbox(
310
+ label="Animation ID",
311
+ placeholder="Not yet requested",
312
+ interactive=False
313
+ )
314
+ animation_output = gr.Video(label="Generated Animation")
315
+ with gr.Row():
316
+ animate_btn = gr.Button("Create Animation")
317
+ check_status_btn = gr.Button("Refresh")
318
+
319
def generate_image(prompt):
    """Generate a portrait for *prompt* via the /draw endpoint and return a PIL image.

    NOTE(review): this rebinds the module-level ``generate_image`` defined
    earlier in the file; unlike that version it does NOT update the global
    character/session_id — the click handler calls
    ``update_character_and_load_history`` separately to do that.
    """
    response = make_api_request("draw", {"prompt": prompt})
    # Extract the image URL from the response
    image_url = response.get('image_url')
    print(image_url)

    # Download and convert the image
    response = requests.get(image_url)
    image = Image.open(BytesIO(response.content))

    return image
330
+
331
# Connect image generation to both image output and chat:
# one click both renders the portrait and switches the chat session.
image_btn.click(
    fn=lambda prompt: (
        generate_image(prompt), # generate the image
        update_character_and_load_history(prompt) # load this character's chat history
    ),
    inputs=[image_prompt],
    outputs=[image_output, chatbot] # chatbot output added so history refreshes
)

# Keep the existing chat response handling
# (async generator streams tokens into the chatbot and clears the input box).
chat_input.submit(
    chat_with_model,
    [chat_input, chatbot],
    [chat_input, chatbot],
    show_progress="full"
)
348
+
349
def check_and_update_animation(invocation_id):
    """Poll the animation once; when ready, download it to a local temp file.

    Returns:
        (status_text, video_path) for the two Gradio outputs — the local
        video path when the clip is finished, otherwise the unchanged
        invocation ID and None.
    """
    video_url = fetch_animation_status(invocation_id)
    if not video_url:
        # Still rendering (or no valid ID): keep showing the invocation ID.
        return invocation_id, None

    # Stream the finished clip to disk; the original had no timeout and no
    # status check, so a failed download silently produced a broken file.
    response = requests.get(video_url, stream=True, timeout=120)
    response.raise_for_status()
    with open(TEMP_VIDEO_PATH, "wb") as video_file:
        for chunk in response.iter_content(chunk_size=8192):
            video_file.write(chunk)

    return "Animation completed.", TEMP_VIDEO_PATH
362
+
363
+ animate_btn.click(
364
+ animate_clip,
365
+ inputs=[animation_prompt, image_output, optimize_prompt_checkbox],
366
+ outputs=[animation_status, optimized_prompt]
367
+ )
368
+
369
+ check_status_btn.click(
370
+ check_and_update_animation,
371
+ inputs=animation_status,
372
+ outputs=[animation_status, animation_output]
373
+ )
374
+
375
def handle_dropdown_change(dropdown_value):
    """Copy a chosen preset into the prompt box and load its chat history.

    The placeholder row ("--- ...") leaves the textbox untouched and clears
    the chat panel.
    """
    if not dropdown_value.startswith("---"):
        history = update_character_and_load_history(dropdown_value)
        return dropdown_value, history
    return gr.update(), []
380
+
381
+ image_dropdown.change(
382
+ handle_dropdown_change,
383
+ inputs=[image_dropdown],
384
+ outputs=[image_prompt, chatbot]
385
+ )
386
+
387
# BUG FIX: `update_other_textbox` was referenced here but never defined
# anywhere in the file, so importing app.py raised NameError. Define it:
# copy a selected preset animation into the prompt textbox, ignoring the
# "---" placeholder row.
def update_other_textbox(dropdown_value):
    """Return the chosen preset text, or leave the textbox unchanged for the placeholder."""
    if dropdown_value.startswith("---"):
        return gr.update()
    return dropdown_value

animation_dropdown.change(
    update_other_textbox,
    inputs=[animation_dropdown],
    outputs=[animation_prompt]
)
392
+
393
+ # Add new function to handle direct text input
394
def update_character_from_input(text_value):
    """Track free-typed character text by refreshing the chat-session globals.

    Non-empty input updates the global ``character`` and derives a
    deterministic ``session_id`` (MD5 of the description) so the same
    character always maps to the same history. Always returns None — the
    Gradio event has no outputs.
    """
    global character, session_id
    if not text_value:
        return None
    character = text_value
    # Consistent session_id per character description.
    session_id = hashlib.md5(character.encode()).hexdigest()
    return None
401
+
402
+ # Add new event handler for direct text input
403
+ image_prompt.change(
404
+ update_character_from_input,
405
+ inputs=[image_prompt],
406
+ outputs=None
407
+ )
408
+
409
+ if __name__ == "__main__":
410
+ demo.launch()
image.jpg ADDED

Git LFS Details

  • SHA256: ae71db1d8ae152666637406e51964b394a1f2e98798dc968cf65d286df564674
  • Pointer size: 131 Bytes
  • Size of remote file: 126 kB
locustfile.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from locust import HttpUser, task, between
2
+ from dotenv import load_dotenv
3
+ import os
4
+ import hashlib
5
+ import random
6
+
7
+ load_dotenv()
8
+ HOST = os.getenv('host')
9
+ DEMO_SECRET = os.getenv('x-demo-secret')
10
+
11
+
12
class APIUser(HttpUser):
    """Locust user that load-tests the demo's draw/chat/animate endpoints."""

    wait_time = between(1, 3)  # Wait 1-3 seconds between tasks
    host = HOST

    # CONSISTENCY FIX: this list was duplicated verbatim in test_draw_endpoints
    # and test_chat_endpoints; one class-level constant keeps the chat sessions
    # aligned with the portraits generated by /test-draw.
    CHARACTER_PROMPTS = [
        "A friendly cartoon bear with expressive eyes, front view portrait",
        "A wise old wizard with a long white beard, looking directly at viewer",
        "A cheerful robot with glowing blue eyes, head and shoulders portrait",
        "A mystical elf warrior with pointed ears and silver hair, 3/4 view",
        "A cute animated fox with orange fur and big eyes, front facing",
        "A steampunk inventor with brass goggles and wild hair, portrait",
        "A gentle dragon with iridescent scales, close-up face shot",
        "A ferocious snake with a long, forked tongue, close-up face shot",
    ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Auth/content headers shared by every request; secret comes from .env.
        self.headers = {
            "Content-Type": "application/json",
            "x-demo-secret": DEMO_SECRET
        }

    @task(2)  # Higher weight for image generation tasks
    def test_draw_endpoints(self):
        """POST every character prompt to /test-draw."""
        for prompt in self.CHARACTER_PROMPTS:
            self.client.post("/test-draw",
                             json={"prompt": prompt},
                             headers=self.headers)

    @task(7)
    def test_chat_endpoints(self):
        """Run a fixed question list against a randomly chosen character."""
        questions = [
            "Hi",
            "How are you?",
            "What is your name?",
            "What is your favorite color?",
            "What is your favorite food?",
            "What is your favorite animal?",
            "What is your favorite movie?",
            "What is your favorite book?",
            "What is your favorite song?",
        ]

        character = random.choice(self.CHARACTER_PROMPTS)
        system_prompt = f"Imagine you are {character}"

        # Generate consistent session ID based on character
        session_id = f'locust-{hashlib.md5(character.encode()).hexdigest()}'

        for question in questions:
            self.client.post("/test-chat",
                             json={
                                 "system_prompt": system_prompt,
                                 "question": question,
                                 "session_id": session_id
                             },
                             headers=self.headers)

    @task(1)  # Lower weight since this is a two-step process
    def test_animation_flow(self):
        """Start an animation, then poll its status once if the request succeeded."""
        # Step 1: Start animation
        response = self.client.post("/test-animate",
                                    json={"text": "The animated bear looks at the camera and says 'What's up?'"},
                                    headers=self.headers)

        # Step 2: Check status (if the first request was successful)
        if response.status_code == 200:
            invocation_id = response.json().get("invocationId")
            if invocation_id:
                self.client.get(f"/check-clip-status/{invocation_id}",
                                headers=self.headers)
nova.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e6cfc55637e98bf2cb8f5014cbb0888ae39952afa54e58e27fe735f848d6a34
3
+ size 3072740
nova2.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d38295fd198490c15131c2695f8fe90d6660c961cb0345730970a05d54242897
3
+ size 1755614
requirements.txt ADDED
@@ -0,0 +1,7 @@
1
+ gradio
2
+ requests
3
+ python-dotenv
4
+ locust
5
+ websockets
6
+ pillow
7
+ numpy
test/test.http ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @host = {{$dotenv host}}
2
+ @x-demo-secret = {{$dotenv x-demo-secret}}
3
+
4
+
5
+ ### Generate an image with a futuristic cityscape
6
+ POST {{host}}/draw
7
+ Content-Type: application/json
8
+ x-demo-secret: {{x-demo-secret}}
9
+
10
+ {
11
+ "prompt": "A futuristic cityscape at sunset with flying cars and glowing skyscrapers"
12
+ }
13
+
14
+ ### Similar prompt
15
+ POST {{host}}/draw
16
+ Content-Type: application/json
17
+ x-demo-secret: {{x-demo-secret}}
18
+
19
+ {
20
+ "prompt": "A futuristic city's view at sunset with flying automobiles and glowing buildings"
21
+ }
22
+
23
+ ### Generate a tranquil mountain lake
24
+ POST {{host}}/draw
25
+ Content-Type: application/json
26
+ x-demo-secret: {{x-demo-secret}}
27
+
28
+ {
29
+ "prompt": "A tranquil mountain lake surrounded by snow-capped peaks under a clear blue sky"
30
+ }
31
+
32
+ ### Generate a cyberpunk street scene
33
+ POST {{host}}/draw
34
+ Content-Type: application/json
35
+ x-demo-secret: {{x-demo-secret}}
36
+
37
+ {
38
+ "prompt": "A cyberpunk street scene at night with neon lights and futuristic vehicles"
39
+ }
40
+
41
+ ### Generate a magical forest
42
+ POST {{host}}/draw
43
+ Content-Type: application/json
44
+ x-demo-secret: {{x-demo-secret}}
45
+
46
+ {
47
+ "prompt": "A magical forest with glowing trees and mythical creatures"
48
+ }
49
+
50
+ ### Generate a surreal dreamscape
51
+ POST {{host}}/draw
52
+ Content-Type: application/json
53
+ x-demo-secret: {{x-demo-secret}}
54
+
55
+ {
56
+ "prompt": "A surreal dreamscape with floating islands and waterfalls pouring into the sky"
57
+ }
58
+
59
+ ### Generate a medieval castle
60
+ POST {{host}}/draw
61
+ Content-Type: application/json
62
+ x-demo-secret: {{x-demo-secret}}
63
+
64
+ {
65
+ "prompt": "A medieval castle with a moat and a drawbridge"
66
+ }
67
+
68
+ ### Chat with model
69
+ POST {{host}}/chat
70
+ Content-Type: application/json
71
+ x-demo-secret: {{x-demo-secret}}
72
+
73
+ {
74
+ "question": "Who is the president of the United States?"
75
+ }
76
+
77
+
78
+ ### Chat with model
79
+ POST {{host}}/chat
80
+ Content-Type: application/json
81
+ x-demo-secret: {{x-demo-secret}}
82
+
83
+ {
84
+ "question": "What is the capital of the United States?"
85
+ }
86
+
87
+ ### Animate a clip
88
+ # @name animate
89
+ POST {{host}}/animate
90
+ Content-Type: application/json
91
+ x-demo-secret: {{x-demo-secret}}
92
+
93
+ {
94
+ "text": "The animated bear looks at the camera and says 'What's up?'"
95
+ }
96
+
97
+ ### Check clip status
98
+ @invocationId = {{animate.response.body.invocationId}}
99
+ GET {{host}}/check-clip-status/{{invocationId}}
100
+ Content-Type: application/json
101
+ x-demo-secret: {{x-demo-secret}}