ginipick commited on
Commit
515adca
Β·
verified Β·
1 Parent(s): 3bc38a2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -220
app.py CHANGED
@@ -1,250 +1,115 @@
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
 
2
  from gradio_client import Client, handle_file
3
- import logging
4
- import traceback
5
- from datetime import datetime
6
- import json
7
 
8
- # λ‘œκΉ… μ„€μ •
9
  logging.basicConfig(
10
- level=logging.DEBUG,
11
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
12
  )
13
- logger = logging.getLogger(__name__)
14
 
15
- # API μ—”λ“œν¬μΈνŠΈ
16
- API_URL = "http://211.233.58.201:7788/"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
  def test_api_connection():
19
- """API μ—°κ²° ν…ŒμŠ€νŠΈ"""
20
  try:
21
- client = Client(API_URL)
22
- logger.info(f"Successfully connected to API: {API_URL}")
23
- return True, "API μ—°κ²° 성곡!"
 
 
 
24
  except Exception as e:
25
- logger.error(f"Failed to connect to API: {str(e)}")
26
- return False, f"API μ—°κ²° μ‹€νŒ¨: {str(e)}"
 
27
 
 
28
  def generate_animation(image, audio, guidance_scale, steps, progress=gr.Progress()):
29
- """μ• λ‹ˆλ©”μ΄μ…˜ 생성 ν•¨μˆ˜"""
30
- logger.info("=== μ• λ‹ˆλ©”μ΄μ…˜ 생성 μ‹œμž‘ ===")
31
- logs = []
32
-
33
  try:
34
- # μž…λ ₯ νŒŒλΌλ―Έν„° λ‘œκΉ…
35
- log_msg = f"μž…λ ₯ νŒŒλΌλ―Έν„°:\n- Image: {image}\n- Audio: {audio}\n- Guidance Scale: {guidance_scale}\n- Steps: {steps}"
36
- logger.info(log_msg)
37
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] {log_msg}")
38
-
39
- # μž…λ ₯ 검증
40
- if image is None:
41
- error_msg = "이미지가 μ œκ³΅λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€."
42
- logger.error(error_msg)
43
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] ERROR: {error_msg}")
44
- return None, None, "\n".join(logs)
45
-
46
- if audio is None:
47
- error_msg = "μ˜€λ””μ˜€κ°€ μ œκ³΅λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€."
48
- logger.error(error_msg)
49
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] ERROR: {error_msg}")
50
- return None, None, "\n".join(logs)
51
-
52
- # Progress μ—…λ°μ΄νŠΈ
53
- progress(0.1, desc="API ν΄λΌμ΄μ–ΈνŠΈ μ΄ˆκΈ°ν™” 쀑...")
54
-
55
- # API ν΄λΌμ΄μ–ΈνŠΈ 생성
56
- client = Client(API_URL)
57
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] API ν΄λΌμ΄μ–ΈνŠΈ 생성 μ™„λ£Œ")
58
-
59
- # Progress μ—…λ°μ΄νŠΈ
60
- progress(0.3, desc="파일 ν•Έλ“€ 생성 쀑...")
61
-
62
- # 파일 ν•Έλ“€ 생성
63
- image_handle = handle_file(image)
64
- audio_handle = handle_file(audio)
65
-
66
- log_msg = f"파일 ν•Έλ“€ 생성 μ™„λ£Œ:\n- Image handle: {type(image_handle)}\n- Audio handle: {type(audio_handle)}"
67
- logger.debug(log_msg)
68
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] {log_msg}")
69
-
70
- # Progress μ—…λ°μ΄νŠΈ
71
- progress(0.5, desc="API 호좜 쀑... (이 과정은 μ‹œκ°„μ΄ 걸릴 수 μžˆμŠ΅λ‹ˆλ‹€)")
72
-
73
- # API 호좜
74
- logger.info("API 호좜 μ‹œμž‘")
75
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] API 호좜 μ‹œμž‘...")
76
-
77
  result = client.predict(
78
- image_path=image_handle,
79
- audio_path=audio_handle,
80
  guidance_scale=guidance_scale,
81
  steps=steps,
82
  api_name="/generate_animation"
83
  )
84
-
85
- # Progress μ—…λ°μ΄νŠΈ
86
- progress(0.9, desc="κ²°κ³Ό 처리 쀑...")
87
-
88
- # κ²°κ³Ό λ‘œκΉ…
89
- log_msg = f"API 호좜 성곡!\nκ²°κ³Ό νƒ€μž…: {type(result)}\nκ²°κ³Ό 길이: {len(result) if isinstance(result, (list, tuple)) else 'N/A'}"
90
- logger.info(log_msg)
91
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] {log_msg}")
92
-
93
- # κ²°κ³Ό 상세 λ‘œκΉ…
94
  if isinstance(result, (list, tuple)) and len(result) >= 2:
95
- for i, item in enumerate(result):
96
- log_msg = f"κ²°κ³Ό[{i}]: {type(item)} - {str(item)[:100]}..."
97
- logger.debug(log_msg)
98
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] {log_msg}")
99
-
100
- animation_result = result[0]
101
- comparison_result = result[1]
102
-
103
- # λΉ„λ””μ˜€ 경둜 μΆ”μΆœ
104
- animation_video = None
105
- comparison_video = None
106
-
107
- if isinstance(animation_result, dict) and 'video' in animation_result:
108
- animation_video = animation_result['video']
109
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] μ• λ‹ˆλ©”μ΄μ…˜ λΉ„λ””μ˜€ 경둜: {animation_video}")
110
-
111
- if isinstance(comparison_result, dict) and 'video' in comparison_result:
112
- comparison_video = comparison_result['video']
113
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] 비ꡐ λΉ„λ””μ˜€ 경둜: {comparison_video}")
114
-
115
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] === μ• λ‹ˆλ©”μ΄μ…˜ 생성 μ™„λ£Œ! ===")
116
-
117
- return animation_video, comparison_video, "\n".join(logs)
118
  else:
119
- error_msg = f"μ˜ˆμƒμΉ˜ λͺ»ν•œ κ²°κ³Ό ν˜•μ‹: {type(result)}"
120
- logger.error(error_msg)
121
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] ERROR: {error_msg}")
122
- return None, None, "\n".join(logs)
123
-
124
  except Exception as e:
125
- error_msg = f"였λ₯˜ λ°œμƒ: {str(e)}"
126
- logger.error(error_msg)
127
- logger.error(traceback.format_exc())
128
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] ERROR: {error_msg}")
129
- logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] 상세 였λ₯˜:\n{traceback.format_exc()}")
130
  return None, None, "\n".join(logs)
131
 
132
- # Gradio μΈν„°νŽ˜μ΄μŠ€ 생성
133
- with gr.Blocks(title="Animation Generator API Test") as demo:
134
- gr.Markdown("""
135
- # 🎬 Animation Generator API Test Interface
136
-
137
- 이 μΈν„°νŽ˜μ΄μŠ€λŠ” `http://211.233.58.201:7788/` APIλ₯Ό ν…ŒμŠ€νŠΈν•˜κΈ° μœ„ν•œ λ„κ΅¬μž…λ‹ˆλ‹€.
138
-
139
- ## μ‚¬μš© 방법:
140
- 1. 포트레이트 이미지λ₯Ό μ—…λ‘œλ“œν•˜μ„Έμš”
141
- 2. λ“œλΌμ΄λΉ™ μ˜€λ””μ˜€ νŒŒμΌμ„ μ—…λ‘œλ“œν•˜μ„Έμš”
142
- 3. Guidance Scaleκ³Ό Inference Stepsλ₯Ό μ‘°μ •ν•˜μ„Έμš”
143
- 4. "Generate Animation" λ²„νŠΌμ„ ν΄λ¦­ν•˜μ„Έμš”
144
- """)
145
-
146
- # API μ—°κ²° μƒνƒœ 확인
147
- with gr.Row():
148
- with gr.Column():
149
- connection_status = gr.Textbox(label="API μ—°κ²° μƒνƒœ", interactive=False)
150
- check_connection_btn = gr.Button("API μ—°κ²° ν…ŒμŠ€νŠΈ", variant="secondary")
151
-
152
  gr.Markdown("---")
153
-
154
- # μž…λ ₯ μ„Ήμ…˜
155
  with gr.Row():
156
  with gr.Column():
157
- image_input = gr.Image(
158
- label="Portrait Image (any aspect ratio)",
159
- type="filepath",
160
- elem_id="image_input"
161
- )
162
- audio_input = gr.Audio(
163
- label="Driving Audio",
164
- type="filepath",
165
- elem_id="audio_input"
166
- )
167
-
168
- with gr.Column():
169
- guidance_scale = gr.Slider(
170
- minimum=1,
171
- maximum=10,
172
- value=3,
173
- step=0.1,
174
- label="Guidance Scale",
175
- info="Controls the strength of the guidance"
176
- )
177
- steps = gr.Slider(
178
- minimum=1,
179
- maximum=50,
180
- value=10,
181
- step=1,
182
- label="Inference Steps",
183
- info="Number of denoising steps"
184
- )
185
-
186
- generate_btn = gr.Button("πŸš€ Generate Animation", variant="primary", size="lg")
187
-
188
- # κ²°κ³Ό μ„Ήμ…˜
189
- gr.Markdown("## πŸ“½οΈ Results")
190
- with gr.Row():
191
- with gr.Column():
192
- animation_output = gr.Video(
193
- label="Animation Result",
194
- elem_id="animation_output"
195
  )
196
  with gr.Column():
197
- comparison_output = gr.Video(
198
- label="Side-by-Side Comparison",
199
- elem_id="comparison_output"
200
- )
201
-
202
- # 둜그 μ„Ήμ…˜
203
- with gr.Accordion("πŸ“‹ μ‹€ν–‰ 둜그", open=True):
204
- logs_output = gr.Textbox(
205
- label="Logs",
206
- lines=10,
207
- max_lines=20,
208
- interactive=False,
209
- elem_id="logs"
210
- )
211
-
212
- # 예제 μ„Ήμ…˜
213
- gr.Markdown("## 🎯 Examples")
214
- gr.Examples(
215
- examples=[
216
- ["example_portrait.jpg", "example_audio.wav", 3.0, 10],
217
- ["example_portrait2.jpg", "example_audio2.wav", 2.5, 15],
218
- ],
219
- inputs=[image_input, audio_input, guidance_scale, steps],
220
- outputs=[animation_output, comparison_output, logs_output],
221
- fn=generate_animation,
222
- cache_examples=False
223
- )
224
-
225
- # 이벀트 ν•Έλ“€λŸ¬
226
- check_connection_btn.click(
227
- fn=test_api_connection,
228
- outputs=[connection_status, connection_status]
229
- )
230
-
231
- generate_btn.click(
232
- fn=generate_animation,
233
- inputs=[image_input, audio_input, guidance_scale, steps],
234
- outputs=[animation_output, comparison_output, logs_output]
235
- )
236
-
237
- # νŽ˜μ΄μ§€ λ‘œλ“œ μ‹œ API μ—°κ²° ν…ŒμŠ€νŠΈ
238
- demo.load(
239
- fn=test_api_connection,
240
- outputs=[connection_status, connection_status]
241
- )
242
 
243
- # μ•± μ‹€ν–‰
244
  if __name__ == "__main__":
245
- demo.launch(
246
- server_name="0.0.0.0",
247
- server_port=7860,
248
- share=False,
249
- debug=True
250
- )
 
1
+ """
2
+ Gradio UI that calls the remote animation server.
3
+ Set ANIM_API_URL env-var when deploying:
4
+ ANIM_API_URL=http://211.233.58.201:7788/
5
+ """
6
+
7
+ import os, logging
8
+ from datetime import datetime
9
+
10
  import gradio as gr
11
+ import httpx
12
  from gradio_client import Client, handle_file
 
 
 
 
13
 
 
14
  logging.basicConfig(
15
+ level=logging.INFO,
16
+ format="%(asctime)s [%(levelname)s] %(message)s"
17
  )
18
+ log = logging.getLogger(__name__)
19
 
20
+ # ------------------------------------------------------------------ #
21
+ # κΈ°λ³Έ 접속 μ£Όμ†Œ – λ‚΄λΆ€(Loopback) 여뢀에 따라 μžλ™ νŒλ‹¨ + ν™˜κ²½λ³€μˆ˜ μž¬μ •μ˜
22
+ # ------------------------------------------------------------------ #
23
+ _DEFAULT_URL = "http://127.0.0.1:7788/"
24
+ REMOTE_URL = "http://211.233.58.201:7788/"
25
+ API_URL = os.getenv("ANIM_API_URL", REMOTE_URL)
26
+
27
+ # NOTE(review): HF Spaces set SPACE_ID, not HF_SPACE β€” confirm this env var
28
+ # is actually provided by the deployment. A loopback URL can never be reached
29
+ # from inside a Space container, so fail fast in that case.
30
+ if "127.0.0.1" in API_URL and os.getenv("HF_SPACE") == "true":
31
+ raise RuntimeError("HF Space λ‚΄λΆ€μ—μ„œλŠ” 곡인 IP λ˜λŠ” λ„λ©”μΈμœΌλ‘œ ANIM_API_URL을 μ§€μ •ν•΄μ•Ό ν•©λ‹ˆλ‹€.")
32
+
33
+ # ------------------------------------------------------------------ #
34
+ def make_client() -> Client:
35
+ """컀λ„₯μ…˜ νƒ€μž„μ•„μ›ƒμ„ 30s둜 늘렀 μž¬μ‹œλ„ κ°€λŠ₯μ„± 확보"""
36
+ timeout = httpx.Timeout(connect=30.0, read=120.0)
37
+ return Client(API_URL, timeout=timeout, retries=3)
38
 
39
  def test_api_connection():
40
+ now = datetime.now().strftime("%H:%M:%S")
41
  try:
42
+ client = make_client()
43
+ # readiness-probe: Client has no .get(); view_api() raises if unreachable
44
+ client.view_api(print_info=False)
45
+ msg = f"[{now}] μ„œλ²„ μ—°κ²° 성곡 βœ…"
46
+ log.info(msg)
47
+ return True, msg
48
  except Exception as e:
49
+ msg = f"[{now}] μ„œλ²„ μ—°κ²° μ‹€νŒ¨ ❌ : {e}"
50
+ log.error(msg)
51
+ return False, msg
52
 
53
+ # ------------------------------------------------------------------ #
54
  def generate_animation(image, audio, guidance_scale, steps, progress=gr.Progress()):
55
+ start = datetime.now().strftime("%H:%M:%S")
56
+ logs = [f"[{start}] μš”μ²­ μ‹œμž‘"]
 
 
57
  try:
58
+ if image is None or audio is None:
59
+ raise ValueError("이미지와 μ˜€λ””μ˜€λ₯Ό λͺ¨λ‘ μ—…λ‘œλ“œν•˜μ„Έμš”.")
60
+
61
+ progress(0.05, desc="파일 μ€€λΉ„")
62
+ client = make_client()
63
+ progress(0.15, desc="μ„œλ²„ 호좜 쀑… (μ΅œλŒ€ 2-3λΆ„ μ†Œμš”)")
64
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  result = client.predict(
66
+ image_path=handle_file(image),
67
+ audio_path=handle_file(audio),
68
  guidance_scale=guidance_scale,
69
  steps=steps,
70
  api_name="/generate_animation"
71
  )
72
+
73
+ progress(0.95, desc="κ²°κ³Ό 정리")
 
 
 
 
 
 
 
 
74
  if isinstance(result, (list, tuple)) and len(result) >= 2:
75
+ anim, comp = result[0], result[1]
76
+ logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] 성곡")
77
+ return anim, comp, "\n".join(logs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  else:
79
+ raise RuntimeError(f"μ˜ˆμƒμΉ˜ λͺ»ν•œ λ°˜ν™˜ ν˜•μ‹: {type(result)}")
 
 
 
 
80
  except Exception as e:
81
+ logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] 였λ₯˜: {e}")
 
 
 
 
82
  return None, None, "\n".join(logs)
83
 
84
+ # ------------------------------------------------------------------ #
85
+ with gr.Blocks(title="Animation Generator Client") as demo:
86
+ gr.Markdown("# 🎬 Animation Generator – Client UI")
87
+
88
+ # health check
89
+ status_box = gr.Textbox(label="API μƒνƒœ", interactive=False)
90
+ gr.Button("μ„œλ²„ μ—°κ²° ν…ŒμŠ€νŠΈ").click(
91
+ lambda: test_api_connection()[1],  # only the message string fits the Textbox
92
+ outputs=status_box
93
+ )
94
+
 
 
 
 
 
 
 
 
 
95
  gr.Markdown("---")
 
 
96
  with gr.Row():
97
  with gr.Column():
98
+ img_in = gr.Image(type="filepath", label="Portrait")
99
+ aud_in = gr.Audio(type="filepath", label="Audio")
100
+ scale = gr.Slider(1, 10, value=3.0, step=0.1, label="Guidance Scale")
101
+ steps = gr.Slider(5, 30, value=10, step=1, label="Inference Steps")
102
+ gr.Button("πŸš€ Generate").click(
103
+ generate_animation,
104
+ inputs=[img_in, aud_in, scale, steps],
105
+ outputs=["animation_out", "comp_out", "log_out"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
  )
107
  with gr.Column():
108
+ gr.Video(label="Animation Result", elem_id="animation_out")
109
+ gr.Video(label="Side-by-side", elem_id="comp_out")
110
+
111
+ with gr.Accordion("둜그", open=False):
112
+ gr.Textbox(label="μ‹€ν–‰ 둜그", lines=12, max_lines=20, interactive=False, elem_id="log_out")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
 
 
114
  if __name__ == "__main__":
115
+ demo.queue(max_size=4).launch(server_name="0.0.0.0", server_port=7860, show_api=False)