dx8152 commited on
Commit
9f0f291
·
verified ·
1 Parent(s): 6d07d34

Upload 15 files

Browse files
LTX2.3/API issues-API问题办法.bat ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
@echo off
:: Fix tool: force LTX Desktop to use the local GPU instead of the FAL API.
:: Must be run as Administrator (it edits a file under "C:\Program Files").
chcp 65001 >nul
title LTX 本地显卡模式修复工具

echo ========================================
echo    LTX 本地显卡模式修复工具
echo ========================================
echo.

:: Elevation check: "net session" only succeeds with admin rights,
:: so a non-zero errorlevel means the script was not elevated.
net session >nul 2>&1
if %errorlevel% neq 0 (
    echo [!] 请右键选择"以管理员身份运行"此脚本
    pause
    exit /b 1
)

:: Step 1: lower the VRAM threshold in runtime_policy.py from 31 GB to 6 GB
:: so GPUs with >= 6 GB are allowed to run locally.
echo [1/2] 正在修改 VRAM 阈值...
set "policy_file=C:\Program Files\LTX Desktop\resources\backend\runtime_config\runtime_policy.py"

if exist "%policy_file%" (
    powershell -Command "(Get-Content '%policy_file%') -replace 'vram_gb < 31', 'vram_gb < 6' | Set-Content '%policy_file%'"
    rem BUGFIX: carets must be doubled to print literally; the original
    rem "echo ^_^ ..." let cmd.exe eat the carets and printed "_ ..." instead.
    echo ^^_^^ VRAM 阈值已修改为 6GB
) else (
    echo [!] 未找到 runtime_policy.py,请确认 LTX Desktop 已安装
)

:: Step 2: blank out fal_api_key in settings.json so the app does not
:: treat the API as configured and fall back to remote generation.
echo.
echo [2/2] 正在清空 API Key...
rem %LOCALAPPDATA% expands to %USERPROFILE%\AppData\Local by default and,
rem unlike the hard-coded path, also honors redirected profile folders.
set "settings_file=%LOCALAPPDATA%\LTXDesktop\settings.json"

if exist "%settings_file%" (
    powershell -Command "$content = Get-Content '%settings_file%' -Raw; $content = $content -replace '\"fal_api_key\": \"[^\"]*\"', '\"fal_api_key\": \"\"'; Set-Content -Path '%settings_file%' -Value $content -NoNewline"
    echo ^^_^^ API Key 已清空
) else (
    echo [!] 未找到 settings.json,首次运行后会自动创建
)

echo.
echo ========================================
echo 修复完成!请重启 LTX Desktop
echo ========================================
pause
LTX2.3/API issues-API问题办法.txt ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 1. 复制LTX桌面版的快捷方式到LTX_Shortcut
2
+
3
+ 2. 运行run.bat
4
+ ----
5
+ 1. Copy the LTX desktop shortcut to LTX_Shortcut
6
+
7
+ 2. Run run.bat
8
+ ----
9
+
10
+
11
+
12
+ 【问题描述 / Problem】
13
+ 系统强制使用 FAL API 生成图片,即使本地有 GPU 可用。
14
+ System forces FAL API generation even when local GPU is available.
15
+
16
+ 【原因 / Cause】
17
+ LTX 强制要求 GPU 有 31GB VRAM 才会使用本地显卡,低于此值会强制走 API 模式。
18
+ LTX requires 31GB VRAM to use local GPU. Below this, it forces API mode.
19
+
20
+ ================================================================================
21
+ 【修复方法 / Fix Method】
22
+ ================================================================================
23
+
24
+ 运行: API issues.bat.bat (以管理员身份)
25
+ Run: API issues.bat.bat (as Administrator)
26
+
27
+ ================================================================================
28
+ ================================================================================
29
+
30
+ 【或者手动 / Or Manual】
31
+
32
+ 1. 修改 VRAM 阈值 / Modify VRAM Threshold
33
+ 文件路径 / File: C:\Program Files\LTX Desktop\resources\backend\runtime_config\runtime_policy.py
34
+ 第16行 / Line 16:
35
+ 原 / Original: return vram_gb < 31
36
+ 改为 / Change: return vram_gb < 6
37
+
38
+ 2. 清空 API Key / Clear API Key
39
+ 文件路径 / File: C:\Users\<用户名>\AppData\Local\LTXDesktop\settings.json
40
+ 原 / Original: "fal_api_key": "xxxxx"
41
+ 改为 / Change: "fal_api_key": ""
42
+
43
+ 【说明 / Note】
44
+ - VRAM 阈值改为 6GB,意味着 6GB 及以上显存都会使用本地显卡
45
+ - VRAM threshold set to 6GB means 6GB+ VRAM will use local GPU
46
+ - 清空 fal_api_key 避免系统误判为已配置 API
47
+ - Clear fal_api_key to avoid system thinking API is configured
48
+ - 修改后重启程序即可生效
49
+ - Restart LTX Desktop after changes
50
+ ================================================================================
LTX2.3/LTX_Shortcut/LTX Desktop.lnk ADDED
Binary file (1.94 kB). View file
 
LTX2.3/UI/index.css ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ :root {
2
+ --accent: #2563EB; /* Refined blue – not too bright, not purple */
3
+ --accent-hover:#3B82F6;
4
+ --accent-dim: rgba(37,99,235,0.14);
5
+ --accent-ring: rgba(37,99,235,0.35);
6
+ --bg: #111113;
7
+ --panel: #18181B;
8
+ --panel-2: #1F1F23;
9
+ --item: rgba(255,255,255,0.035);
10
+ --border: rgba(255,255,255,0.08);
11
+ --border-2: rgba(255,255,255,0.05);
12
+ --text-dim: #71717A;
13
+ --text-sub: #A1A1AA;
14
+ --text: #FAFAFA;
15
+ }
16
+
17
+ * { box-sizing: border-box; -webkit-font-smoothing: antialiased; min-width: 0; }
18
+ body {
19
+ background: var(--bg); margin: 0; color: var(--text);
20
+ font-family: -apple-system, "SF Pro Display", "Segoe UI", sans-serif;
21
+ display: flex; height: 100vh; overflow: hidden;
22
+ font-size: 13px; line-height: 1.5;
23
+ }
24
+
25
+ .sidebar {
26
+ width: 460px; min-width: 460px;
27
+ background: var(--panel);
28
+ border-right: 1px solid var(--border);
29
+ display: flex; flex-direction: column; z-index: 20;
30
+ overflow-y: auto; overflow-x: hidden;
31
+ }
32
+
33
+ /* Scrollbar */
34
+ ::-webkit-scrollbar { width: 5px; height: 5px; }
35
+ ::-webkit-scrollbar-track { background: transparent; }
36
+ ::-webkit-scrollbar-thumb { background: rgba(255,255,255,0.08); border-radius: 10px; }
37
+ ::-webkit-scrollbar-thumb:hover { background: rgba(255,255,255,0.18); }
38
+
39
+ .sidebar-header { padding: 24px 24px 4px; }
40
+ .sidebar-section { padding: 8px 24px 18px; border-bottom: 1px solid var(--border); }
41
+
42
+ .setting-group {
43
+ background: rgba(255,255,255,0.025);
44
+ border: 1px solid var(--border-2);
45
+ border-radius: 10px;
46
+ padding: 14px;
47
+ margin-bottom: 12px;
48
+ }
49
+ .group-title {
50
+ font-size: 10px; color: var(--text-dim); font-weight: 700;
51
+ text-transform: uppercase; letter-spacing: 0.7px;
52
+ margin-bottom: 12px;
53
+ padding-bottom: 5px;
54
+ border-bottom: 1px solid var(--border-2);
55
+ }
56
+
57
+ /* Mode Tabs */
58
+ .tabs {
59
+ display: flex; gap: 4px; margin-bottom: 14px;
60
+ background: rgba(255,255,255,0.04);
61
+ padding: 4px; border-radius: 10px;
62
+ border: 1px solid var(--border-2);
63
+ }
64
+ .tab {
65
+ flex: 1; padding: 9px 0; text-align: center; border-radius: 7px;
66
+ cursor: pointer; font-size: 12px; color: var(--text-dim);
67
+ transition: all 0.2s; font-weight: 600;
68
+ display: flex; align-items: center; justify-content: center;
69
+ }
70
+ .tab.active { background: var(--accent); color: #fff; box-shadow: 0 1px 6px rgba(10,132,255,0.45); }
71
+ .tab:hover:not(.active) { background: rgba(255,255,255,0.06); color: var(--text); }
72
+
73
+ .label-group { display: flex; justify-content: space-between; align-items: center; margin-bottom: 6px; }
74
+ label { display: block; font-size: 11px; color: var(--text-dim); font-weight: 600; text-transform: uppercase; letter-spacing: 0.5px; margin-bottom: 6px; }
75
+ .val-badge { font-size: 11px; color: var(--accent); font-family: "SF Mono", ui-monospace, monospace; font-weight: 600; }
76
+
77
+ input[type="text"], input[type="number"], select, textarea {
78
+ width: 100%; background: var(--panel-2);
79
+ border: 1px solid var(--border);
80
+ border-radius: 7px; color: var(--text);
81
+ padding: 8px 11px; font-size: 12.5px; outline: none; margin-bottom: 9px;
82
+ /* Only transition border/shadow – NOT background-image to prevent arrow flicker */
83
+ transition: border-color 0.15s, box-shadow 0.15s;
84
+ }
85
+ input:focus, select:focus, textarea:focus {
86
+ border-color: var(--accent);
87
+ box-shadow: 0 0 0 2px var(--accent-ring);
88
+ }
89
+ select {
90
+ -webkit-appearance: none; -moz-appearance: none; appearance: none;
91
+ /* Stable grey arrow – no background shorthand so it won't animate */
92
+ background-color: var(--panel-2);
93
+ background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 24 24' fill='none' stroke='%2371717A' stroke-width='2.5' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='6 9 12 15 18 9'/%3E%3C/svg%3E");
94
+ background-repeat: no-repeat;
95
+ background-position: right 10px center;
96
+ background-size: 12px;
97
+ padding-right: 28px;
98
+ cursor: pointer;
99
+ /* Explicitly do NOT transition background properties */
100
+ transition: border-color 0.15s, box-shadow 0.15s;
101
+ }
102
+ select:focus { background-color: var(--panel-2); }
103
+ select option { background: #27272A; color: var(--text); }
104
+ textarea { resize: vertical; min-height: 78px; font-family: inherit; }
105
+
106
+ .slider-container { display: flex; align-items: center; gap: 12px; margin-bottom: 14px; }
107
+ input[type="range"] { flex: 1; accent-color: var(--accent); height: 4px; cursor: pointer; border-radius: 2px; }
108
+
109
+ .upload-zone {
110
+ border: 1px dashed var(--border); border-radius: 10px;
111
+ padding: 18px 10px; text-align: center; cursor: pointer;
112
+ background: rgba(255,255,255,0.01); margin-bottom: 10px; position: relative;
113
+ transition: all 0.2s;
114
+ }
115
+ .upload-zone:hover, .upload-zone.dragover { background: var(--accent-dim); border-color: var(--accent); }
116
+ .upload-icon { font-size: 18px; margin-bottom: 6px; opacity: 0.45; }
117
+ .upload-text { font-size: 11px; color: var(--text); }
118
+ .upload-hint { font-size: 10px; color: var(--text-dim); margin-top: 3px; }
119
+ .preview-thumb { width: 100%; height: auto; max-height: 100px; object-fit: contain; border-radius: 8px; display: none; margin-top: 10px; }
120
+ .clear-img-overlay {
121
+ position: absolute; top: 8px; right: 8px; background: rgba(255,59,48,0.85); color: white;
122
+ width: 20px; height: 20px; border-radius: 10px; display: none; align-items: center; justify-content: center;
123
+ font-size: 11px; cursor: pointer; z-index: 5;
124
+ }
125
+
126
+ .btn-outline {
127
+ background: var(--panel-2);
128
+ border: 1px solid var(--border);
129
+ color: var(--text-sub); padding: 5px 12px; border-radius: 7px;
130
+ font-size: 11.5px; font-weight: 600; cursor: pointer;
131
+ transition: background 0.15s, border-color 0.15s, color 0.15s;
132
+ display: inline-flex; align-items: center; justify-content: center; gap: 5px;
133
+ white-space: nowrap;
134
+ }
135
+ .btn-outline:hover:not(:disabled) { background: rgba(255,255,255,0.08); color: var(--text); border-color: rgba(255,255,255,0.18); }
136
+ .btn-outline:active { opacity: 0.7; }
137
+ .btn-outline:disabled { opacity: 0.3; cursor: not-allowed; }
138
+
139
+ .btn-icon {
140
+ padding: 5px; background: transparent; border: none; color: var(--text-dim);
141
+ border-radius: 6px; cursor: pointer; display: flex; align-items: center; justify-content: center;
142
+ transition: color 0.15s, background 0.15s;
143
+ }
144
+ .btn-icon:hover { color: var(--text-sub); background: rgba(255,255,255,0.07); }
145
+
146
+ .btn-primary {
147
+ width: 100%; padding: 13px;
148
+ background: var(--accent); border: none;
149
+ border-radius: 9px; color: #fff; font-weight: 700; font-size: 13.5px;
150
+ letter-spacing: 0.2px; cursor: pointer; margin-top: 14px;
151
+ transition: background 0.15s;
152
+ }
153
+ .btn-primary:hover:not(:disabled) { background: var(--accent-hover); }
154
+ .btn-primary:active { opacity: 0.82; }
155
+ .btn-primary:disabled { background: rgba(255,255,255,0.08); color: var(--text-dim); cursor: not-allowed; }
156
+
157
+ .btn-danger {
158
+ width: 100%; padding: 12px; background: #DC2626; border: none;
159
+ border-radius: 9px; color: #fff; font-weight: 700; font-size: 13.5px;
160
+ cursor: pointer; margin-top: 8px; display: none; transition: background 0.15s;
161
+ }
162
+ .btn-danger:hover { background: #EF4444; }
163
+
164
+ /* Workspace */
165
+ .workspace { flex: 1; display: flex; flex-direction: column; background: #0A0A0A; position: relative; overflow: hidden; }
166
+ .viewer { flex: 2; display: flex; align-items: center; justify-content: center; padding: 16px; background: #0A0A0A; position: relative; min-height: 40vh; }
167
+ .monitor {
168
+ width: 100%; height: 100%; max-width: 1650px; border-radius: 10px; border: 1px solid var(--border);
169
+ overflow: hidden; position: relative; background: #070707;
170
+ display: flex; align-items: center; justify-content: center;
171
+ background-image: radial-gradient(rgba(255,255,255,0.02) 1px, transparent 1px);
172
+ background-size: 18px 18px;
173
+ }
174
+ .monitor img, .monitor video {
175
+ width: auto; height: auto; max-width: 100%; max-height: 100%;
176
+ object-fit: contain; display: none; z-index: 2; border-radius: 3px;
177
+ }
178
+
179
+ .progress-container { position: absolute; bottom: 0; left: 0; width: 100%; height: 2px; background: var(--border-2); z-index: 10; }
180
+ #progress-fill { width: 0%; height: 100%; background: var(--accent); transition: width 0.5s; }
181
+ #loading-txt { font-size: 12px; color: var(--text-sub); font-weight: 600; z-index: 5; position: absolute; display: none; }
182
+
183
+
184
+
185
+ .spinner {
186
+ width: 12px; height: 12px;
187
+ border: 2px solid rgba(255,255,255,0.2);
188
+ border-top-color: currentColor;
189
+ border-radius: 50%;
190
+ animation: spin 1s linear infinite;
191
+ }
192
+ @keyframes spin { to { transform: rotate(360deg); } }
193
+
194
+ .loading-card {
195
+ display: flex; align-items: center; justify-content: center;
196
+ flex-direction: column; gap: 6px; color: var(--text-dim); font-size: 10px;
197
+ background: rgba(37,99,235,0.07) !important;
198
+ border-color: rgba(37,99,235,0.3) !important;
199
+ }
200
+ .loading-card .spinner { width: 28px; height: 28px; border-width: 3px; color: var(--accent); }
201
+ .loading-card:hover { background: rgba(37,99,235,0.14) !important; border-color: var(--accent) !important; }
202
+
203
+ .library { flex: 1.5; border-top: 1px solid var(--border); padding: 14px 20px; display: flex; flex-direction: column; background: #0F0F11; overflow-y: hidden; }
204
+ #log-container { flex: 1; overflow-y: auto; padding-right: 4px; }
205
+ #log { font-family: ui-monospace, "SF Mono", monospace; font-size: 10.5px; color: var(--text-dim); line-height: 1.7; }
206
+
207
+ /* History wrapper: scrollable area for thumbnails only */
208
+ #history-wrapper {
209
+ flex: 1;
210
+ overflow-y: auto;
211
+ min-height: 110px; /* always show at least one row */
212
+ padding-right: 4px;
213
+ }
214
+ #history-container {
215
+ display: grid;
216
+ grid-template-columns: repeat(auto-fill, minmax(150px, 1fr));
217
+ justify-content: start;
218
+ gap: 10px; align-content: flex-start;
219
+ padding-bottom: 4px;
220
+ }
221
+ /* Pagination row: hidden, using infinite scroll instead */
222
+ #pagination-bar {
223
+ display: none;
224
+ }
225
+
226
+ .history-card {
227
+ width: 100%; max-width: 200px; aspect-ratio: 16 / 9;
228
+ background: #1A1A1E; border-radius: 7px;
229
+ overflow: hidden; border: 1px solid var(--border);
230
+ cursor: pointer; position: relative; transition: border-color 0.15s, transform 0.15s;
231
+ }
232
+ .history-card:hover { border-color: var(--accent); transform: translateY(-1px); }
233
+ .history-card img, .history-card video { width: 100%; height: 100%; object-fit: cover; }
234
+ .history-type-badge {
235
+ position: absolute; top: 5px; left: 5px; font-size: 8px; padding: 1px 5px; border-radius: 3px;
236
+ background: rgba(0,0,0,0.8); color: var(--text-sub); border: 1px solid rgba(255,255,255,0.06);
237
+ z-index: 2; font-weight: 700; letter-spacing: 0.4px;
238
+ }
239
+ .history-delete-btn {
240
+ position: absolute; top: 5px; right: 5px; width: 20px; height: 20px;
241
+ border-radius: 50%; border: none; background: rgba(255,50,50,0.8); color: #fff;
242
+ font-size: 10px; cursor: pointer; z-index: 3; display: flex; align-items: center; justify-content: center;
243
+ opacity: 0; transition: opacity 0.2s;
244
+ }
245
+ .history-card:hover .history-delete-btn { opacity: 1; }
246
+ .history-delete-btn:hover { background: rgba(255,0,0,0.9); }
247
+
248
+ .vram-bar { width: 160px; height: 5px; background: rgba(255,255,255,0.08); border-radius: 999px; overflow: hidden; display: inline-block; vertical-align: middle; }
249
+ .vram-used { height: 100%; background: var(--accent); width: 0%; transition: width 0.5s; }
250
+
251
+ .sub-mode-toggle { display: flex; background: var(--panel-2); border-radius: 7px; padding: 3px; border: 1px solid var(--border); }
252
+ .sub-mode-btn { flex: 1; padding: 6px 0; border-radius: 5px; border: none; background: transparent; font-size: 11.5px; color: var(--text-dim); font-weight: 600; cursor: pointer; transition: background 0.15s, color 0.15s; }
253
+ .sub-mode-btn.active { background: var(--accent); color: #fff; }
254
+ .sub-mode-btn:hover:not(.active) { background: rgba(255,255,255,0.05); color: var(--text-sub); }
255
+
256
+ .vid-section { display: none; margin-top: 12px; }
257
+ .vid-section.active-section { display: block; animation: fadeIn 0.25s ease; }
258
+ @keyframes fadeIn { from { opacity: 0; transform: translateY(4px); } to { opacity: 1; transform: translateY(0); } }
259
+
260
+ /* Status indicator */
261
+ @keyframes breathe-orange {
262
+ 0%,100% { box-shadow: 0 0 4px #FF9F0A; opacity: 0.7; }
263
+ 50% { box-shadow: 0 0 10px #FF9F0A; opacity: 1; }
264
+ }
265
+ .indicator-busy { background: #FF9F0A !important; animation: breathe-orange 1.6s infinite ease-in-out !important; box-shadow: none !important; transition: all 0.3s; }
266
+ .indicator-ready { background: #30D158 !important; box-shadow: 0 0 8px rgba(48,209,88,0.6) !important; animation: none !important; transition: all 0.3s; }
267
+ .indicator-offline { background: #636366 !important; box-shadow: none !important; animation: none !important; transition: all 0.3s; }
268
+
269
+ .res-preview-tag { font-size: 11px; color: var(--accent); margin-bottom: 10px; font-family: ui-monospace, monospace; }
270
+ .top-status { display: flex; justify-content: space-between; font-size: 12px; color: var(--text-dim); margin-bottom: 8px; align-items: center; }
271
+ .checkbox-container { display: flex; align-items: center; gap: 8px; cursor: pointer; background: rgba(255,255,255,0.02); padding: 10px; border-radius: 8px; border: 1px solid var(--border-2); }
272
+ .checkbox-container input { width: 15px; height: 15px; accent-color: var(--accent); cursor: pointer; margin: 0; }
273
+ .checkbox-container label { margin-bottom: 0; cursor: pointer; text-transform: none; color: var(--text); }
274
+ .flex-row { display: flex; gap: 10px; }
275
+ .flex-1 { flex: 1; min-width: 0; }
276
+
277
+ @media (max-width: 1024px) {
278
+ body { flex-direction: column; overflow-y: auto; }
279
+ .sidebar { width: 100%; min-width: 100%; border-right: none; border-bottom: 1px solid var(--border); height: auto; overflow: visible; }
280
+ .workspace { height: auto; min-height: 100vh; overflow: visible; }
281
+ }
282
+ :root {
283
+ --plyr-color-main: #3F51B5;
284
+ --plyr-video-control-background-hover: rgba(255,255,255,0.1);
285
+ --plyr-control-radius: 6px;
286
+ --plyr-player-width: 100%;
287
+ }
288
+ .plyr {
289
+ border-radius: 8px;
290
+ overflow: hidden;
291
+ width: 100%;
292
+ height: 100%;
293
+ }
294
+ .plyr--video .plyr__controls {
295
+ background: linear-gradient(rgba(0,0,0,0), rgba(0,0,0,0.8));
296
+ padding: 20px 15px 15px 15px;
297
+ }
298
+
LTX2.3/UI/index.html ADDED
@@ -0,0 +1,302 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="zh-CN">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>LTX-2 | Multi-GPU Cinematic Studio</title>
7
+ <link rel="stylesheet" href="index.css">
8
+ <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/plyr/3.7.8/plyr.css" />
9
+ </head>
10
+ <body>
11
+
12
+ <aside class="sidebar">
13
+ <div class="sidebar-header">
14
+ <div style="display: flex; align-items: center; justify-content: space-between; margin-bottom: 12px;">
15
+ <div style="display: flex; align-items: center; gap: 10px;">
16
+ <div id="sys-indicator" class="indicator-ready" style="width: 12px; height: 12px; border-radius: 50%;"></div>
17
+ <span style="font-weight: 800; font-size: 18px;">LTX-2 STUDIO</span>
18
+ </div>
19
+ <div style="display: flex; gap: 8px; align-items: center;">
20
+ <button id="langBtn" onclick="toggleLang()" class="btn-icon" title="切换语言">
21
+ <svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><circle cx="12" cy="12" r="10"></circle><line x1="2" y1="12" x2="22" y2="12"></line><path d="M12 2a15.3 15.3 0 0 1 4 10 15.3 15.3 0 0 1-4 10 15.3 15.3 0 0 1-4-10 15.3 15.3 0 0 1 4-10z"></path></svg>
22
+ </button>
23
+ <button id="clearGpuBtn" onclick="clearGpu()" class="btn-outline" data-lang-zh="释放显存" data-lang-en="Clear VRAM">释放显存</button>
24
+ </div>
25
+ </div>
26
+
27
+ <div class="top-status" style="margin-bottom: 5px;">
28
+ <div style="display: flex; align-items: center; gap: 8px;">
29
+ <span id="sys-status" style="font-weight:bold; color: var(--text-dim); font-size: 12px;" data-lang-zh="正在扫描 GPU..." data-lang-en="Scanning GPU...">正在扫描 GPU...</span>
30
+ </div>
31
+
32
+ <button onclick="const el = document.getElementById('sys-settings'); el.style.display = el.style.display === 'none' ? 'block' : 'none';" class="btn-icon" title="系统高级设置">
33
+ <svg width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><circle cx="12" cy="12" r="3"></circle><path d="M19.4 15a1.65 1.65 0 0 0 .33 1.82l.06.06a2 2 0 0 1 0 2.83 2 2 0 0 1-2.83 0l-.06-.06a1.65 1.65 0 0 0-1.82-.33 1.65 1.65 0 0 0-1 1.51V21a2 2 0 0 1-2 2 2 2 0 0 1-2-2v-.09A1.65 1.65 0 0 0 9 19.4a1.65 1.65 0 0 0-1.82.33l-.06.06a2 2 0 0 1-2.83 0 2 2 0 0 1 0-2.83l.06-.06a1.65 1.65 0 0 0 .33-1.82 1.65 1.65 0 0 0-1.51-1H3a2 2 0 0 1-2-2 2 2 0 0 1 2-2h.09A1.65 1.65 0 0 0 4.6 9a1.65 1.65 0 0 0-.33-1.82l-.06-.06a2 2 0 0 1 0-2.83 2 2 0 0 1 2.83 0l.06.06a1.65 1.65 0 0 0 1.82.33H9a1.65 1.65 0 0 0 1-1.51V3a2 2 0 0 1 2-2 2 2 0 0 1 2 2v.09a1.65 1.65 0 0 0 1 1.51 1.65 1.65 0 0 0 1.82-.33l.06-.06a2 2 0 0 1 2.83 0 2 2 0 0 1 0 2.83l-.06.06a1.65 1.65 0 0 0-.33 1.82V9a1.65 1.65 0 0 0 1.51 1H21a2 2 0 0 1 2 2 2 2 0 0 1-2 2h-.09a1.65 1.65 0 0 0-1.51 1z"></path></svg>
34
+ </button>
35
+
36
+ </div>
37
+
38
+ <div style="font-size: 11px; color: var(--text-dim); margin-bottom: 20px; display: flex; align-items: center; width: 100%;">
39
+ <div class="vram-bar" style="width: 120px; min-width: 120px; margin-top: 0; margin-right: 12px;"><div class="vram-used" id="vram-fill"></div></div>
40
+ <span id="vram-text" style="font-variant-numeric: tabular-nums; flex-shrink: 0; text-align: right;">0/32 GB</span>
41
+ <span id="gpu-name" style="display: none;"></span>
42
+ </div>
43
+
44
+ <div id="sys-settings" style="display: none; padding: 14px; background: rgba(0,0,0,0.4) !important; border-radius: 12px; border: 1px solid rgba(255,255,255,0.1); margin-bottom: 15px; box-shadow: 0 4px 15px rgba(0,0,0,0.5); backdrop-filter: blur(10px);">
45
+ <div style="font-size: 13px; font-weight: bold; margin-bottom: 12px; color: #fff;" data-lang-zh="高级设置" data-lang-en="Advanced Settings">高级设置</div>
46
+
47
+ <label style="font-size: 11px; margin-bottom: 6px;" data-lang-zh="工作设备" data-lang-en="GPU Device">工作设备</label>
48
+ <select id="gpu-selector" onchange="switchGpu(this.value)" style="margin-bottom: 12px; font-size: 11px; padding: 6px;">
49
+ <option value="" data-lang-zh="正在检测 GPU..." data-lang-en="Detecting GPU...">正在检测 GPU...</option>
50
+ </select>
51
+
52
+ <label style="font-size: 11px; margin-bottom: 6px;" data-lang-zh="输出路径" data-lang-en="Output Path">输出路径</label>
53
+ <div style="display:flex; gap:6px;">
54
+ <input type="text" id="global-out-dir" onchange="setOutputDir()" placeholder="默认: LTXDesktop/outputs" style="margin-bottom: 0; padding: 6px 8px; font-size: 11px;">
55
+ <button class="btn-outline" style="padding: 6px;" onclick="browseOutputDir()" title="浏览本地目录">
56
+ <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M22 19a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V5a2 2 0 0 1 2-2h5l2 3h9a2 2 0 0 1 2 2z"></path></svg>
57
+ </button>
58
+ </div>
59
+ <div style="font-size: 10px; color: var(--text-dim); margin-top: 8px; line-height: 1.4;" data-lang-zh="系统默认会在 C 盘保留输出文件。您可以浏览更换目录。" data-lang-en="Default output in C drive. Browse to change path.">系统默认会在 C 盘保留输出文件。您可以浏览更换目录。</div>
60
+ </div>
61
+ </div>
62
+
63
+ <div class="sidebar-section">
64
+ <div class="tabs">
65
+ <div id="tab-video" class="tab active" onclick="switchMode('video')">
66
+ <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" style="margin-right: 6px;"><rect x="2" y="2" width="20" height="20" rx="2.18" ry="2.18"></rect><line x1="7" y1="2" x2="7" y2="22"></line><line x1="17" y1="2" x2="17" y2="22"></line><line x1="2" y1="12" x2="22" y2="12"></line><line x1="2" y1="7" x2="7" y2="7"></line><line x1="2" y1="17" x2="7" y2="17"></line><line x1="17" y1="17" x2="22" y2="17"></line><line x1="17" y1="7" x2="22" y2="7"></line></svg>
67
+ <span data-lang-zh="视频生成" data-lang-en="Video">视频生成</span>
68
+ </div>
69
+ <div id="tab-upscale" class="tab" onclick="switchMode('upscale')">
70
+ <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" style="margin-right: 6px;"><polygon points="13 2 3 14 12 14 11 22 21 10 12 10 13 2"></polygon></svg>
71
+ <span data-lang-zh="视频增强" data-lang-en="Upscale">视频增强</span>
72
+ </div>
73
+ <div id="tab-image" class="tab" onclick="switchMode('image')">
74
+ <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" style="margin-right: 6px;"><rect x="3" y="3" width="18" height="18" rx="2" ry="2"></rect><circle cx="8.5" cy="8.5" r="1.5"></circle><polyline points="21 15 16 10 5 21"></polyline></svg>
75
+ <span data-lang-zh="图像生成" data-lang-en="Image">图像生成</span>
76
+ </div>
77
+ </div>
78
+
79
+ <label data-lang-zh="视觉描述词" data-lang-en="Prompt">视觉描述词</label>
80
+ <textarea id="prompt" placeholder="在此输入视觉描述词..." style="height: 90px; margin-bottom: 0;"></textarea>
81
+ </div>
82
+
83
+ <!-- 视频模式选项 -->
84
+ <div class="sidebar-section" id="video-opts" style="display:none">
85
+ <div class="setting-group">
86
+ <div class="group-title" data-lang-zh="基础设置" data-lang-en="Basic Settings">基础设置</div>
87
+ <div class="flex-row">
88
+ <div class="flex-1">
89
+ <label data-lang-zh="清晰度" data-lang-en="Quality">清晰度</label>
90
+ <select id="vid-quality" onchange="updateResPreview()">
91
+ <option value="1080">1080P Full HD</option>
92
+ <option value="720" selected>720P Standard</option>
93
+ <option value="576">576P Preview</option>
94
+ </select>
95
+ </div>
96
+ <div class="flex-1">
97
+ <label data-lang-zh="画幅比例" data-lang-en="Aspect Ratio">画幅比例</label>
98
+ <select id="vid-ratio" onchange="updateResPreview()">
99
+ <option value="16:9">16:9</option>
100
+ <option value="9:16">9:16</option>
101
+ </select>
102
+ </div>
103
+ </div>
104
+ <div id="res-preview" class="res-preview-tag" style="margin-top: -5px; margin-bottom: 12px;">最终发送: 1280x720</div>
105
+
106
+ <div class="flex-row">
107
+ <div class="flex-1">
108
+ <label data-lang-zh="帧率" data-lang-en="FPS">帧率</label>
109
+ <select id="vid-fps">
110
+ <option value="24" selected>24 FPS</option>
111
+ <option value="25">25 FPS</option>
112
+ <option value="30">30 FPS</option>
113
+ <option value="48">48 FPS</option>
114
+ <option value="60">60 FPS</option>
115
+ </select>
116
+ </div>
117
+ <div class="flex-1">
118
+ <label data-lang-zh="时长 (秒)" data-lang-en="Duration (sec)">时长 (秒)</label>
119
+ <input type="number" id="vid-duration" value="5" min="1" max="30" step="1">
120
+ </div>
121
+ </div>
122
+ </div>
123
+
124
+ <div class="setting-group">
125
+ <div class="group-title" data-lang-zh="镜头与音频" data-lang-en="Camera & Audio">镜头与音频</div>
126
+ <label data-lang-zh="镜头运动" data-lang-en="Camera Motion">镜头运动</label>
127
+ <select id="vid-motion">
128
+ <option value="static" selected>Static (静止机位)</option>
129
+ <option value="dolly_in">Dolly In (推近)</option>
130
+ <option value="dolly_out">Dolly Out (拉远)</option>
131
+ <option value="dolly_left">Dolly Left (向左)</option>
132
+ <option value="dolly_right">Dolly Right (向右)</option>
133
+ <option value="jib_up">Jib Up (升臂)</option>
134
+ <option value="jib_down">Jib Down (降臂)</option>
135
+ <option value="focus_shift">Focus Shift (焦点)</option>
136
+ </select>
137
+ <div class="checkbox-container">
138
+ <input type="checkbox" id="vid-audio" checked>
139
+ <label for="vid-audio" data-lang-zh="生成 AI 环境音" data-lang-en="Generate AI Audio">生成 AI 环境音</label>
140
+ </div>
141
+ </div>
142
+
143
+ <!-- 生成媒介组 -->
144
+ <div class="setting-group" id="video-source-group">
145
+ <div class="group-title" data-lang-zh="生成媒介" data-lang-en="Source">生成媒介</div>
146
+
147
+ <div class="flex-row" style="margin-bottom: 10px;">
148
+ <div class="flex-1">
149
+ <label data-lang-zh="起始帧" data-lang-en="Start Frame">起始帧</label>
150
+ <div class="upload-zone" id="start-frame-drop-zone" onclick="document.getElementById('start-frame-input').click()">
151
+ <div class="clear-img-overlay" id="clear-start-frame-overlay" onclick="event.stopPropagation(); clearFrame('start')">×</div>
152
+ <div id="start-frame-placeholder">
153
+ <div class="upload-icon">🖼️</div>
154
+ <div class="upload-text" data-lang-zh="上传首帧" data-lang-en="Upload">上传首帧</div>
155
+ </div>
156
+ <img id="start-frame-preview" class="preview-thumb">
157
+ <input type="file" id="start-frame-input" accept="image/*" style="display:none" onchange="handleFrameUpload(this.files[0], 'start')">
158
+ </div>
159
+ <input type="hidden" id="start-frame-path">
160
+ </div>
161
+ <div class="flex-1">
162
+ <label data-lang-zh="结束帧" data-lang-en="End Frame">结束帧</label>
163
+ <div class="upload-zone" id="end-frame-drop-zone" onclick="document.getElementById('end-frame-input').click()">
164
+ <div class="clear-img-overlay" id="clear-end-frame-overlay" onclick="event.stopPropagation(); clearFrame('end')">×</div>
165
+ <div id="end-frame-placeholder">
166
+ <div class="upload-icon">🏁</div>
167
+ <div class="upload-text" data-lang-zh="上传尾帧" data-lang-en="Upload">上传尾帧</div>
168
+ </div>
169
+ <img id="end-frame-preview" class="preview-thumb">
170
+ <input type="file" id="end-frame-input" accept="image/*" style="display:none" onchange="handleFrameUpload(this.files[0], 'end')">
171
+ </div>
172
+ <input type="hidden" id="end-frame-path">
173
+ </div>
174
+ </div>
175
+
176
+ <div class="flex-row">
177
+ <div class="flex-1">
178
+ <label data-lang-zh="参考音频" data-lang-en="Audio">参考音频</label>
179
+ <div class="upload-zone" id="audio-drop-zone" onclick="document.getElementById('vid-audio-input').click()">
180
+ <div class="clear-img-overlay" id="clear-audio-overlay" onclick="event.stopPropagation(); clearUploadedAudio()">×</div>
181
+ <div id="audio-upload-placeholder">
182
+ <div class="upload-icon">🎵</div>
183
+ <div class="upload-text" data-lang-zh="点击上传音频" data-lang-en="Upload">点击上传音频</div>
184
+ </div>
185
+ <div id="audio-upload-status" style="display:none;">
186
+ <div class="upload-icon" style="color:var(--accent); opacity:1;">✔️</div>
187
+ <div id="audio-filename-status" class="upload-text"></div>
188
+ </div>
189
+ <input type="file" id="vid-audio-input" accept="audio/*" style="display:none" onchange="handleAudioUpload(this.files[0])">
190
+ </div>
191
+ <input type="hidden" id="uploaded-audio-path">
192
+ </div>
193
+ </div>
194
+ <div style="font-size: 10px; color: var(--text-dim); text-align: center; margin-top: 5px;" data-lang-zh="💡 仅首帧=图生视频,首尾帧=插帧" data-lang-en="💡 Start only=Img2Vid, Both=Interpolation">
195
+ 💡 仅首帧=图生视频,首尾帧=插帧
196
+ </div>
197
+ </div>
198
+ </div>
199
+
200
+ <!-- 图像模式选项 -->
201
+ <div id="image-opts" class="sidebar-section" style="display:none">
202
+ <label data-lang-zh="分辨率预设" data-lang-en="Resolution">分辨率预设</label>
203
+ <select id="img-res-preset" onchange="applyImgPreset(this.value)">
204
+ <option value="1024x1024">1:1 (1024x1024)</option>
205
+ <option value="1280x720">16:9 (1280x720)</option>
206
+ <option value="720x1280">9:16 (720x1280)</option>
207
+ <option value="custom">Custom</option>
208
+ </select>
209
+
210
+ <div id="img-custom-res" class="flex-row" style="margin-top: 10px;">
211
+ <div class="flex-1"><label data-lang-zh="宽度" data-lang-en="Width">宽度</label><input type="number" id="img-w" value="1024" onchange="updateImgResPreview()"></div>
212
+ <div class="flex-1"><label data-lang-zh="高度" data-lang-en="Height">高度</label><input type="number" id="img-h" value="1024" onchange="updateImgResPreview()"></div>
213
+ </div>
214
+ <div id="img-res-preview" class="res-preview-tag">最终发送: 1024x1024</div>
215
+
216
+ <div class="label-group" style="margin-top: 15px;">
217
+ <label data-lang-zh="采样步数" data-lang-en="Steps">采样步数</label>
218
+ <span class="val-badge" id="stepsVal">28</span>
219
+ </div>
220
+ <div class="slider-container">
221
+ <input type="range" id="img-steps" min="1" max="50" value="28" oninput="document.getElementById('stepsVal').innerText=this.value">
222
+ </div>
223
+ </div>
224
+
225
+ <!-- 超分模式选项 -->
226
+ <div id="upscale-opts" class="sidebar-section" style="display:none">
227
+ <div class="setting-group">
228
+ <label data-lang-zh="待超分视频" data-lang-en="Video">待超分视频</label>
229
+ <div class="upload-zone" id="upscale-drop-zone" onclick="document.getElementById('upscale-video-input').click()" style="margin-bottom: 0;">
230
+ <div class="clear-img-overlay" id="clear-upscale-overlay" onclick="event.stopPropagation(); clearUpscaleVideo()">×</div>
231
+ <div id="upscale-placeholder">
232
+ <div class="upload-icon">📹</div>
233
+ <div class="upload-text" data-lang-zh="拖入视频" data-lang-en="Drop Video">拖入视频</div>
234
+ </div>
235
+ <div id="upscale-status" style="display:none;">
236
+ <div class="upload-icon" style="color:var(--accent); opacity:1;">✔️</div>
237
+ <div id="upscale-filename" class="upload-text"></div>
238
+ </div>
239
+ <input type="file" id="upscale-video-input" accept="video/*" style="display:none" onchange="handleUpscaleVideoUpload(this.files[0])">
240
+ </div>
241
+ <input type="hidden" id="upscale-video-path">
242
+ </div>
243
+
244
+ <div class="setting-group">
245
+ <label data-lang-zh="目标分辨率" data-lang-en="Target">目标分辨率</label>
246
+ <select id="upscale-res" style="margin-bottom: 0;">
247
+ <option value="1080p">1080P (2x)</option>
248
+ <option value="720p">720P</option>
249
+ </select>
250
+ </div>
251
+ </div>
252
+
253
+ <div style="padding: 0 30px 30px 30px;">
254
+ <button class="btn-primary" id="mainBtn" onclick="run()" data-lang-zh="开始渲染" data-lang-en="Generate">开始渲染</button>
255
+ </div>
256
+ </aside>
257
+
258
+ <main class="workspace">
259
+ <section class="viewer" id="viewer-section">
260
+ <div class="monitor" id="viewer">
261
+ <div id="loading-txt" data-lang-zh="等待分配渲染任务..." data-lang-en="Waiting for task...">等待分配渲染任务...</div>
262
+ <img id="res-img" src="">
263
+ <div id="video-wrapper" style="width:100%; height:100%; display:none; max-height:100%; align-items:center; justify-content:center;">
264
+ <video id="res-video" autoplay loop playsinline></video>
265
+ </div>
266
+ <div class="progress-container"><div id="progress-fill"></div></div>
267
+ </div>
268
+ </section>
269
+
270
+ <!-- Drag Handle -->
271
+ <div id="resize-handle" style="
272
+ height: 5px; background: transparent; cursor: row-resize;
273
+ flex-shrink: 0; position: relative; z-index: 50;
274
+ display: flex; align-items: center; justify-content: center;
275
+ " title="拖动调整面板高度">
276
+ <div style="width: 40px; height: 3px; background: var(--border); border-radius: 999px; pointer-events: none;"></div>
277
+ </div>
278
+
279
+ <section class="library" id="library-section">
280
+ <div style="display: flex; justify-content: space-between; margin-bottom: 15px; align-items: center; border-bottom: 1px solid var(--border); padding-bottom: 10px;">
281
+ <div style="display: flex; gap: 20px;">
282
+ <span id="tab-history" style="font-size: 11px; font-weight: 800; color: var(--accent); cursor: pointer; border-bottom: 2px solid var(--accent); padding-bottom: 11px; margin-bottom: -11px;" onclick="switchLibTab('history')" data-lang-zh="历史资产" data-lang-en="ASSETS">历史资产</span>
283
+ <span id="tab-log" style="font-size: 11px; font-weight: 800; color: var(--text-dim); cursor: pointer; border-bottom: 2px solid transparent; padding-bottom: 11px; margin-bottom: -11px;" onclick="switchLibTab('log')" data-lang-zh="系统日志" data-lang-en="LOGS">系统日志</span>
284
+ </div>
285
+ <button onclick="fetchHistory(currentHistoryPage)" style="background: var(--item); border: 1px solid var(--border); border-radius: 6px; color: var(--text-dim); font-size: 11px; padding: 4px 10px; cursor: pointer;" data-lang-zh="刷新" data-lang-en="Refresh">刷新</button>
286
+ </div>
287
+
288
+ <div id="log-container" style="display: none; flex: 1; flex-direction: column;">
289
+ <div id="log">> LTX-2 Studio Ready. Expecting commands...</div>
290
+ </div>
291
+
292
+ <div id="history-wrapper">
293
+ <div id="history-container"></div>
294
+ </div>
295
+ <div id="pagination-bar" style="display:none;"></div>
296
+ </section>
297
+ </main>
298
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/plyr/3.7.8/plyr.min.js"></script>
299
+ <script src="index.js"></script>
300
+
301
+ </body>
302
+ </html>
LTX2.3/UI/index.js ADDED
@@ -0,0 +1,1055 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // ─── Resizable panel drag logic ───────────────────────────────────────────────
2
+ (function() {
3
+ const handle = document.getElementById('resize-handle');
4
+ const viewer = document.getElementById('viewer-section');
5
+ const library = document.getElementById('library-section');
6
+ const workspace = document.querySelector('.workspace');
7
+ let dragging = false, startY = 0, startVH = 0;
8
+
9
+ handle.addEventListener('mousedown', (e) => {
10
+ dragging = true;
11
+ startY = e.clientY;
12
+ startVH = viewer.getBoundingClientRect().height;
13
+ document.body.style.cursor = 'row-resize';
14
+ document.body.style.userSelect = 'none';
15
+ handle.querySelector('div').style.background = 'var(--accent)';
16
+ e.preventDefault();
17
+ });
18
+ document.addEventListener('mousemove', (e) => {
19
+ if (!dragging) return;
20
+ const wsH = workspace.getBoundingClientRect().height;
21
+ const delta = e.clientY - startY;
22
+ let newVH = startVH + delta;
23
+ // Clamp: viewer min 150px, library min 100px
24
+ newVH = Math.max(150, Math.min(wsH - 100 - 5, newVH));
25
+ viewer.style.flex = 'none';
26
+ viewer.style.height = newVH + 'px';
27
+ library.style.flex = '1';
28
+ });
29
+ document.addEventListener('mouseup', () => {
30
+ if (dragging) {
31
+ dragging = false;
32
+ document.body.style.cursor = '';
33
+ document.body.style.userSelect = '';
34
+ handle.querySelector('div').style.background = 'var(--border)';
35
+ }
36
+ });
37
+ // Hover highlight
38
+ handle.addEventListener('mouseenter', () => { handle.querySelector('div').style.background = 'var(--text-dim)'; });
39
+ handle.addEventListener('mouseleave', () => { if (!dragging) handle.querySelector('div').style.background = 'var(--border)'; });
40
+ })();
41
+ // ──────────────────────────────────────────────────────────────────────────────
42
+
43
+
44
+
45
+
46
+
47
+
48
+ // 动态获取当前访问的域名或 IP,自动对齐 3000 端口
49
+ const BASE = `http://${window.location.hostname}:3000`;
50
+
51
+ let currentMode = 'image';
52
+ let pollInterval = null;
53
+ let isEnglish = false;
54
+
55
+ function toggleLang() {
56
+ isEnglish = !isEnglish;
57
+ const lang = isEnglish ? 'en' : 'zh';
58
+
59
+ document.querySelectorAll('[data-lang-zh]').forEach(el => {
60
+ if (el.tagName === 'INPUT' || el.tagName === 'TEXTAREA') {
61
+ el.placeholder = el.getAttribute('data-lang-' + lang) || el.placeholder;
62
+ } else {
63
+ el.textContent = el.getAttribute('data-lang-' + lang) || el.textContent;
64
+ }
65
+ });
66
+
67
+ document.getElementById('langBtn').title = isEnglish ? '切换语言' : 'Toggle Language';
68
+ }
69
+
70
+ // 建议增加一个简单的调试日志,方便在控制台确认地址是否正确
71
+ console.log("Connecting to Backend API at:", BASE);
72
+
73
+ // 分辨率自动计算逻辑
74
+ function updateResPreview() {
75
+ const q = document.getElementById('vid-quality').value; // "1080", "720", "544"
76
+ const r = document.getElementById('vid-ratio').value;
77
+
78
+ let resLabel = q === "1080" ? "1080p" : q === "720" ? "720p" : "576p";
79
+
80
+ let resDisplay;
81
+ if (r === "16:9") {
82
+ resDisplay = q === "1080" ? "1920x1080" : q === "720" ? "1280x720" : "1024x576";
83
+ } else {
84
+ resDisplay = q === "1080" ? "1080x1920" : q === "720" ? "720x1280" : "576x1024";
85
+ }
86
+
87
+ document.getElementById('res-preview').innerText = `最终发送规格: ${resLabel} (${resDisplay})`;
88
+ return resLabel;
89
+ }
90
+
91
+ // 图片分辨率预览
92
+ function updateImgResPreview() {
93
+ const w = document.getElementById('img-w').value;
94
+ const h = document.getElementById('img-h').value;
95
+ document.getElementById('img-res-preview').innerText = `最终发送规格: ${w}x${h}`;
96
+ }
97
+
98
+ // 切换图片预设分辨率
99
+ function applyImgPreset(val) {
100
+ if (val === "custom") {
101
+ document.getElementById('img-custom-res').style.display = 'flex';
102
+ } else {
103
+ const [w, h] = val.split('x');
104
+ document.getElementById('img-w').value = w;
105
+ document.getElementById('img-h').value = h;
106
+ updateImgResPreview();
107
+ // 隐藏自定义区域或保持显示供微调
108
+ // document.getElementById('img-custom-res').style.display = 'none';
109
+ }
110
+ }
111
+
112
+
113
+
114
+ // 处理帧图片上传
115
+ async function handleFrameUpload(file, frameType) {
116
+ if (!file) return;
117
+
118
+ const preview = document.getElementById(`${frameType}-frame-preview`);
119
+ const placeholder = document.getElementById(`${frameType}-frame-placeholder`);
120
+ const clearOverlay = document.getElementById(`clear-${frameType}-frame-overlay`);
121
+
122
+ const previewReader = new FileReader();
123
+ previewReader.onload = (e) => {
124
+ preview.src = e.target.result;
125
+ preview.style.display = 'block';
126
+ placeholder.style.display = 'none';
127
+ clearOverlay.style.display = 'flex';
128
+ };
129
+ previewReader.readAsDataURL(file);
130
+
131
+ const reader = new FileReader();
132
+ reader.onload = async (e) => {
133
+ const b64Data = e.target.result;
134
+ addLog(`正在上传 ${frameType === 'start' ? '起始帧' : '结束帧'}: ${file.name}...`);
135
+ try {
136
+ const res = await fetch(`${BASE}/api/system/upload-image`, {
137
+ method: 'POST',
138
+ headers: { 'Content-Type': 'application/json' },
139
+ body: JSON.stringify({ image: b64Data, filename: file.name })
140
+ });
141
+ const data = await res.json();
142
+ if (res.ok && data.path) {
143
+ document.getElementById(`${frameType}-frame-path`).value = data.path;
144
+ addLog(`✅ ${frameType === 'start' ? '起始帧' : '结束帧'}上传成功`);
145
+ } else {
146
+ throw new Error(data.error || data.detail || "上传失败");
147
+ }
148
+ } catch (e) {
149
+ addLog(`❌ 帧图片上传失败: ${e.message}`);
150
+ }
151
+ };
152
+ reader.readAsDataURL(file);
153
+ }
154
+
155
+ function clearFrame(frameType) {
156
+ document.getElementById(`${frameType}-frame-input`).value = "";
157
+ document.getElementById(`${frameType}-frame-path`).value = "";
158
+ document.getElementById(`${frameType}-frame-preview`).style.display = 'none';
159
+ document.getElementById(`${frameType}-frame-preview`).src = "";
160
+ document.getElementById(`${frameType}-frame-placeholder`).style.display = 'block';
161
+ document.getElementById(`clear-${frameType}-frame-overlay`).style.display = 'none';
162
+ addLog(`🧹 已清除${frameType === 'start' ? '起始帧' : '结束帧'}`);
163
+ }
164
+
165
+ // 处理图片上传
166
+ async function handleImageUpload(file) {
167
+ if (!file) return;
168
+
169
+ // 预览图片
170
+ const preview = document.getElementById('upload-preview');
171
+ const placeholder = document.getElementById('upload-placeholder');
172
+ const clearOverlay = document.getElementById('clear-img-overlay');
173
+
174
+ const previewReader = new FileReader();
175
+ preview.onload = () => {
176
+ preview.style.display = 'block';
177
+ placeholder.style.display = 'none';
178
+ clearOverlay.style.display = 'flex';
179
+ };
180
+ previewReader.onload = (e) => preview.src = e.target.result;
181
+ previewReader.readAsDataURL(file);
182
+
183
+ // 使用 FileReader 转换为 Base64,绕过后端缺失 python-multipart 的问题
184
+ const reader = new FileReader();
185
+ reader.onload = async (e) => {
186
+ const b64Data = e.target.result;
187
+ addLog(`正在上传参考图: ${file.name}...`);
188
+ try {
189
+ const res = await fetch(`${BASE}/api/system/upload-image`, {
190
+ method: 'POST',
191
+ headers: { 'Content-Type': 'application/json' },
192
+ body: JSON.stringify({
193
+ image: b64Data,
194
+ filename: file.name
195
+ })
196
+ });
197
+ const data = await res.json();
198
+ if (res.ok && data.path) {
199
+ document.getElementById('uploaded-img-path').value = data.path;
200
+ addLog(`✅ 参考图上传成功: ${file.name}`);
201
+ } else {
202
+ const errMsg = data.error || data.detail || "上传失败";
203
+ throw new Error(typeof errMsg === 'string' ? errMsg : JSON.stringify(errMsg));
204
+ }
205
+ } catch (e) {
206
+ addLog(`❌ 图片上传失败: ${e.message}`);
207
+ }
208
+ };
209
+ reader.onerror = () => addLog("❌ 读取本地文件失败");
210
+ reader.readAsDataURL(file);
211
+ }
212
+
213
+ function clearUploadedImage() {
214
+ document.getElementById('vid-image-input').value = "";
215
+ document.getElementById('uploaded-img-path').value = "";
216
+ document.getElementById('upload-preview').style.display = 'none';
217
+ document.getElementById('upload-preview').src = "";
218
+ document.getElementById('upload-placeholder').style.display = 'block';
219
+ document.getElementById('clear-img-overlay').style.display = 'none';
220
+ addLog("🧹 已清除参考图");
221
+ }
222
+
223
+ // 处理音频上传
224
+ async function handleAudioUpload(file) {
225
+ if (!file) return;
226
+
227
+ const placeholder = document.getElementById('audio-upload-placeholder');
228
+ const statusDiv = document.getElementById('audio-upload-status');
229
+ const filenameStatus = document.getElementById('audio-filename-status');
230
+ const clearOverlay = document.getElementById('clear-audio-overlay');
231
+
232
+ placeholder.style.display = 'none';
233
+ filenameStatus.innerText = file.name;
234
+ statusDiv.style.display = 'block';
235
+ clearOverlay.style.display = 'flex';
236
+
237
+ const reader = new FileReader();
238
+ reader.onload = async (e) => {
239
+ const b64Data = e.target.result;
240
+ addLog(`正在上传音频: ${file.name}...`);
241
+ try {
242
+ // 复用图片上传接口,后端已支持任意文件类型
243
+ const res = await fetch(`${BASE}/api/system/upload-image`, {
244
+ method: 'POST',
245
+ headers: { 'Content-Type': 'application/json' },
246
+ body: JSON.stringify({
247
+ image: b64Data,
248
+ filename: file.name
249
+ })
250
+ });
251
+ const data = await res.json();
252
+ if (res.ok && data.path) {
253
+ document.getElementById('uploaded-audio-path').value = data.path;
254
+ addLog(`✅ 音频上传成功: ${file.name}`);
255
+ } else {
256
+ const errMsg = data.error || data.detail || "上传失败";
257
+ throw new Error(typeof errMsg === 'string' ? errMsg : JSON.stringify(errMsg));
258
+ }
259
+ } catch (e) {
260
+ addLog(`❌ 音频上传失败: ${e.message}`);
261
+ }
262
+ };
263
+ reader.onerror = () => addLog("❌ 读取本地音频文件失败");
264
+ reader.readAsDataURL(file);
265
+ }
266
+
267
+ function clearUploadedAudio() {
268
+ document.getElementById('vid-audio-input').value = "";
269
+ document.getElementById('uploaded-audio-path').value = "";
270
+ document.getElementById('audio-upload-placeholder').style.display = 'block';
271
+ document.getElementById('audio-upload-status').style.display = 'none';
272
+ document.getElementById('clear-audio-overlay').style.display = 'none';
273
+ addLog("🧹 已清除音频文件");
274
+ }
275
+
276
+ // 处理超分视频上传
277
+ async function handleUpscaleVideoUpload(file) {
278
+ if (!file) return;
279
+ const placeholder = document.getElementById('upscale-placeholder');
280
+ const statusDiv = document.getElementById('upscale-status');
281
+ const filenameStatus = document.getElementById('upscale-filename');
282
+ const clearOverlay = document.getElementById('clear-upscale-overlay');
283
+
284
+ filenameStatus.innerText = file.name;
285
+ placeholder.style.display = 'none';
286
+ statusDiv.style.display = 'block';
287
+ clearOverlay.style.display = 'flex';
288
+
289
+ const reader = new FileReader();
290
+ reader.onload = async (e) => {
291
+ const b64Data = e.target.result;
292
+ addLog(`正在上传待超分视频: ${file.name}...`);
293
+ try {
294
+ const res = await fetch(`${BASE}/api/system/upload-image`, {
295
+ method: 'POST',
296
+ headers: { 'Content-Type': 'application/json' },
297
+ body: JSON.stringify({ image: b64Data, filename: file.name })
298
+ });
299
+ const data = await res.json();
300
+ if (res.ok && data.path) {
301
+ document.getElementById('upscale-video-path').value = data.path;
302
+ addLog(`✅ 视频上传成功`);
303
+ } else {
304
+ throw new Error(data.error || "上传失败");
305
+ }
306
+ } catch (e) {
307
+ addLog(`❌ 视频上传失败: ${e.message}`);
308
+ }
309
+ };
310
+ reader.readAsDataURL(file);
311
+ }
312
+
313
+ function clearUpscaleVideo() {
314
+ document.getElementById('upscale-video-input').value = "";
315
+ document.getElementById('upscale-video-path').value = "";
316
+ document.getElementById('upscale-placeholder').style.display = 'block';
317
+ document.getElementById('upscale-status').style.display = 'none';
318
+ document.getElementById('clear-upscale-overlay').style.display = 'none';
319
+ addLog("🧹 已清除待超分视频");
320
+ }
321
+
322
+ // 初始化拖拽上传逻辑
323
+ function initDragAndDrop() {
324
+ const audioDropZone = document.getElementById('audio-drop-zone');
325
+ const startFrameDropZone = document.getElementById('start-frame-drop-zone');
326
+ const endFrameDropZone = document.getElementById('end-frame-drop-zone');
327
+ const upscaleDropZone = document.getElementById('upscale-drop-zone');
328
+
329
+ const zones = [audioDropZone, startFrameDropZone, endFrameDropZone, upscaleDropZone];
330
+
331
+ ['dragenter', 'dragover', 'dragleave', 'drop'].forEach(eventName => {
332
+ zones.forEach(zone => {
333
+ if (!zone) return;
334
+ zone.addEventListener(eventName, (e) => {
335
+ e.preventDefault();
336
+ e.stopPropagation();
337
+ }, false);
338
+ });
339
+ });
340
+
341
+ ['dragenter', 'dragover'].forEach(eventName => {
342
+ zones.forEach(zone => {
343
+ if (!zone) return;
344
+ zone.addEventListener(eventName, () => zone.classList.add('dragover'), false);
345
+ });
346
+ });
347
+
348
+ ['dragleave', 'drop'].forEach(eventName => {
349
+ zones.forEach(zone => {
350
+ if (!zone) return;
351
+ zone.addEventListener(eventName, () => zone.classList.remove('dragover'), false);
352
+ });
353
+ });
354
+
355
+ audioDropZone.addEventListener('drop', (e) => {
356
+ const file = e.dataTransfer.files[0];
357
+ if (file && file.type.startsWith('audio/')) handleAudioUpload(file);
358
+ }, false);
359
+
360
+ startFrameDropZone.addEventListener('drop', (e) => {
361
+ const file = e.dataTransfer.files[0];
362
+ if (file && file.type.startsWith('image/')) handleFrameUpload(file, 'start');
363
+ }, false);
364
+
365
+ endFrameDropZone.addEventListener('drop', (e) => {
366
+ const file = e.dataTransfer.files[0];
367
+ if (file && file.type.startsWith('image/')) handleFrameUpload(file, 'end');
368
+ }, false);
369
+
370
+ upscaleDropZone.addEventListener('drop', (e) => {
371
+ const file = e.dataTransfer.files[0];
372
+ if (file && file.type.startsWith('video/')) handleUpscaleVideoUpload(file);
373
+ }, false);
374
+ }
375
+
376
+ let _isGeneratingFlag = false;
377
+
378
+ // 系统状态轮询
379
+ async function checkStatus() {
380
+ try {
381
+ const h = await fetch(`${BASE}/health`).then(r => r.json()).catch(() => ({status: "error"}));
382
+ const g = await fetch(`${BASE}/api/gpu-info`).then(r => r.json()).catch(() => ({gpu_info: {}}));
383
+ const p = await fetch(`${BASE}/api/generation/progress`).then(r => r.json()).catch(() => ({progress: 0}));
384
+ const sysGpus = await fetch(`${BASE}/api/system/list-gpus`).then(r => r.json()).catch(() => ({gpus: []}));
385
+
386
+ const activeGpu = (sysGpus.gpus || []).find(x => x.active) || (sysGpus.gpus || [])[0] || {};
387
+ const gpuName = activeGpu.name || g.gpu_info?.name || "GPU";
388
+
389
+ const s = document.getElementById('sys-status');
390
+ const indicator = document.getElementById('sys-indicator');
391
+
392
+ const isReady = h.status === "ok" || h.status === "ready" || h.models_loaded;
393
+ const backendActive = (p && p.progress > 0);
394
+
395
+ if (_isGeneratingFlag || backendActive) {
396
+ s.innerText = `${gpuName}: 运算中...`;
397
+ if(indicator) indicator.className = 'indicator-busy';
398
+ } else {
399
+ s.innerText = isReady ? `${gpuName}: 在线 / 就绪` : `${gpuName}: 启动中...`;
400
+ if(indicator) indicator.className = isReady ? 'indicator-ready' : 'indicator-offline';
401
+ }
402
+ s.style.color = "var(--text-dim)";
403
+
404
+ const vUsedMB = g.gpu_info?.vramUsed || 0;
405
+ const vTotalMB = activeGpu.vram_mb || g.gpu_info?.vram || 32768;
406
+ const vUsedGB = vUsedMB / 1024;
407
+ const vTotalGB = vTotalMB / 1024;
408
+
409
+ document.getElementById('vram-fill').style.width = (vUsedMB / vTotalMB * 100) + "%";
410
+ document.getElementById('vram-text').innerText = `${vUsedGB.toFixed(1)} / ${vTotalGB.toFixed(0)} GB`;
411
+ } catch(e) { document.getElementById('sys-status').innerText = "未检测到后端 (Port 3000)"; }
412
+ }
413
+ setInterval(checkStatus, 1000); // 提升到 1 秒一次实时监控
414
+ checkStatus();
415
+ initDragAndDrop();
416
+ listGpus(); // 初始化 GPU 列表
417
+ getOutputDir(); // 获取当前的保存路径
418
+
419
+ async function setOutputDir() {
420
+ const dir = document.getElementById('global-out-dir').value.trim();
421
+ try {
422
+ const res = await fetch(`${BASE}/api/system/set-dir`, {
423
+ method: 'POST',
424
+ headers: { 'Content-Type': 'application/json' },
425
+ body: JSON.stringify({ directory: dir })
426
+ });
427
+ if (res.ok) {
428
+ addLog(`✅ 存储路径更新成功! 当前路径: ${dir || '默认路径'}`);
429
+ if (typeof fetchHistory === 'function') fetchHistory(currentHistoryPage);
430
+ }
431
+ } catch (e) {
432
+ addLog(`❌ 设置路径时连接异常: ${e.message}`);
433
+ }
434
+ }
435
+
436
+ async function browseOutputDir() {
437
+ try {
438
+ const res = await fetch(`${BASE}/api/system/browse-dir`);
439
+ const data = await res.json();
440
+ if (data.status === "success" && data.directory) {
441
+ document.getElementById('global-out-dir').value = data.directory;
442
+ // auto apply immediately
443
+ setOutputDir();
444
+ addLog(`📂 检测到新路径,已自动套用!`);
445
+ } else if (data.error) {
446
+ addLog(`❌ 内部系统权限拦截了弹窗: ${data.error}`);
447
+ }
448
+ } catch (e) {
449
+ addLog(`❌ 无法调出文件夹浏览弹窗, 请直接复制粘贴绝对路径。`);
450
+ }
451
+ }
452
+
453
+ async function getOutputDir() {
454
+ try {
455
+ const res = await fetch(`${BASE}/api/system/get-dir`);
456
+ const data = await res.json();
457
+ if (data.directory && data.directory.indexOf('LTXDesktop') === -1 && document.getElementById('global-out-dir')) {
458
+ document.getElementById('global-out-dir').value = data.directory;
459
+ }
460
+ } catch (e) {}
461
+ }
462
+
463
+ function switchMode(m) {
464
+ currentMode = m;
465
+ document.getElementById('tab-image').classList.toggle('active', m === 'image');
466
+ document.getElementById('tab-video').classList.toggle('active', m === 'video');
467
+ document.getElementById('tab-upscale').classList.toggle('active', m === 'upscale');
468
+
469
+ document.getElementById('image-opts').style.display = m === 'image' ? 'block' : 'none';
470
+ document.getElementById('video-opts').style.display = m === 'video' ? 'block' : 'none';
471
+ document.getElementById('upscale-opts').style.display = m === 'upscale' ? 'block' : 'none';
472
+
473
+ // 如果切到图像模式,隐藏提示词框外的其他东西
474
+ document.getElementById('prompt').placeholder = m === 'upscale' ? "输入画面增强引导词 (可选)..." : "在此输入视觉描述词 (Prompt)...";
475
+ }
476
+
477
+ function showGeneratingView() {
478
+ if (!_isGeneratingFlag) return;
479
+ const resImg = document.getElementById('res-img');
480
+ const videoWrapper = document.getElementById('video-wrapper');
481
+ if (resImg) resImg.style.display = "none";
482
+ if (videoWrapper) videoWrapper.style.display = "none";
483
+ if (player) {
484
+ try { player.stop(); } catch(_) {}
485
+ } else {
486
+ const vid = document.getElementById('res-video');
487
+ if (vid) { vid.pause(); vid.removeAttribute('src'); vid.load(); }
488
+ }
489
+ const loadingTxt = document.getElementById('loading-txt');
490
+ if (loadingTxt) loadingTxt.style.display = "flex";
491
+ }
492
+
493
+ async function run() {
494
+ // 防止重复点击(_isGeneratingFlag 比 btn.disabled 更可靠)
495
+ if (_isGeneratingFlag) {
496
+ addLog("⚠️ 当前正在生成中,请等待完成");
497
+ return;
498
+ }
499
+
500
+ const btn = document.getElementById('mainBtn');
501
+ const prompt = document.getElementById('prompt').value.trim();
502
+
503
+ if (currentMode !== 'upscale' && !prompt) {
504
+ addLog("⚠️ 请输入提示词后再开始渲染");
505
+ return;
506
+ }
507
+
508
+ // 先设置标志 + 禁用按钮,然后用顶层 try/finally 保证一定能解锁
509
+ _isGeneratingFlag = true;
510
+ btn.disabled = true;
511
+
512
+ try {
513
+ // 安全地操作 UI 元素(改用 if 判空,防止 Plyr 接管后 getElementById 返回 null)
514
+ const loader = document.getElementById('loading-txt');
515
+ const resImg = document.getElementById('res-img');
516
+ const resVideo = document.getElementById('res-video');
517
+
518
+ if (loader) {
519
+ loader.style.display = "flex";
520
+ loader.style.flexDirection = "column";
521
+ loader.style.alignItems = "center";
522
+ loader.style.gap = "12px";
523
+ loader.innerHTML = `
524
+ <div class="spinner" style="width:48px;height:48px;border-width:4px;color:var(--accent);"></div>
525
+ <div id="loader-step-text" style="font-size:13px;font-weight:700;color:var(--text-sub);">GPU 正在分配资源...</div>
526
+ `;
527
+ }
528
+ if (resImg) resImg.style.display = "none";
529
+ // 必须隐藏整个 video-wrapper(Plyr 外层容器),否则第二次生成时视频会与 spinner 叠加
530
+ const videoWrapper = document.getElementById('video-wrapper');
531
+ if (videoWrapper) videoWrapper.style.display = "none";
532
+ if (player) { try { player.stop(); } catch(_) {} }
533
+ else if (resVideo) { resVideo.pause?.(); resVideo.removeAttribute?.('src'); }
534
+
535
+ checkStatus();
536
+
537
+ // 重置后端状态锁(非关键,失败不影响主流程)
538
+ try { await fetch(`${BASE}/api/system/reset-state`, { method: 'POST' }); } catch(_) {}
539
+
540
+ startProgressPolling();
541
+
542
+ // ---- 新增:在历史记录区插入「正在渲染」缩略图卡片 ----
543
+ const historyContainer = document.getElementById('history-container');
544
+ if (historyContainer) {
545
+ const old = document.getElementById('current-loading-card');
546
+ if (old) old.remove();
547
+ const loadingCard = document.createElement('div');
548
+ loadingCard.className = 'history-card loading-card';
549
+ loadingCard.id = 'current-loading-card';
550
+ loadingCard.onclick = showGeneratingView;
551
+ loadingCard.innerHTML = `
552
+ <div class="spinner"></div>
553
+ <div id="loading-card-step" style="font-size:10px;color:var(--text-dim);margin-top:4px;">等待中...</div>
554
+ `;
555
+ historyContainer.prepend(loadingCard);
556
+ }
557
+
558
+ // ---- 构建请求 ----
559
+ let endpoint, payload;
560
+ if (currentMode === 'image') {
561
+ const w = parseInt(document.getElementById('img-w').value);
562
+ const h = parseInt(document.getElementById('img-h').value);
563
+ endpoint = '/api/generate-image';
564
+ payload = {
565
+ prompt, width: w, height: h,
566
+ numSteps: parseInt(document.getElementById('img-steps').value),
567
+ numImages: 1
568
+ };
569
+ addLog(`正在发起图像渲染: ${w}x${h}, Steps: ${payload.numSteps}`);
570
+
571
+ } else if (currentMode === 'video') {
572
+ const res = updateResPreview();
573
+ const dur = parseFloat(document.getElementById('vid-duration').value);
574
+ const fps = document.getElementById('vid-fps').value;
575
+ if (dur > 20) addLog(`⚠️ 时长设定为 ${dur}s 极长,可能导致显存溢出或耗时较久。`);
576
+
577
+ const audio = document.getElementById('vid-audio').checked ? "true" : "false";
578
+ const audioPath = document.getElementById('uploaded-audio-path').value;
579
+ const startFramePathValue = document.getElementById('start-frame-path').value;
580
+ const endFramePathValue = document.getElementById('end-frame-path').value;
581
+
582
+ let finalImagePath = null, finalStartFramePath = null, finalEndFramePath = null;
583
+ if (startFramePathValue && endFramePathValue) {
584
+ finalStartFramePath = startFramePathValue;
585
+ finalEndFramePath = endFramePathValue;
586
+ } else if (startFramePathValue) {
587
+ finalImagePath = startFramePathValue;
588
+ }
589
+
590
+ endpoint = '/api/generate';
591
+ payload = {
592
+ prompt, resolution: res, model: "ltx-2",
593
+ cameraMotion: document.getElementById('vid-motion').value,
594
+ negativePrompt: "low quality, blurry, noisy, static noise, distorted",
595
+ duration: String(dur), fps, audio,
596
+ imagePath: finalImagePath,
597
+ audioPath: audioPath || null,
598
+ startFramePath: finalStartFramePath,
599
+ endFramePath: finalEndFramePath,
600
+ aspectRatio: document.getElementById('vid-ratio').value
601
+ };
602
+ addLog(`正在发起视频渲染: ${res}, 时长: ${dur}s, FPS: ${fps}, 音频: ${audio}, 参考图: ${finalImagePath ? '已加载' : '无'}, 参考音频: ${audioPath ? '已加载' : '无'}, 插帧: ${finalStartFramePath && finalEndFramePath ? '已加载' : '无'}, 镜头: ${payload.cameraMotion}`);
603
+
604
+ } else if (currentMode === 'upscale') {
605
+ const videoPath = document.getElementById('upscale-video-path').value;
606
+ const targetRes = document.getElementById('upscale-res').value;
607
+ if (!videoPath) throw new Error("请先上传待超分的视频");
608
+ endpoint = '/api/system/upscale-video';
609
+ payload = { video_path: videoPath, resolution: targetRes, prompt: "high quality, detailed, 4k", strength: 0.7 };
610
+ addLog(`正在发起视频超分: 目标 ${targetRes}`);
611
+ }
612
+
613
+ // ---- 发送请求 ----
614
+ const res = await fetch(BASE + endpoint, {
615
+ method: 'POST',
616
+ headers: { 'Content-Type': 'application/json' },
617
+ body: JSON.stringify(payload)
618
+ });
619
+ const data = await res.json();
620
+ if (!res.ok) {
621
+ const errMsg = data.error || data.detail || "API 拒绝了请求";
622
+ throw new Error(typeof errMsg === 'string' ? errMsg : JSON.stringify(errMsg));
623
+ }
624
+
625
+ // ---- 显示结果 ----
626
+ const rawPath = data.image_paths ? data.image_paths[0] : data.video_path;
627
+ if (rawPath) {
628
+ try { displayOutput(rawPath); } catch (dispErr) { addLog(`⚠️ 播放器显示异常: ${dispErr.message}`); }
629
+ }
630
+
631
+ // 强制刷新历史记录(不依赖 isLoadingHistory 标志,确保新生成的视频立即显示)
632
+ setTimeout(() => {
633
+ isLoadingHistory = false; // 强制重置状态
634
+ if (typeof fetchHistory === 'function') fetchHistory(1);
635
+ }, 500);
636
+
637
+ } catch (e) {
638
+ addLog(`❌ 渲染中断: ${e.message}`);
639
+ const loader = document.getElementById('loading-txt');
640
+ if (loader) loader.innerText = "渲染失败,请检查显存或参数";
641
+
642
+ } finally {
643
+ // ✅ 无论发生什么,这里一定执行,确保按钮永远可以再次点击
644
+ _isGeneratingFlag = false;
645
+ btn.disabled = false;
646
+ stopProgressPolling();
647
+ checkStatus();
648
+ // 生成完毕后自动释放显存,降低 VRAM 压力(不 await 避免阻塞 UI 解锁)
649
+ setTimeout(() => clearGpu(), 500);
650
+ }
651
+ }
652
+
653
async function clearGpu() {
    // Trigger a backend VRAM flush; the button is locked while the request runs.
    const trigger = document.getElementById('clearGpuBtn');
    trigger.disabled = true;
    trigger.innerText = "清理中...";
    try {
        const resp = await fetch(`${BASE}/api/system/clear-gpu`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' }
        });
        const body = await resp.json();
        if (!resp.ok) {
            throw new Error(body.error || body.detail || "后端未实现此接口 (404)");
        }
        addLog(`🧹 显存清理成功: ${body.message}`);
        // Refresh telemetry right away, then once more after the driver settles.
        checkStatus();
        setTimeout(checkStatus, 1000);
    } catch (err) {
        addLog(`❌ 清理显存失败: ${err.message}`);
    } finally {
        trigger.disabled = false;
        trigger.innerText = "释放显存";
    }
}
679
+
680
async function listGpus() {
    // Populate the GPU <select> from the backend and mirror the active card's name.
    try {
        const resp = await fetch(`${BASE}/api/system/list-gpus`);
        const body = await resp.json();
        if (!resp.ok || !body.gpus) return;

        const dropdown = document.getElementById('gpu-selector');
        const options = body.gpus.map(g =>
            `<option value="${g.id}" ${g.active ? 'selected' : ''}>GPU ${g.id}: ${g.name} (${g.vram})</option>`
        );
        dropdown.innerHTML = options.join('');

        // Show which card is currently driving inference.
        const current = body.gpus.find(g => g.active);
        if (current) document.getElementById('gpu-name').innerText = current.name;
    } catch (err) {
        console.error("Failed to list GPUs", err);
    }
}
698
+
699
async function switchGpu(id) {
    // Ask the backend to rebind the pipeline to another CUDA device.
    if (!id) return;
    addLog(`🔄 正在切换到 GPU ${id}...`);
    try {
        const resp = await fetch(`${BASE}/api/system/switch-gpu`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ gpu_id: parseInt(id) })
        });
        const body = await resp.json();
        if (!resp.ok) throw new Error(body.error || "切换失败");
        addLog(`✅ 已成功切换到 GPU ${id},模型将重新加载。`);
        listGpus(); // re-sync the selector with the backend's view
        setTimeout(checkStatus, 1000);
    } catch (err) {
        addLog(`❌ GPU 切换失败: ${err.message}`);
    }
}
720
+
721
function startProgressPolling() {
    // Poll /api/generation/progress once per second and fan the numbers out to
    // the progress bar, the main preview overlay and the history loading card.
    if (pollInterval) clearInterval(pollInterval);
    const PHASE_LABELS = {
        'loading_model': '加载权重',
        'encoding_text': 'T5 编码',
        'validating_request': '校验请求',
        'uploading_audio': '上传音频',
        'uploading_image': '上传图像',
        'inference': 'AI 推理',
        'downloading_output': '下载结果',
        'complete': '完成'
    };
    pollInterval = setInterval(async () => {
        try {
            const resp = await fetch(`${BASE}/api/generation/progress`);
            const info = await resp.json();
            if (!(info.progress > 0)) return;

            const phaseStr = PHASE_LABELS[info.phase] || info.phase || '推理';

            // Prefer "current/total steps"; fall back to a raw percentage.
            const hasSteps = info.current_step !== undefined && info.current_step !== null && info.total_steps;
            const stepLabel = hasSteps
                ? `${info.current_step}/${info.total_steps} 步`
                : `${info.progress}%`;

            document.getElementById('progress-fill').style.width = info.progress + "%";

            // Update the progress text embedded in the main preview area.
            const loaderStep = document.getElementById('loader-step-text');
            if (loaderStep) {
                loaderStep.innerText = `GPU 运算中: ${stepLabel} [ ${phaseStr} ]`;
            } else {
                const loadingTxt = document.getElementById('loading-txt');
                if (loadingTxt) loadingTxt.innerText = `GPU 运算中: ${stepLabel} [${phaseStr}]`;
            }

            // Mirror the step counter onto the history thumbnail card.
            const cardStep = document.getElementById('loading-card-step');
            if (cardStep) cardStep.innerText = stepLabel;
        } catch (err) {}
    }, 1000);
}
764
+
765
function stopProgressPolling() {
    // Tear down the poller, reset the bar, and drop the in-flight history card
    // (the generation has finished one way or another).
    clearInterval(pollInterval);
    pollInterval = null;
    document.getElementById('progress-fill').style.width = "0%";
    const inflightCard = document.getElementById('current-loading-card');
    if (inflightCard) inflightCard.remove();
}
773
+
774
function displayOutput(fileOrPath) {
    const img = document.getElementById('res-img');
    const vid = document.getElementById('res-video');
    const loader = document.getElementById('loading-txt');

    // Hard-stop whatever is currently playing first, otherwise the old clip
    // keeps emitting audio in the background after the source swap.
    if (player) {
        player.stop();
    } else {
        vid.pause();
        vid.removeAttribute('src');
        vid.load();
    }

    // Resolve the media URL. Absolute paths go through the file endpoint;
    // bare filenames resolve against the custom output dir when one is set,
    // else against the default /outputs mount. The timestamp defeats caching.
    let url = "";
    let fileName = fileOrPath;
    const looksLikePath = fileOrPath.indexOf('\\') !== -1 || fileOrPath.indexOf('/') !== -1;
    if (looksLikePath) {
        url = `${BASE}/api/system/file?path=${encodeURIComponent(fileOrPath)}&t=${Date.now()}`;
        fileName = fileOrPath.split(/[\\/]/).pop();
    } else {
        const outInput = document.getElementById('global-out-dir');
        const globalDir = outInput ? outInput.value.replace(/\\/g, '/').replace(/\/$/, '') : "";
        if (globalDir && globalDir !== "") {
            url = `${BASE}/api/system/file?path=${encodeURIComponent(globalDir + '/' + fileOrPath)}&t=${Date.now()}`;
        } else {
            url = `${BASE}/outputs/${fileOrPath}?t=${Date.now()}`;
        }
    }

    loader.style.display = "none";
    if (currentMode === 'image') {
        img.src = url;
        img.style.display = "block";
        addLog(`✅ 图像渲染成功: ${fileName}`);
        return;
    }

    // Video path: reveal the wrapper, then hand the URL to Plyr when present.
    document.getElementById('video-wrapper').style.display = "flex";
    if (player) {
        player.source = {
            type: 'video',
            sources: [{ src: url, type: 'video/mp4' }]
        };
        player.play();
    } else {
        vid.src = url;
    }
    addLog(`✅ 视频渲染成功: ${fileName}`);
}
823
+
824
+
825
+
826
function addLog(msg) {
    // Append a timestamped entry to the log pane and keep it pinned to the bottom.
    const pane = document.getElementById('log');
    const stamp = new Date().toLocaleTimeString();
    pane.innerHTML += `<div style="margin-bottom:5px"> <span style="color:var(--text-dim)">[${stamp}]</span> ${msg}</div>`;
    pane.scrollTop = pane.scrollHeight;
}
832
+
833
+
834
// Default the workstation to video mode once the DOM is ready.
window.addEventListener('DOMContentLoaded', function () {
    switchMode('video');
});
836
+
837
+
838
+
839
+
840
+
841
+
842
+
843
+
844
+
845
+
846
+
847
+
848
+ let isLoadingHistory = false;
849
+
850
function switchLibTab(tab) {
    // Toggle between the log pane and the history grid, restyling the tab headers.
    const showLog = tab === 'log';
    const showHistory = tab === 'history';

    document.getElementById('log-container').style.display = showLog ? 'flex' : 'none';
    const historyWrap = document.getElementById('history-wrapper');
    if (historyWrap) historyWrap.style.display = showHistory ? 'block' : 'none';

    const logTab = document.getElementById('tab-log');
    logTab.style.color = showLog ? 'var(--accent)' : 'var(--text-dim)';
    logTab.style.borderColor = showLog ? 'var(--accent)' : 'transparent';

    const historyTab = document.getElementById('tab-history');
    historyTab.style.color = showHistory ? 'var(--accent)' : 'var(--text-dim)';
    historyTab.style.borderColor = showHistory ? 'var(--accent)' : 'transparent';

    // Entering the history tab refreshes it.
    if (showHistory) fetchHistory();
}
865
+
866
async function fetchHistory(isFirstLoad = false, silent = false) {
    // Rebuild the history grid from the backend. A re-entrancy flag prevents
    // overlapping reloads from stacking.
    if (isLoadingHistory) return;
    isLoadingHistory = true;

    try {
        // Pull the full history in one shot (no real pagination).
        const resp = await fetch(`${BASE}/api/system/history?page=1&limit=10000`);
        if (!resp.ok) {
            isLoadingHistory = false;
            return;
        }
        const payload = await resp.json();
        if (!payload.history || payload.history.length === 0) return;

        const container = document.getElementById('history-container');

        // Keep the "rendering…" placeholder card alive across the rebuild
        // while a generation is still running.
        let loadingCardHtml = "";
        const inflight = document.getElementById('current-loading-card');
        if (inflight && _isGeneratingFlag) loadingCardHtml = inflight.outerHTML;
        container.innerHTML = loadingCardHtml;

        const outInput = document.getElementById('global-out-dir');
        const globalDir = outInput ? outInput.value.replace(/\\/g, '/').replace(/\/$/, '') : "";

        // Drop malformed rows before rendering.
        const validHistory = payload.history.filter(item => item && item.filename);

        const cardsHtml = validHistory.map((item, index) => {
            const url = (globalDir && globalDir !== "")
                ? `${BASE}/api/system/file?path=${encodeURIComponent(globalDir + '/' + item.filename)}`
                : `${BASE}/outputs/${item.filename}`;

            const safeFilename = item.filename.replace(/'/g, "\\'").replace(/"/g, '\\"');
            const media = item.type === 'video'
                ? `<video data-src="${url}#t=0.001" class="lazy-load" muted loop preload="none" onmouseover="if(this.readyState >= 2) this.play()" onmouseout="this.pause()" style="pointer-events: none; object-fit: cover; width: 100%; height: 100%;"></video>`
                : `<img data-src="${url}" class="lazy-load" style="object-fit: cover; width: 100%; height: 100%;">`;
            return `<div class="history-card" onclick="displayHistoryOutput('${safeFilename}', '${item.type}')">
                <div class="history-type-badge">${item.type === 'video' ? '🎬 VID' : '🎨 IMG'}</div>
                <button class="history-delete-btn" onclick="event.stopPropagation(); deleteHistoryItem('${safeFilename}', '${item.type}', this)">✕</button>
                ${media}
            </div>`;
        }).join('');

        container.insertAdjacentHTML('beforeend', cardsHtml);

        // The placeholder was recreated from raw HTML, so its handler is gone.
        const newInflight = document.getElementById('current-loading-card');
        if (newInflight) newInflight.onclick = showGeneratingView;

        // Hydrate whatever thumbnails are currently in view.
        loadVisibleImages();
    } catch (err) {
        console.error("Failed to load history", err);
    } finally {
        isLoadingHistory = false;
    }
}
927
+
928
async function deleteHistoryItem(filename, type, btn) {
    // Confirm with the user, ask the backend to remove the file, then drop the
    // corresponding card from the grid on success.
    // FIX: the confirm prompt contained the literal text "$(unknown)" instead of
    // interpolating the filename — use a real template placeholder.
    if (!confirm(`确定要删除 "${filename}" 吗?`)) return;

    try {
        const resp = await fetch(`${BASE}/api/system/delete-file`, {
            method: 'POST',
            headers: {'Content-Type': 'application/json'},
            body: JSON.stringify({filename: filename, type: type})
        });

        if (resp.ok) {
            // Remove the card only after the backend confirms deletion.
            const card = btn.closest('.history-card');
            if (card) card.remove();
        } else {
            alert('删除失败');
        }
    } catch (err) {
        console.error('Delete failed', err);
        alert('删除失败');
    }
}
952
+
953
function loadVisibleImages() {
    // Lazily attach real sources to media elements near the viewport — at most
    // three per pass — then re-arm until a pass loads nothing.
    const wrap = document.getElementById('history-wrapper');
    if (!wrap) return;

    const pending = document.querySelectorAll('#history-container .lazy-load');

    let loadedCount = 0;
    pending.forEach(media => {
        if (loadedCount >= 3) return;

        const src = media.dataset.src;
        if (!src) return;

        // Hydrate only elements within ~300px below / ~100px above the viewport.
        const rect = media.getBoundingClientRect();
        const wrapRect = wrap.getBoundingClientRect();
        if (rect.top < wrapRect.bottom + 300 && rect.bottom > wrapRect.top - 100) {
            media.src = src;
            media.classList.remove('lazy-load');

            // Videos additionally need their preload hint upgraded.
            if (media.tagName === 'VIDEO') {
                media.preload = 'metadata';
            }

            loadedCount++;
        }
    });

    // Schedule another pass while progress is still being made.
    if (loadedCount > 0) {
        setTimeout(loadVisibleImages, 100);
    }
}
989
+
990
// Debounced scroll listener on the history wrapper drives the lazy loader.
function initHistoryScrollListener() {
    const wrap = document.getElementById('history-wrapper');
    if (!wrap) return;

    let debounceTimer;
    wrap.addEventListener('scroll', () => {
        if (debounceTimer) clearTimeout(debounceTimer);
        debounceTimer = setTimeout(() => {
            loadVisibleImages();
        }, 100);
    });
}
1003
+
1004
// Install the scroll listener shortly after the DOM settles.
window.addEventListener('DOMContentLoaded', function () {
    setTimeout(initHistoryScrollListener, 500);
});
1008
+
1009
function displayHistoryOutput(file, type) {
    // Re-open a history item in the main preview, flipping the UI mode first.
    document.getElementById('res-img').style.display = 'none';
    document.getElementById('video-wrapper').style.display = 'none';

    switchMode(type === 'video' ? 'video' : 'image');
    displayOutput(file);
}
1017
+
1018
window.addEventListener('DOMContentLoaded', () => {
    // Wire up the custom Plyr video component when the library is present.
    if (window.Plyr) {
        player = new Plyr('#res-video', {
            controls: [
                'play-large', 'play', 'progress', 'current-time',
                'mute', 'volume', 'fullscreen'
            ],
            settings: [],
            loop: { active: true },
            autoplay: true
        });
    }

    // Show the backend's current output directory in the UI.
    fetch(`${BASE}/api/system/get-dir`)
        .then(resp => resp.json())
        .then(info => {
            if (info && info.directory) {
                const outInput = document.getElementById('global-out-dir');
                if (outInput) outInput.value = info.directory;
            }
        }).catch(err => console.error(err));

    setTimeout(() => fetchHistory(1), 500);

    // Background refresh: every 5s, reload history while no generation runs.
    // NOTE(review): this checks 'history-container'.style.display === 'flex',
    // but switchLibTab toggles 'history-wrapper' between block/none — confirm
    // the condition ever holds, otherwise this interval is dead code.
    let historyRefreshInterval = null;
    function startHistoryAutoRefresh() {
        if (historyRefreshInterval) return;
        historyRefreshInterval = setInterval(() => {
            if (document.getElementById('history-container').style.display === 'flex' && !_isGeneratingFlag) {
                fetchHistory(1, true);
            }
        }, 5000);
    }
    startHistoryAutoRefresh();
    switchLibTab('history');
});
LTX2.3/__pycache__/main.cpython-313.pyc ADDED
Binary file (14.9 kB). View file
 
LTX2.3/main.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import subprocess
4
+ import threading
5
+ import time
6
+ import socket
7
+ import logging
8
+ from fastapi import FastAPI
9
+ from fastapi.responses import FileResponse
10
+ from fastapi.staticfiles import StaticFiles
11
+ import uvicorn
12
+
13
+ # ============================================================
14
+ # 配置区 (动态路径适配与补丁挂载)
15
+ # ============================================================
16
def resolve_ltx_path():
    """Locate the LTX Desktop install directory via the user's .lnk shortcut.

    Falls back to a list of well-known install paths when the shortcut is
    missing or its target does not exist on this machine; exits the process
    when nothing can be found.
    """
    import glob, tempfile, subprocess
    shortcut_dir = os.path.join(os.getcwd(), "LTX_Shortcut")
    os.makedirs(shortcut_dir, exist_ok=True)
    shortcuts = glob.glob(os.path.join(shortcut_dir, "*.lnk"))
    if not shortcuts:
        print("\033[91m[ERROR] 未在 LTX_Shortcut 文件夹中找到快捷方式!\n请打开程序目录下的 LTX_Shortcut 文件夹,并将官方 LTX Desktop 的快捷方式复制进去后重试。\033[0m")
        sys.exit(1)

    lnk_path = shortcuts[0]
    # Resolve the .lnk target with a throwaway VBScript — works on every
    # Windows release without extra Python dependencies.
    vbs_code = f'''Set sh = CreateObject("WScript.Shell")\nSet obj = sh.CreateShortcut("{os.path.abspath(lnk_path)}")\nWScript.Echo obj.TargetPath'''
    fd, vbs_path = tempfile.mkstemp(suffix='.vbs')
    with os.fdopen(fd, 'w') as f:
        f.write(vbs_code)
    try:
        out = subprocess.check_output(['cscript', '//nologo', vbs_path], stderr=subprocess.STDOUT)
        target_exe = out.decode('ansi').strip()
    finally:
        os.remove(vbs_path)

    if not target_exe or not os.path.exists(target_exe):
        # The shortcut came from another machine (or failed to parse):
        # probe the usual install locations instead.
        default_paths = [
            os.path.join(os.environ.get("LOCALAPPDATA", ""), r"Programs\LTX Desktop\LTX Desktop.exe"),
            r"C:\Program Files\LTX Desktop\LTX Desktop.exe",
            r"D:\Program Files\LTX Desktop\LTX Desktop.exe",
            r"E:\Program Files\LTX Desktop\LTX Desktop.exe",
        ]
        for candidate in default_paths:
            if os.path.exists(candidate):
                target_exe = candidate
                print(f"\033[96m[INFO] 自动检测到 LTX 原版安装路径: {candidate}\033[0m")
                break
        else:
            print(f"\033[91m[ERROR] 未能找到原版 LTX Desktop 的安装路径!\033[0m")
            print("请清理 LTX_Shortcut 文件夹,并将您当前电脑上真正的原版快捷方式重贴复制进去。")
            sys.exit(1)

    return os.path.dirname(target_exe)
59
+
60
+ USER_PROFILE = os.path.expanduser("~")
61
+ PYTHON_EXE = os.path.join(USER_PROFILE, r"AppData\Local\LTXDesktop\python\python.exe")
62
+ DATA_DIR = os.path.join(USER_PROFILE, r"AppData\Local\LTXDesktop")
63
+
64
+ # 1. 动态获取主安装路径
65
+ LTX_INSTALL_DIR = resolve_ltx_path()
66
+ BACKEND_DIR = os.path.join(LTX_INSTALL_DIR, r"resources\backend")
67
+ UI_FILE_NAME = "UI/index.html"
68
+
69
+ # 环境致命检测:如果官方 Python 还没解压释放,立刻强制中断整个程序
70
+ if not os.path.exists(PYTHON_EXE):
71
+ print(f"\n\033[1;41m [致命错误] 您的电脑上尚未配置好 LTX 的官方渲染核心框架! \033[0m")
72
+ print(f"\033[93m此应用仅是 UI 图形控制台,必需依赖原版软件环境才能生成。在 ({PYTHON_EXE}) 未找到运行引擎。\n")
73
+ print(">> 解决方案:\n1. 请先在您的电脑上正常安装【LTX Desktop 官方原版软件】。")
74
+ print("2. 必需:双击打开运行一次原版软件!(运行后原版软件会在后台自动释放环境)")
75
+ print("3. 把原版软件的快捷方式复制到本文档的 LTX_Shortcut 文件夹里面。")
76
+ print("4. 全部完成后,再重新启动本 run.bat 脚本即可!\033[0m\n")
77
+ os._exit(1)
78
+
79
+ # 2. 从目录读取改动过的 Python 文件 (热修复拦截器)
80
+ PATCHES_DIR = os.path.join(os.getcwd(), "patches")
81
+ os.makedirs(PATCHES_DIR, exist_ok=True)
82
+
83
+ # 3. 默认输出定向至程序根目录
84
+ LOCAL_OUTPUTS = os.path.join(os.getcwd(), "outputs")
85
+ os.makedirs(LOCAL_OUTPUTS, exist_ok=True)
86
+
87
+ # 强制注入自定义输出目录至 LTX 缓存数据中
88
+ os.makedirs(DATA_DIR, exist_ok=True)
89
+ with open(os.path.join(DATA_DIR, "custom_dir.txt"), 'w', encoding='utf-8') as f:
90
+ f.write(LOCAL_OUTPUTS)
91
+
92
+ os.environ["LTX_APP_DATA_DIR"] = DATA_DIR
93
+
94
+ # 将 patches 目录优先级提升,做到 Python 无损替换
95
+ os.environ["PYTHONPATH"] = f"{PATCHES_DIR};{BACKEND_DIR}"
96
+
97
def get_lan_ip():
    """Best-effort detection of this machine's LAN IPv4 address.

    Preference order: 192.168.x.x, then 10.x / 172.16-31.x, then whatever
    interface the OS routes toward the internet. Returns ``"127.0.0.1"``
    when detection fails.
    """
    try:
        host_name = socket.gethostname()
        _, _, ip_list = socket.gethostbyname_ex(host_name)

        candidates = []
        for ip in ip_list:
            if ip.startswith("192.168."):
                return ip
            if ip.startswith("10.") or (ip.startswith("172.") and 16 <= int(ip.split('.')[1]) <= 31):
                candidates.append(ip)

        if candidates:
            return candidates[0]

        # Fallback: let the OS pick the outbound interface. A UDP "connect"
        # sends no packets; it only selects a route. The context manager
        # guarantees the socket is closed even if getsockname() raises —
        # the original leaked the fd on that path.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any lookup failure falls back to loopback.
        return "127.0.0.1"
120
+
121
+ LAN_IP = get_lan_ip()
122
+
123
+ # ============================================================
124
+ # 服务启动逻辑
125
+ # ============================================================
126
def check_port_in_use(port):
    """Return True when something already answers on 127.0.0.1:``port``."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with probe:
        # connect_ex returns 0 on a successful TCP handshake.
        return probe.connect_ex(('127.0.0.1', port)) == 0
129
+
130
def launch_backend():
    """Start the core engine, listening on 0.0.0.0 so LAN clients can reach it.

    Refuses to start when port 3000 is taken (usually the stock LTX Desktop
    still running), writes a sys.path-pinning launcher into the patches dir,
    and runs it under the official bundled Python. Blocks until the child
    exits; a non-zero exit kills this process too.
    """
    if check_port_in_use(3000):
        print(f"\n\033[1;41m [致命错误] 3000 端口已被占用,无法启动核心引擎! \033[0m")
        print("\033[93m>> 绝大多数情况下,这是因为【官方原版 LTX Desktop】正在您的电脑后台运行。\033[0m")
        print(">> 冲突会导致显存爆炸。请检查右下角系统托盘图标,右键完全退出官方软件。")
        print(">> 退出后重新双击 run.bat 启动本程序!\n")
        os._exit(1)

    print(f"\033[96m[CORE] 核心引擎正在启动...\033[0m")
    # Keep only application-level log records; drop the noisy HTTP access log.
    import logging as _logging
    _logging.basicConfig(
        level=_logging.INFO,
        format="[%(asctime)s] %(levelname)s %(name)s: %(message)s",
        datefmt="%H:%M:%S",
        force=True
    )

    # Generate a launcher that pins sys.path so the patched modules always win
    # over the stock backend, regardless of the child's working directory.
    launcher_code = f"""
import sys
import os

patch_dir = r"{PATCHES_DIR}"
backend_dir = r"{BACKEND_DIR}"

# 防御性清除:强行剥离所有的默认 backend_dir 引用
sys.path = [p for p in sys.path if p and os.path.normpath(p) != os.path.normpath(backend_dir)]
sys.path = [p for p in sys.path if p and p != "." and p != ""]

# 绝对插队注入:优先搜索 PATCHES_DIR
sys.path.insert(0, patch_dir)
sys.path.insert(1, backend_dir)

import uvicorn
from ltx2_server import app

if __name__ == '__main__':
    uvicorn.run(app, host="0.0.0.0", port=3000, log_level="info", access_log=False)
"""
    launcher_path = os.path.join(PATCHES_DIR, "launcher.py")
    with open(launcher_path, "w", encoding="utf-8") as f:
        f.write(launcher_code)

    cmd = [PYTHON_EXE, launcher_path]
    env = os.environ.copy()
    result = subprocess.run(cmd, cwd=BACKEND_DIR, env=env)
    if result.returncode != 0:
        print(f"\n\033[1;41m [致命错误] 核心引擎异常崩溃退出! (Exit Code: {result.returncode})\033[0m")
        print(">> 请检查上述终端报错信息。确认显卡驱动是否正常。")
        os._exit(1)
182
+
183
ui_app = FastAPI()
# NOTE: the static-resources mount was removed deliberately (security concern).


@ui_app.get("/")
async def serve_index():
    # Serve the single-page UI shell.
    return FileResponse(os.path.join(os.getcwd(), UI_FILE_NAME))


@ui_app.get("/index.css")
async def serve_css():
    # Stylesheet for the UI shell.
    return FileResponse(os.path.join(os.getcwd(), "UI/index.css"))


@ui_app.get("/index.js")
async def serve_js():
    # Front-end logic for the UI shell.
    return FileResponse(os.path.join(os.getcwd(), "UI/index.js"))
197
+
198
def launch_ui_server():
    """Serve the UI on port 4000 and print the reachable addresses."""
    print(f"\033[92m[UI] 工作站已就绪!\033[0m")
    print(f"\033[92m[LOCAL] 本机访问: http://127.0.0.1:4000\033[0m")
    print(f"\033[93m[WIFI] 局域网访问: http://{LAN_IP}:4000\033[0m")

    # Silence the spurious WinError 10054 (client forcibly closed) noise on
    # Windows. Uvicorn owns the event loop, so the only safe hook is a
    # logging filter on the asyncio logger.
    if sys.platform == 'win32':
        class WinError10054Filter(logging.Filter):
            def filter(self, record):
                if record.name == 'asyncio' and hasattr(record, 'exc_info') and record.exc_info:
                    exc_type, exc_value, _ = record.exc_info
                    if isinstance(exc_value, ConnectionResetError) and getattr(exc_value, 'winerror', None) == 10054:
                        return False
                if "10054" in record.getMessage() and "ConnectionResetError" in record.getMessage():
                    return False
                return True
        logging.getLogger("asyncio").addFilter(WinError10054Filter())

    uvicorn.run(ui_app, host="0.0.0.0", port=4000, log_level="warning", access_log=False)
218
+
219
if __name__ == "__main__":
    os.system('cls' if os.name == 'nt' else 'clear')
    print("\033[1;97;44m LTX-2 CINEMATIC WORKSTATION | NETWORK ENABLED \033[0m\n")

    # The backend runs on a daemon thread so Ctrl+C on the UI server kills both.
    threading.Thread(target=launch_backend, daemon=True).start()

    # Wait (up to 30s) until the core answers on port 3000.
    print("\033[93m[SYS] 正在等待内部核心 3000 端口启动...\033[0m")
    backend_ready = False
    for _ in range(30):
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                if s.connect_ex(('127.0.0.1', 3000)) == 0:
                    backend_ready = True
                    break
        except Exception:
            pass
        time.sleep(1)

    if backend_ready:
        print("\033[92m[SYS] 3000 端口已通过连通性握手验证!后端装载成功。\033[0m")
    else:
        print("\033[1;41m [崩坏警告] 等待 30 秒后,3000 端口依然无法连通! \033[0m")
        print(">> Uvicorn 可能在后台陷入了死锁,或者被防火墙拦截,前端大概率将无法连接到后端!")
        print(">> 请检查上方是否有 Python 报错。\n")

    try:
        launch_ui_server()
    except KeyboardInterrupt:
        sys.exit(0)
LTX2.3/patches/__pycache__/api_types.cpython-313.pyc ADDED
Binary file (13 kB). View file
 
LTX2.3/patches/__pycache__/app_factory.cpython-313.pyc ADDED
Binary file (59.1 kB). View file
 
LTX2.3/patches/api_types.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Pydantic request/response models and TypedDicts for ltx2_server."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Literal, NamedTuple, TypeAlias, TypedDict
6
+ from typing import Annotated
7
+
8
+ from pydantic import BaseModel, Field, StringConstraints
9
+
10
+ NonEmptyPrompt = Annotated[str, StringConstraints(strip_whitespace=True, min_length=1)]
11
+ ModelFileType = Literal[
12
+ "checkpoint",
13
+ "upsampler",
14
+ "distilled_lora",
15
+ "ic_lora",
16
+ "depth_processor",
17
+ "person_detector",
18
+ "pose_processor",
19
+ "text_encoder",
20
+ "zit",
21
+ ]
22
+
23
+
24
+ class ImageConditioningInput(NamedTuple):
25
+ """Image conditioning triplet used by all video pipelines."""
26
+
27
+ path: str
28
+ frame_idx: int
29
+ strength: float
30
+
31
+
32
+ # ============================================================
33
+ # TypedDicts for module-level state globals
34
+ # ============================================================
35
+
36
+
37
+ class GenerationState(TypedDict):
38
+ id: str | None
39
+ cancelled: bool
40
+ result: str | list[str] | None
41
+ error: str | None
42
+ status: str # "idle" | "running" | "complete" | "cancelled" | "error"
43
+ phase: str
44
+ progress: int
45
+ current_step: int
46
+ total_steps: int
47
+
48
+
49
+ JsonObject: TypeAlias = dict[str, object]
50
+ VideoCameraMotion = Literal[
51
+ "none",
52
+ "dolly_in",
53
+ "dolly_out",
54
+ "dolly_left",
55
+ "dolly_right",
56
+ "jib_up",
57
+ "jib_down",
58
+ "static",
59
+ "focus_shift",
60
+ ]
61
+
62
+
63
+ # ============================================================
64
+ # Response Models
65
+ # ============================================================
66
+
67
+
68
+ class ModelStatusItem(BaseModel):
69
+ id: str
70
+ name: str
71
+ loaded: bool
72
+ downloaded: bool
73
+
74
+
75
+ class GpuTelemetry(BaseModel):
76
+ name: str
77
+ vram: int
78
+ vramUsed: int
79
+
80
+
81
+ class HealthResponse(BaseModel):
82
+ status: str
83
+ models_loaded: bool
84
+ active_model: str | None
85
+ gpu_info: GpuTelemetry
86
+ sage_attention: bool
87
+ models_status: list[ModelStatusItem]
88
+
89
+
90
+ class GpuInfoResponse(BaseModel):
91
+ cuda_available: bool
92
+ mps_available: bool = False
93
+ gpu_available: bool = False
94
+ gpu_name: str | None
95
+ vram_gb: int | None
96
+ gpu_info: GpuTelemetry
97
+
98
+
99
+ class RuntimePolicyResponse(BaseModel):
100
+ force_api_generations: bool
101
+
102
+
103
+ class GenerationProgressResponse(BaseModel):
104
+ status: str
105
+ phase: str
106
+ progress: int
107
+ currentStep: int | None
108
+ totalSteps: int | None
109
+
110
+
111
+ class ModelInfo(BaseModel):
112
+ id: str
113
+ name: str
114
+ description: str
115
+
116
+
117
+ class ModelFileStatus(BaseModel):
118
+ id: ModelFileType
119
+ name: str
120
+ description: str
121
+ downloaded: bool
122
+ size: int
123
+ expected_size: int
124
+ required: bool = True
125
+ is_folder: bool = False
126
+ optional_reason: str | None = None
127
+
128
+
129
+ class TextEncoderStatus(BaseModel):
130
+ downloaded: bool
131
+ size_bytes: int
132
+ size_gb: float
133
+ expected_size_gb: float
134
+
135
+
136
+ class ModelsStatusResponse(BaseModel):
137
+ models: list[ModelFileStatus]
138
+ all_downloaded: bool
139
+ total_size: int
140
+ downloaded_size: int
141
+ total_size_gb: float
142
+ downloaded_size_gb: float
143
+ models_path: str
144
+ has_api_key: bool
145
+ text_encoder_status: TextEncoderStatus
146
+ use_local_text_encoder: bool
147
+
148
+
149
+ class DownloadProgressResponse(BaseModel):
150
+ status: str
151
+ current_downloading_file: ModelFileType | None
152
+ current_file_progress: int
153
+ total_progress: int
154
+ total_downloaded_bytes: int
155
+ expected_total_bytes: int
156
+ completed_files: set[ModelFileType]
157
+ all_files: set[ModelFileType]
158
+ error: str | None
159
+ speed_mbps: int
160
+
161
+
162
+ class SuggestGapPromptResponse(BaseModel):
163
+ status: str = "success"
164
+ suggested_prompt: str
165
+
166
+
167
+ class GenerateVideoResponse(BaseModel):
168
+ status: str
169
+ video_path: str | None = None
170
+
171
+
172
+ class GenerateImageResponse(BaseModel):
173
+ status: str
174
+ image_paths: list[str] | None = None
175
+
176
+
177
+ class CancelResponse(BaseModel):
178
+ status: str
179
+ id: str | None = None
180
+
181
+
182
+ class RetakeResponse(BaseModel):
183
+ status: str
184
+ video_path: str | None = None
185
+ result: JsonObject | None = None
186
+
187
+
188
+ class IcLoraExtractResponse(BaseModel):
189
+ conditioning: str
190
+ original: str
191
+ conditioning_type: Literal["canny", "depth"]
192
+ frame_time: float
193
+
194
+
195
+ class IcLoraGenerateResponse(BaseModel):
196
+ status: str
197
+ video_path: str | None = None
198
+
199
+
200
+ class ModelDownloadStartResponse(BaseModel):
201
+ status: str
202
+ message: str | None = None
203
+ sessionId: str | None = None
204
+
205
+
206
+ class TextEncoderDownloadResponse(BaseModel):
207
+ status: str
208
+ message: str | None = None
209
+ sessionId: str | None = None
210
+
211
+
212
+ class StatusResponse(BaseModel):
213
+ status: str
214
+
215
+
216
+ class ErrorResponse(BaseModel):
217
+ error: str
218
+ message: str | None = None
219
+
220
+
221
+ # ============================================================
222
+ # Request Models
223
+ # ============================================================
224
+
225
+
226
+ class GenerateVideoRequest(BaseModel):
227
+ prompt: NonEmptyPrompt
228
+ resolution: str = "512p"
229
+ model: str = "fast"
230
+ cameraMotion: VideoCameraMotion = "none"
231
+ negativePrompt: str = ""
232
+ duration: str = "2"
233
+ fps: str = "24"
234
+ audio: str = "false"
235
+ imagePath: str | None = None
236
+ audioPath: str | None = None
237
+ startFramePath: str | None = None
238
+ endFramePath: str | None = None
239
+ aspectRatio: Literal["16:9", "9:16"] = "16:9"
240
+
241
+
242
+ class GenerateImageRequest(BaseModel):
243
+ prompt: NonEmptyPrompt
244
+ width: int = 1024
245
+ height: int = 1024
246
+ numSteps: int = 4
247
+ numImages: int = 1
248
+
249
+
250
+ def _default_model_types() -> set[ModelFileType]:
251
+ return set()
252
+
253
+
254
+ class ModelDownloadRequest(BaseModel):
255
+ modelTypes: set[ModelFileType] = Field(default_factory=_default_model_types)
256
+
257
+
258
+ class RequiredModelsResponse(BaseModel):
259
+ modelTypes: list[ModelFileType]
260
+
261
+
262
+ class SuggestGapPromptRequest(BaseModel):
263
+ beforePrompt: str = ""
264
+ afterPrompt: str = ""
265
+ beforeFrame: str | None = None
266
+ afterFrame: str | None = None
267
+ gapDuration: float = 5
268
+ mode: str = "t2v"
269
+ inputImage: str | None = None
270
+
271
+
272
+ class RetakeRequest(BaseModel):
273
+ video_path: str
274
+ start_time: float = 0
275
+ duration: float = 0
276
+ prompt: str = ""
277
+ mode: str = "replace_video_only"
278
+ width: int | None = None
279
+ height: int | None = None
280
+
281
+
282
+ class IcLoraExtractRequest(BaseModel):
283
+ video_path: str
284
+ conditioning_type: Literal["canny", "depth"] = "canny"
285
+ frame_time: float = 0
286
+
287
+
288
+ class IcLoraImageInput(BaseModel):
289
+ path: str
290
+ frame: int = 0
291
+ strength: float = 1.0
292
+
293
+
294
+ def _default_ic_lora_images() -> list[IcLoraImageInput]:
295
+ return []
296
+
297
+
298
+ class IcLoraGenerateRequest(BaseModel):
299
+ video_path: str
300
+ conditioning_type: Literal["canny", "depth"]
301
+ prompt: NonEmptyPrompt
302
+ conditioning_strength: float = 1.0
303
+ num_inference_steps: int = 30
304
+ cfg_guidance_scale: float = 1.0
305
+ negative_prompt: str = ""
306
+ images: list[IcLoraImageInput] = Field(default_factory=_default_ic_lora_images)
LTX2.3/patches/app_factory.py ADDED
@@ -0,0 +1,1396 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """FastAPI app factory decoupled from runtime bootstrap side effects."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import base64
6
+ import hmac
7
+ import os
8
+
9
# OOM / VRAM-fragmentation mitigation: the allocator option must be injected
# before torch initializes CUDA.  Older PyTorch releases only read
# PYTORCH_CUDA_ALLOC_CONF, while newer releases use PYTORCH_ALLOC_CONF — the
# original code set only the latter, so the fix was silently inert on older
# torch builds.  Set both, and use setdefault() so a value the user already
# exported is not clobbered.
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")
os.environ.setdefault("PYTORCH_ALLOC_CONF", "expandable_segments:True")
import torch  # hoisted to module scope so the allocator config above applies
12
+ from collections.abc import Awaitable, Callable
13
+ from typing import TYPE_CHECKING
14
+ from pathlib import Path # 必须导入,用于处理 Windows 路径
15
+
16
+ from fastapi import FastAPI, Request, UploadFile, File
17
+ from fastapi.exceptions import RequestValidationError
18
+ from fastapi.middleware.cors import CORSMiddleware
19
+ from fastapi.responses import JSONResponse
20
+ from pydantic import ConfigDict
21
+ from fastapi.staticfiles import StaticFiles # 必须导入,用于挂载静态目录
22
+ from starlette.responses import Response as StarletteResponse
23
+ import shutil
24
+ import tempfile
25
+ import time
26
+ from api_types import ImageConditioningInput, GenerateVideoRequest
27
+
28
+ from _routes._errors import HTTPError
29
+ from _routes.generation import router as generation_router
30
+ from _routes.health import router as health_router
31
+ from _routes.ic_lora import router as ic_lora_router
32
+ from _routes.image_gen import router as image_gen_router
33
+ from _routes.models import router as models_router
34
+ from _routes.suggest_gap_prompt import router as suggest_gap_prompt_router
35
+ from _routes.retake import router as retake_router
36
+ from _routes.runtime_policy import router as runtime_policy_router
37
+ from _routes.settings import router as settings_router
38
+ from logging_policy import log_http_error, log_unhandled_exception
39
+ from state import init_state_service
40
+
41
+ if TYPE_CHECKING:
42
+ from app_handler import AppHandler
43
+
44
+ # 跨域配置:允许所有来源,解决本地网页调用限制
45
+ DEFAULT_ALLOWED_ORIGINS: list[str] = ["*"]
46
+
47
+
48
def _extend_generate_video_request_model() -> None:
    """Keep custom video fields working across upstream request-model changes.

    Grafts the optional first/last-frame path fields onto the upstream
    GenerateVideoRequest model when missing, switches the model to tolerate
    unknown keys, and rebuilds the pydantic model if anything changed.
    """
    needs_rebuild = False

    # Add startFramePath / endFramePath only if this version of the upstream
    # model does not already declare them.
    field_annotations = dict(getattr(GenerateVideoRequest, "__annotations__", {}))
    missing = [
        name
        for name in ("startFramePath", "endFramePath")
        if name not in field_annotations
    ]
    for name in missing:
        field_annotations[name] = str | None
        setattr(GenerateVideoRequest, name, None)  # default when omitted
    if missing:
        GenerateVideoRequest.__annotations__ = field_annotations
        needs_rebuild = True

    # Accept (rather than reject) extra keys sent by older/newer frontends.
    config = dict(getattr(GenerateVideoRequest, "model_config", {}) or {})
    if config.get("extra") != "allow":
        config["extra"] = "allow"
        GenerateVideoRequest.model_config = ConfigDict(**config)
        needs_rebuild = True

    if needs_rebuild:
        GenerateVideoRequest.model_rebuild(force=True)
70
+
71
+
72
+ def create_app(
73
+ *,
74
+ handler: "AppHandler",
75
+ allowed_origins: list[str] | None = None,
76
+ title: str = "LTX-2 Video Generation Server",
77
+ auth_token: str = "",
78
+ admin_token: str = "",
79
+ ) -> FastAPI:
80
+ """Create a configured FastAPI app bound to the provided handler."""
81
+ init_state_service(handler)
82
+ _extend_generate_video_request_model()
83
+
84
+ app = FastAPI(title=title)
85
+ app.state.admin_token = admin_token # type: ignore[attr-defined]
86
+
87
+ # 彻底压制 WinError 10054 (客户端强制断开) 的底层警告报错
88
+ import sys, asyncio
89
+
90
+ if sys.platform == "win32":
91
+ try:
92
+ loop = asyncio.get_event_loop()
93
+
94
+ def silence_winerror_10054(loop, context):
95
+ exc = context.get("exception")
96
+ if (
97
+ isinstance(exc, ConnectionResetError)
98
+ and getattr(exc, "winerror", None) == 10054
99
+ ):
100
+ return
101
+ loop.default_exception_handler(context)
102
+
103
+ loop.set_exception_handler(silence_winerror_10054)
104
+ except Exception:
105
+ pass
106
+
107
+ # --- 核心修复:对准 LTX 真正的输出目录 (AppData) ---
108
+ def get_dynamic_output_path():
109
+ base_dir = (
110
+ Path(os.environ.get("LOCALAPPDATA", os.path.expanduser("~/AppData/Local")))
111
+ / "LTXDesktop"
112
+ ).resolve()
113
+ config_file = base_dir / "custom_dir.txt"
114
+ if config_file.exists():
115
+ try:
116
+ custom_dir = config_file.read_text(encoding="utf-8").strip()
117
+ if custom_dir:
118
+ p = Path(custom_dir)
119
+ p.mkdir(parents=True, exist_ok=True)
120
+ return p
121
+ except Exception:
122
+ pass
123
+ default_dir = base_dir / "outputs"
124
+ default_dir.mkdir(parents=True, exist_ok=True)
125
+ return default_dir
126
+
127
+ actual_output_path = get_dynamic_output_path()
128
+ handler.config.outputs_dir = actual_output_path
129
+
130
+ upload_tmp_path = actual_output_path / "uploads"
131
+
132
+ # 如果文件夹不存在则创建,防止挂载失败
133
+ if not actual_output_path.exists():
134
+ actual_output_path.mkdir(parents=True, exist_ok=True)
135
+ if not upload_tmp_path.exists():
136
+ upload_tmp_path.mkdir(parents=True, exist_ok=True)
137
+
138
+ # 挂载静态服务:将该目录映射到 http://127.0.0.1:3000/outputs
139
+ app.mount(
140
+ "/outputs", StaticFiles(directory=str(actual_output_path)), name="outputs"
141
+ )
142
+ # -----------------------------------------------
143
+
144
+ # 配置 CORS
145
+ app.add_middleware(
146
+ CORSMiddleware,
147
+ allow_origins=allowed_origins or DEFAULT_ALLOWED_ORIGINS,
148
+ allow_methods=["*"],
149
+ allow_headers=["*"],
150
+ )
151
+
152
+ # === [全局隔离补丁] ===
153
+ # 强制将每一个新的 HTTP 线程/协程请求的默认显卡都强绑定到用户选定的设备上
154
+ @app.middleware("http")
155
+ async def _sync_gpu_middleware(
156
+ request: Request,
157
+ call_next: Callable[[Request], Awaitable[StarletteResponse]],
158
+ ) -> StarletteResponse:
159
+ import torch
160
+
161
+ if (
162
+ torch.cuda.is_available()
163
+ and getattr(handler.config.device, "type", "") == "cuda"
164
+ ):
165
+ idx = handler.config.device.index
166
+ if idx is not None:
167
+ # 能够强行夺取那些底层写死了 cuda:0 而忽略 config.device 的第三方库
168
+ torch.cuda.set_device(idx)
169
+ return await call_next(request)
170
+
171
+ # 认证中间件
172
+ @app.middleware("http")
173
+ async def _auth_middleware(
174
+ request: Request,
175
+ call_next: Callable[[Request], Awaitable[StarletteResponse]],
176
+ ) -> StarletteResponse:
177
+ # 关键修复:如果是获取生成的图片,直接放行,不检查 Token
178
+ if (
179
+ request.url.path.startswith("/outputs")
180
+ or request.url.path == "/api/system/upload-image"
181
+ ):
182
+ return await call_next(request)
183
+
184
+ if not auth_token:
185
+ return await call_next(request)
186
+ if request.method == "OPTIONS":
187
+ return await call_next(request)
188
+
189
+ def _token_matches(candidate: str) -> bool:
190
+ return hmac.compare_digest(candidate, auth_token)
191
+
192
+ # WebSocket 认证
193
+ if request.headers.get("upgrade", "").lower() == "websocket":
194
+ if _token_matches(request.query_params.get("token", "")):
195
+ return await call_next(request)
196
+ return JSONResponse(status_code=401, content={"error": "Unauthorized"})
197
+
198
+ # HTTP 认证 (Bearer/Basic)
199
+ auth_header = request.headers.get("authorization", "")
200
+ if auth_header.startswith("Bearer ") and _token_matches(auth_header[7:]):
201
+ return await call_next(request)
202
+ if auth_header.startswith("Basic "):
203
+ try:
204
+ decoded = base64.b64decode(auth_header[6:]).decode()
205
+ _, _, password = decoded.partition(":")
206
+ if _token_matches(password):
207
+ return await call_next(request)
208
+ except Exception:
209
+ pass
210
+ return JSONResponse(status_code=401, content={"error": "Unauthorized"})
211
+
212
+ # 异常处理逻辑
213
+ _FALLBACK = "An unexpected error occurred"
214
+
215
+ async def _route_http_error_handler(
216
+ request: Request, exc: Exception
217
+ ) -> JSONResponse:
218
+ if isinstance(exc, HTTPError):
219
+ log_http_error(request, exc)
220
+ return JSONResponse(
221
+ status_code=exc.status_code, content={"error": exc.detail or _FALLBACK}
222
+ )
223
+ return JSONResponse(status_code=500, content={"error": str(exc) or _FALLBACK})
224
+
225
+ async def _validation_error_handler(
226
+ request: Request, exc: Exception
227
+ ) -> JSONResponse:
228
+ if isinstance(exc, RequestValidationError):
229
+ return JSONResponse(
230
+ status_code=422, content={"error": str(exc) or _FALLBACK}
231
+ )
232
+ return JSONResponse(status_code=422, content={"error": str(exc) or _FALLBACK})
233
+
234
+ async def _route_generic_error_handler(
235
+ request: Request, exc: Exception
236
+ ) -> JSONResponse:
237
+ log_unhandled_exception(request, exc)
238
+ return JSONResponse(status_code=500, content={"error": str(exc) or _FALLBACK})
239
+
240
+ app.add_exception_handler(RequestValidationError, _validation_error_handler)
241
+ app.add_exception_handler(HTTPError, _route_http_error_handler)
242
+ app.add_exception_handler(Exception, _route_generic_error_handler)
243
+
244
+ # --- 系统功能接口 ---
245
+ @app.post("/api/system/clear-gpu")
246
+ async def route_clear_gpu():
247
+ try:
248
+ import torch
249
+ import gc
250
+ import asyncio
251
+
252
+ # 1. 尝试终止任务并重置运行状态
253
+ if getattr(handler.generation, "is_generation_running", lambda: False)():
254
+ try:
255
+ handler.generation.cancel_generation()
256
+ except Exception:
257
+ pass
258
+ await asyncio.sleep(0.5)
259
+
260
+ # 暴力重置死锁状态
261
+ if hasattr(handler.generation, "_generation_id"):
262
+ handler.generation._generation_id = None
263
+ if hasattr(handler.generation, "_is_generating"):
264
+ handler.generation._is_generating = False
265
+
266
+ # 2. 强制卸载模型: 临时屏蔽底层锁定器
267
+ try:
268
+ mock_swapped = False
269
+ orig_running = None
270
+ if hasattr(handler.pipelines, "_generation_service"):
271
+ orig_running = (
272
+ handler.pipelines._generation_service.is_generation_running
273
+ )
274
+ handler.pipelines._generation_service.is_generation_running = (
275
+ lambda: False
276
+ )
277
+ mock_swapped = True
278
+ try:
279
+ handler.pipelines.unload_gpu_pipeline()
280
+ finally:
281
+ if mock_swapped:
282
+ handler.pipelines._generation_service.is_generation_running = (
283
+ orig_running
284
+ )
285
+ except Exception as e:
286
+ print(f"Force unload warning: {e}")
287
+
288
+ # 3. 深度清理
289
+ gc.collect()
290
+ if torch.cuda.is_available():
291
+ torch.cuda.empty_cache()
292
+ torch.cuda.ipc_collect()
293
+ return {
294
+ "status": "success",
295
+ "message": "GPU memory cleared and models unloaded",
296
+ }
297
+ except Exception as e:
298
+ return JSONResponse(status_code=500, content={"error": str(e)})
299
+
300
+ @app.post("/api/system/reset-state")
301
+ async def route_reset_state():
302
+ """轻量级状态重置:只清除 generation 状态锁,不卸载 GPU 管线。
303
+ 在每次新渲染开始前由前端调用,确保后端状态干净可用。"""
304
+ try:
305
+ gen = handler.generation
306
+ # 强制清除所有可能导致 is_generation_running() 返回 True 的标志
307
+ for attr in (
308
+ "_is_generating",
309
+ "_generation_id",
310
+ "_cancelled",
311
+ "_is_cancelled",
312
+ ):
313
+ if hasattr(gen, attr):
314
+ if attr in ("_is_generating", "_cancelled", "_is_cancelled"):
315
+ setattr(gen, attr, False)
316
+ else:
317
+ setattr(gen, attr, None)
318
+ # 某些实现用 threading.Event
319
+ for attr in ("_cancel_event",):
320
+ if hasattr(gen, attr):
321
+ try:
322
+ getattr(gen, attr).clear()
323
+ except Exception:
324
+ pass
325
+ print("[reset-state] Generation state has been reset cleanly.")
326
+ return {"status": "success", "message": "Generation state reset"}
327
+ except Exception as e:
328
+ import traceback
329
+
330
+ traceback.print_exc()
331
+ return JSONResponse(status_code=500, content={"error": str(e)})
332
+
333
+ @app.post("/api/system/set-dir")
334
+ async def route_set_dir(request: Request):
335
+ try:
336
+ data = await request.json()
337
+ new_dir = data.get("directory", "").strip()
338
+ base_dir = (
339
+ Path(
340
+ os.environ.get(
341
+ "LOCALAPPDATA", os.path.expanduser("~/AppData/Local")
342
+ )
343
+ )
344
+ / "LTXDesktop"
345
+ ).resolve()
346
+ config_file = base_dir / "custom_dir.txt"
347
+ if new_dir:
348
+ p = Path(new_dir)
349
+ p.mkdir(parents=True, exist_ok=True)
350
+ config_file.write_text(new_dir, encoding="utf-8")
351
+ else:
352
+ if config_file.exists():
353
+ config_file.unlink()
354
+ # 立即更新全局 config 控制
355
+ handler.config.outputs_dir = get_dynamic_output_path()
356
+ return {"status": "success", "directory": str(get_dynamic_output_path())}
357
+ except Exception as e:
358
+ return JSONResponse(status_code=500, content={"error": str(e)})
359
+
360
+ @app.get("/api/system/get-dir")
361
+ async def route_get_dir():
362
+ return {"status": "success", "directory": str(get_dynamic_output_path())}
363
+
364
+ @app.get("/api/system/browse-dir")
365
+ async def route_browse_dir():
366
+ try:
367
+ import subprocess
368
+
369
+ # 强制将对话框置顶层:通过 STA 线程 + Topmost 属性,避免被其他窗口挤入后台
370
+ ps_script = (
371
+ "[System.Reflection.Assembly]::LoadWithPartialName('System.Windows.Forms') | Out-Null;"
372
+ "[System.Reflection.Assembly]::LoadWithPartialName('System.Drawing') | Out-Null;"
373
+ "$f = New-Object System.Windows.Forms.FolderBrowserDialog;"
374
+ "$f.Description = '\u9009\u62e9 LTX \u89c6\u9891\u548c\u56fe\u50cf\u751f\u6210\u7684\u5168\u5c40\u8f93\u51fa\u76ee\u5f55';"
375
+ "$f.ShowNewFolderButton = $true;"
376
+ # 创建一个隐形助手窗口作为 parent 确保对话框在最顶层
377
+ "$owner = New-Object System.Windows.Forms.Form;"
378
+ "$owner.TopMost = $true;"
379
+ "$owner.StartPosition = 'CenterScreen';"
380
+ "$owner.Size = New-Object System.Drawing.Size(1, 1);"
381
+ "$owner.Show();"
382
+ "$owner.BringToFront();"
383
+ "$owner.Focus();"
384
+ "if ($f.ShowDialog($owner) -eq 'OK') { echo $f.SelectedPath };"
385
+ "$owner.Dispose();"
386
+ )
387
+
388
+ def run_ps():
389
+ process = subprocess.Popen(
390
+ ["powershell", "-STA", "-NoProfile", "-Command", ps_script],
391
+ stdout=subprocess.PIPE,
392
+ stderr=subprocess.PIPE,
393
+ text=True,
394
+ # 移除 CREATE_NO_WINDOW 以允许 UI 线程正常弹出
395
+ )
396
+ stdout, _ = process.communicate()
397
+ return stdout.strip()
398
+
399
+ from starlette.concurrency import run_in_threadpool
400
+
401
+ selected_dir = await run_in_threadpool(run_ps)
402
+ return {"status": "success", "directory": selected_dir}
403
+ except Exception as e:
404
+ return JSONResponse(status_code=500, content={"error": str(e)})
405
+
406
+ @app.get("/api/system/file")
407
+ async def route_serve_file(path: str):
408
+ from fastapi.responses import FileResponse
409
+
410
+ if os.path.exists(path):
411
+ return FileResponse(path)
412
+ return JSONResponse(status_code=404, content={"error": "File not found"})
413
+
414
+ @app.get("/api/system/list-gpus")
415
+ async def route_list_gpus():
416
+ try:
417
+ import torch
418
+
419
+ gpus = []
420
+ if torch.cuda.is_available():
421
+ current_idx = 0
422
+ dev = getattr(handler.config, "device", None)
423
+ if dev is not None and getattr(dev, "index", None) is not None:
424
+ current_idx = dev.index
425
+ for i in range(torch.cuda.device_count()):
426
+ try:
427
+ name = torch.cuda.get_device_name(i)
428
+ except Exception:
429
+ name = f"GPU {i}"
430
+ try:
431
+ vram_bytes = torch.cuda.get_device_properties(i).total_memory
432
+ vram_gb = vram_bytes / (1024**3)
433
+ vram_mb = vram_bytes / (1024**2)
434
+ except Exception:
435
+ vram_gb = 0.0
436
+ vram_mb = 0
437
+ gpus.append(
438
+ {
439
+ "id": i,
440
+ "name": name,
441
+ "vram": f"{vram_gb:.1f} GB",
442
+ "vram_mb": int(vram_mb),
443
+ "active": (i == current_idx),
444
+ }
445
+ )
446
+ return {"status": "success", "gpus": gpus}
447
+ except Exception as e:
448
+ return JSONResponse(status_code=500, content={"error": str(e)})
449
+
450
+ @app.post("/api/system/switch-gpu")
451
+ async def route_switch_gpu(request: Request):
452
+ try:
453
+ import torch
454
+ import gc
455
+ import asyncio
456
+
457
+ data = await request.json()
458
+ gpu_id = data.get("gpu_id")
459
+
460
+ if (
461
+ gpu_id is None
462
+ or not torch.cuda.is_available()
463
+ or gpu_id >= torch.cuda.device_count()
464
+ ):
465
+ return JSONResponse(
466
+ status_code=400, content={"error": "Invalid GPU ID"}
467
+ )
468
+
469
+ # 先尝试终止任何可能的卡死任务
470
+ if getattr(handler.generation, "is_generation_running", lambda: False)():
471
+ try:
472
+ handler.generation.cancel_generation()
473
+ except Exception:
474
+ pass
475
+ await asyncio.sleep(0.5)
476
+ if hasattr(handler.generation, "_generation_id"):
477
+ handler.generation._generation_id = None
478
+ if hasattr(handler.generation, "_is_generating"):
479
+ handler.generation._is_generating = False
480
+
481
+ # 1. 卸载当前 GPU 上的模型: 临时屏蔽底层锁定器
482
+ try:
483
+ mock_swapped = False
484
+ orig_running = None
485
+ if hasattr(handler.pipelines, "_generation_service"):
486
+ orig_running = (
487
+ handler.pipelines._generation_service.is_generation_running
488
+ )
489
+ handler.pipelines._generation_service.is_generation_running = (
490
+ lambda: False
491
+ )
492
+ mock_swapped = True
493
+ try:
494
+ handler.pipelines.unload_gpu_pipeline()
495
+ finally:
496
+ if mock_swapped:
497
+ handler.pipelines._generation_service.is_generation_running = (
498
+ orig_running
499
+ )
500
+ except Exception:
501
+ pass
502
+ gc.collect()
503
+ torch.cuda.empty_cache()
504
+
505
+ # 2. 切换全局设备配置
506
+ new_device = torch.device(f"cuda:{gpu_id}")
507
+ handler.config.device = new_device
508
+
509
+ # 3. 核心修复:设置当前进程的默认 CUDA 设备
510
+ # 这会影响到 torch.cuda.current_device() 和后续的模型加载
511
+ torch.cuda.set_device(gpu_id)
512
+
513
+ # 针对底层库可能直接读取 CUDA_VISIBLE_DEVICES 的情况
514
+ # 注意:torch 初始化后修改此变量不一定生效,但对某些库可能有引导作用
515
+ os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
516
+
517
+ # 4. 【核心修复】同步更新 TextEncoder 的设备指针
518
+ # 根本原因: LTXTextEncoder.self.device 在初始化时硬绑定了旧 GPU,
519
+ # 切换设备后 text context 仍在旧 GPU 上,与已迁移到新 GPU 的
520
+ # Transformer 产生 "cuda:0 and cuda:1" 设备不一致冲突。
521
+ try:
522
+ te_state = None
523
+ # 尝试多种路径访问 text_encoder 状态
524
+ if hasattr(handler, "state") and hasattr(handler.state, "text_encoder"):
525
+ te_state = handler.state.text_encoder
526
+ elif hasattr(handler, "_state") and hasattr(
527
+ handler._state, "text_encoder"
528
+ ):
529
+ te_state = handler._state.text_encoder
530
+
531
+ if te_state is not None:
532
+ # 4a. 更新 LTXTextEncoder 服务自身的 device 属性
533
+ if hasattr(te_state, "service") and hasattr(
534
+ te_state.service, "device"
535
+ ):
536
+ te_state.service.device = new_device
537
+ print(f"[TextEncoder] device updated to {new_device}")
538
+
539
+ # 4b. 将缓存的 encoder 权重迁移到 CPU,下次推理时再按新设备重加载
540
+ if (
541
+ hasattr(te_state, "cached_encoder")
542
+ and te_state.cached_encoder is not None
543
+ ):
544
+ try:
545
+ te_state.cached_encoder.to(torch.device("cpu"))
546
+ except Exception:
547
+ pass
548
+ te_state.cached_encoder = None
549
+ print(
550
+ "[TextEncoder] cached encoder cleared (will reload on new GPU)"
551
+ )
552
+
553
+ # 4c. 清除 API embeddings 缓存(tensor 绑定旧 GPU)
554
+ if hasattr(te_state, "api_embeddings"):
555
+ te_state.api_embeddings = None
556
+
557
+ # 4d. 清除 prompt cache(其中 tensor 也绑定旧 GPU)
558
+ if hasattr(te_state, "prompt_cache") and te_state.prompt_cache:
559
+ te_state.prompt_cache.clear()
560
+ print("[TextEncoder] prompt cache cleared")
561
+ except Exception as _te_err:
562
+ print(f"[TextEncoder] device sync warning (non-fatal): {_te_err}")
563
+
564
+ print(
565
+ f"Switched active GPU to: {torch.cuda.get_device_name(gpu_id)} (ID: {gpu_id})"
566
+ )
567
+ return {"status": "success", "message": f"Switched to GPU {gpu_id}"}
568
+ except Exception as e:
569
+ return JSONResponse(status_code=500, content={"error": str(e)})
570
+
571
+ # --- 核心增强:首尾帧插值与视频超分支持 ---
572
+ from handlers.video_generation_handler import VideoGenerationHandler
573
+ from services.retake_pipeline.ltx_retake_pipeline import LTXRetakePipeline
574
+ from server_utils.media_validation import normalize_optional_path
575
+ from PIL import Image
576
+
577
+ # 1. 增强插值功能 (Monkey Patch VideoGenerationHandler)
578
+ _orig_generate = VideoGenerationHandler.generate
579
+ _orig_generate_video = VideoGenerationHandler.generate_video
580
+
581
+ def patched_generate(self, req: GenerateVideoRequest):
582
+ # === [DEBUG] 打印当前生成状态 ===
583
+ gen = self._generation
584
+ is_running = (
585
+ gen.is_generation_running()
586
+ if hasattr(gen, "is_generation_running")
587
+ else "?方法不存在"
588
+ )
589
+ gen_id = getattr(gen, "_generation_id", "?属性不存在")
590
+ is_gen = getattr(gen, "_is_generating", "?属性不存在")
591
+ cancelled = getattr(
592
+ gen, "_cancelled", getattr(gen, "_is_cancelled", "?属性不存在")
593
+ )
594
+ print(f"\n[PATCH][patched_generate] ==> 收到新请求")
595
+ print(f" is_generation_running() = {is_running}")
596
+ print(f" _generation_id = {gen_id}")
597
+ print(f" _is_generating = {is_gen}")
598
+ print(f" _cancelled = {cancelled}")
599
+ start_frame_path = normalize_optional_path(getattr(req, "startFramePath", None))
600
+ end_frame_path = normalize_optional_path(getattr(req, "endFramePath", None))
601
+ aspect_ratio = getattr(req, "aspectRatio", None)
602
+ print(f" startFramePath = {start_frame_path}")
603
+ print(f" endFramePath = {end_frame_path}")
604
+ print(f" aspectRatio = {aspect_ratio}")
605
+
606
+ # 检查是否有音频
607
+ audio_path = normalize_optional_path(getattr(req, "audioPath", None))
608
+ print(f"[PATCH] audio_path = {audio_path}")
609
+
610
+ # 检查是否有图片(图生视频)
611
+ image_path = normalize_optional_path(getattr(req, "imagePath", None))
612
+ print(f"[PATCH] image_path = {image_path}")
613
+
614
+ # 始终使用自定义逻辑(支持首尾帧和竖屏)
615
+ print(f"[PATCH] 使用自定义逻辑处理")
616
+
617
+ # 计算分辨率
618
+ import uuid
619
+
620
+ resolution = req.resolution
621
+ duration = int(float(req.duration))
622
+ fps = int(float(req.fps))
623
+
624
+ # 720p 分辨率:横屏 1280x720,竖屏 720x1280
625
+ RESOLUTION_MAP = {
626
+ "540p": (960, 540),
627
+ "720p": (1280, 720),
628
+ "1080p": (1920, 1080),
629
+ }
630
+
631
+ def get_16_9_size(res):
632
+ return RESOLUTION_MAP.get(res, (1280, 720))
633
+
634
+ def get_9_16_size(res):
635
+ w, h = get_16_9_size(res)
636
+ return h, w # 交换宽高
637
+
638
+ if req.aspectRatio == "9:16":
639
+ width, height = get_9_16_size(resolution)
640
+ else:
641
+ width, height = get_16_9_size(resolution)
642
+
643
+ # 计算帧数
644
+ num_frames = ((duration * fps) // 8) * 8 + 1
645
+ num_frames = max(num_frames, 9)
646
+
647
+ print(f"[PATCH] 计算得到的分辨率: {width}x{height}, 帧数: {num_frames}")
648
+
649
+ # 设置首尾帧路径
650
+ self._start_frame_path = start_frame_path
651
+ self._end_frame_path = end_frame_path
652
+
653
+ # 无论有没有音频,都使用自定义逻辑支持首尾帧
654
+ try:
655
+ result = patched_generate_video(
656
+ self,
657
+ prompt=req.prompt,
658
+ image=None,
659
+ image_path=image_path,
660
+ height=height,
661
+ width=width,
662
+ num_frames=num_frames,
663
+ fps=fps,
664
+ seed=self._resolve_seed(),
665
+ camera_motion=req.cameraMotion,
666
+ negative_prompt=req.negativePrompt,
667
+ audio_path=audio_path,
668
+ )
669
+ print(f"[PATCH][patched_generate] <== 完成, 返回状态: complete")
670
+ return type("Response", (), {"status": "complete", "video_path": result})()
671
+ except Exception as e:
672
+ import traceback
673
+
674
+ print(f"[PATCH][patched_generate] 错误: {e}")
675
+ traceback.print_exc()
676
+ raise
677
+
678
+ def patched_generate_video(
679
+ self,
680
+ prompt,
681
+ image,
682
+ image_path=None,
683
+ height=None,
684
+ width=None,
685
+ num_frames=None,
686
+ fps=None,
687
+ seed=None,
688
+ camera_motion=None,
689
+ negative_prompt=None,
690
+ audio_path=None,
691
+ ):
692
+ # === [DEBUG] 打印当前生成状态 ===
693
+ gen = self._generation
694
+ is_running = (
695
+ gen.is_generation_running()
696
+ if hasattr(gen, "is_generation_running")
697
+ else "?方法不存在"
698
+ )
699
+ gen_id = getattr(gen, "_generation_id", "?属性不存在")
700
+ is_gen = getattr(gen, "_is_generating", "?属性不存在")
701
+ print(f"[PATCH][patched_generate_video] ==> 开始推理")
702
+ print(f" is_generation_running() = {is_running}")
703
+ print(f" _generation_id = {gen_id}")
704
+ print(f" _is_generating = {is_gen}")
705
+ print(
706
+ f" resolution = {width}x{height}, frames={num_frames}, fps={fps}"
707
+ )
708
+ print(f" image param = {type(image)}, {image is not None}")
709
+ print(f" image_path = {image_path}")
710
+ # ==================================
711
+ from ltx_pipelines.utils.args import (
712
+ ImageConditioningInput as LtxImageConditioningInput,
713
+ )
714
+
715
+ images_inputs = []
716
+ temp_paths = []
717
+ start_path = getattr(self, "_start_frame_path", None)
718
+ end_path = getattr(self, "_end_frame_path", None)
719
+ print(f"[PATCH] start_path={start_path}, end_path={end_path}")
720
+
721
+ # 如果没有首尾帧但有 image_path,使用 image_path 作为起始帧
722
+ if not start_path and not end_path and image_path:
723
+ print(f"[PATCH] 使用 image_path 作为起始帧: {image_path}")
724
+ start_path = image_path
725
+
726
+ # 检查是否有来自 imagePath 的数据(当只用首帧时)
727
+ has_image_param = image is not None
728
+ if has_image_param:
729
+ print(f"[PATCH] image param is available, will be used as start frame")
730
+
731
+ latent_num_frames = (num_frames - 1) // 8 + 1
732
+ last_latent_idx = latent_num_frames - 1
733
+ print(
734
+ f"[PATCH] latent_num_frames={latent_num_frames}, last_latent_idx={last_latent_idx}"
735
+ )
736
+
737
+ target_start_path = start_path if start_path else None
738
+ if not target_start_path and image is not None:
739
+ tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name
740
+ image.save(tmp)
741
+ temp_paths.append(tmp)
742
+ target_start_path = tmp
743
+ print(f"[PATCH] Using image param as start frame: {target_start_path}")
744
+
745
+ if target_start_path:
746
+ start_img = self._prepare_image(target_start_path, width, height)
747
+ tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name
748
+ start_img.save(tmp)
749
+ temp_paths.append(tmp)
750
+ # 转换 Windows 路径为正斜杠
751
+ tmp_normalized = tmp.replace("\\", "/")
752
+ images_inputs.append(
753
+ LtxImageConditioningInput(
754
+ path=tmp_normalized, frame_idx=0, strength=1.0
755
+ )
756
+ )
757
+ print(f"[PATCH] Added start frame: {tmp_normalized}, frame_idx=0")
758
+
759
+ if end_path:
760
+ end_img = self._prepare_image(end_path, width, height)
761
+ tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name
762
+ end_img.save(tmp)
763
+ temp_paths.append(tmp)
764
+ # 转换 Windows 路径为正斜杠
765
+ tmp_normalized = tmp.replace("\\", "/")
766
+ images_inputs.append(
767
+ LtxImageConditioningInput(
768
+ path=tmp_normalized, frame_idx=last_latent_idx, strength=1.0
769
+ )
770
+ )
771
+ print(
772
+ f"[PATCH] Added end frame: {tmp_normalized}, frame_idx={last_latent_idx}"
773
+ )
774
+
775
+ print(f"[PATCH] images_inputs count: {len(images_inputs)}")
776
+ if images_inputs:
777
+ for idx, img in enumerate(images_inputs):
778
+ print(
779
+ f"[PATCH] images_inputs[{idx}]: path={getattr(img, 'path', 'N/A')}, frame_idx={getattr(img, 'frame_idx', 'N/A')}, strength={getattr(img, 'strength', 'N/A')}"
780
+ )
781
+
782
+ print(f"[PATCH] audio_path = {audio_path}")
783
+
784
+ if self._generation.is_generation_cancelled():
785
+ raise RuntimeError("Generation was cancelled")
786
+
787
+ # 导入 uuid
788
+ import uuid
789
+
790
+ generation_id = uuid.uuid4().hex[:8]
791
+
792
+ # 根据是否有音频选择不同的 pipeline
793
+ if audio_path:
794
+ print(f"[PATCH] 加载 A2V pipeline(支持音频)")
795
+ pipeline_state = self._pipelines.load_a2v_pipeline()
796
+ num_inference_steps = 11 # A2V 需要这个参数
797
+ else:
798
+ print(f"[PATCH] 加载 Fast pipeline")
799
+ pipeline_state = self._pipelines.load_gpu_pipeline(
800
+ "fast", should_warm=False
801
+ )
802
+ num_inference_steps = None
803
+
804
+ # 启动 generation 状态(在 pipeline 加载之后)
805
+ self._generation.start_generation(generation_id)
806
+
807
+ # 处理 negative_prompt
808
+ neg_prompt = (
809
+ negative_prompt if negative_prompt else self.config.default_negative_prompt
810
+ )
811
+ enhanced_prompt = prompt + self.config.camera_motion_prompts.get(
812
+ camera_motion, ""
813
+ )
814
+
815
+ # 强制使用动态目录,忽略底层原始逻辑
816
+ dyn_dir = get_dynamic_output_path()
817
+ output_path = dyn_dir / f"generation_{uuid.uuid4().hex[:8]}.mp4"
818
+
819
+ try:
820
+ self._text.prepare_text_encoding(enhanced_prompt, enhance_prompt=False)
821
+ # 调整为 32 的倍数(LTX 要求)
822
+ height = round(height / 32) * 32
823
+ width = round(width / 32) * 32
824
+
825
+ if audio_path:
826
+ # A2V pipeline 参数
827
+ gen_kwargs = {
828
+ "prompt": enhanced_prompt,
829
+ "negative_prompt": neg_prompt,
830
+ "seed": seed,
831
+ "height": height,
832
+ "width": width,
833
+ "num_frames": num_frames,
834
+ "frame_rate": fps,
835
+ "num_inference_steps": num_inference_steps,
836
+ "images": images_inputs,
837
+ "audio_path": audio_path,
838
+ "audio_start_time": 0.0,
839
+ "audio_max_duration": None,
840
+ "output_path": str(output_path),
841
+ }
842
+ else:
843
+ # Fast pipeline 参数
844
+ gen_kwargs = {
845
+ "prompt": enhanced_prompt,
846
+ "seed": seed,
847
+ "height": height,
848
+ "width": width,
849
+ "num_frames": num_frames,
850
+ "frame_rate": fps,
851
+ "images": images_inputs,
852
+ "output_path": str(output_path),
853
+ }
854
+
855
+ pipeline_state.pipeline.generate(**gen_kwargs)
856
+
857
+ # 标记完成
858
+ self._generation.complete_generation(str(output_path))
859
+ return str(output_path)
860
+ finally:
861
+ self._text.clear_api_embeddings()
862
+ for p in temp_paths:
863
+ if os.path.exists(p):
864
+ os.unlink(p)
865
+ self._start_frame_path = None
866
+ self._end_frame_path = None
867
+
868
+ VideoGenerationHandler.generate = patched_generate
869
+ VideoGenerationHandler.generate_video = patched_generate_video
870
+
871
+ # 2. 增强视频超分功能 (Monkey Patch LTXRetakePipeline)
872
+ _orig_ltx_retake_run = LTXRetakePipeline._run
873
+
874
def patched_ltx_retake_run(
    self, video_path, prompt, start_time, end_time, seed, **kwargs
):
    """Retake-pipeline runner with optional SDEdit-style upscaling.

    When ``_target_width``/``_target_height`` have been injected onto the
    pipeline instance (by the upscale route), metadata, noise, and sampler
    hooks are temporarily monkey-patched so denoising starts from a
    partially-noised copy of the source video at the target resolution.
    Otherwise the original ``_run`` is invoked untouched.

    Fix: the original had an unreachable duplicate
    ``return _orig_ltx_retake_run(...)`` after the try/finally that already
    returns; that dead code is removed and the non-upscale path now exits
    early.
    """
    # Injected by the upscale route; absent on normal retake calls.
    target_w = getattr(self, "_target_width", None)
    target_h = getattr(self, "_target_height", None)
    target_strength = getattr(self, "_target_strength", 0.7)
    is_upscale = target_w is not None and target_h is not None

    import ltx_pipelines.utils.media_io as media_io
    import services.retake_pipeline.ltx_retake_pipeline as lrp
    import ltx_pipelines.utils.samplers as samplers
    import ltx_pipelines.utils.helpers as helpers

    _orig_get_meta = media_io.get_videostream_metadata
    _orig_lrp_get_meta = getattr(lrp, "get_videostream_metadata", _orig_get_meta)
    _orig_euler_loop = samplers.euler_denoising_loop
    _orig_noise_video = helpers.noise_video_state

    fps, num_frames, src_w, src_h = _orig_get_meta(video_path)

    if not is_upscale:
        # No injection requested: plain pass-through to the original runner.
        return _orig_ltx_retake_run(
            self, video_path, prompt, start_time, end_time, seed, **kwargs
        )

    print(
        f">>> 启动超分内核: {src_w}x{src_h} -> {target_w}x{target_h} (强度: {target_strength})"
    )

    # 1. Resolution injection: every metadata lookup reports the target size.
    def get_meta_patched(path):
        return fps, num_frames, target_w, target_h

    media_io.get_videostream_metadata = get_meta_patched
    lrp.get_videostream_metadata = get_meta_patched

    # 2. Initial-noise injection (SDEdit core: noise the source video up to
    #    the requested strength).
    def noise_video_patched(*args, **kwargs_inner):
        kwargs_inner["noise_scale"] = target_strength
        return _orig_noise_video(*args, **kwargs_inner)

    helpers.noise_video_state = noise_video_patched

    # 3. Sampler injection: skip sigmas above the noise strength so the
    #    denoising loop resumes from the matching point on the schedule.
    def patched_euler_loop(sigmas, video_state, audio_state, stepper, denoise_fn):
        full_len = len(sigmas)
        skip_idx = 0
        for i, s in enumerate(sigmas):
            if s <= target_strength:
                skip_idx = i
                break
        skip_idx = min(skip_idx, full_len - 2)  # always keep >= 2 steps
        new_sigmas = sigmas[skip_idx:]
        print(
            f">>> 采样拦截成功: 原步数 {full_len}, 现步数 {len(new_sigmas)}, 起始强度 {new_sigmas[0].item():.2f}"
        )
        return _orig_euler_loop(
            new_sigmas, video_state, audio_state, stepper, denoise_fn
        )

    samplers.euler_denoising_loop = patched_euler_loop

    kwargs["regenerate_video"] = False
    kwargs["regenerate_audio"] = False

    try:
        return _orig_ltx_retake_run(
            self, video_path, prompt, start_time, end_time, seed, **kwargs
        )
    finally:
        # Always restore the patched globals, even on failure or cancel.
        media_io.get_videostream_metadata = _orig_get_meta
        lrp.get_videostream_metadata = _orig_lrp_get_meta
        samplers.euler_denoising_loop = _orig_euler_loop
        helpers.noise_video_state = _orig_noise_video

LTXRetakePipeline._run = patched_ltx_retake_run
957
+
958
+ # --- 最终视频超分接口实现 ---
959
+ @app.post("/api/system/upscale-video")
960
+ async def route_upscale_video(request: Request):
961
+ try:
962
+ import uuid
963
+ import os
964
+ from datetime import datetime
965
+ from ltx_pipelines.utils.media_io import get_videostream_metadata
966
+ from ltx_core.types import SpatioTemporalScaleFactors
967
+
968
+ data = await request.json()
969
+ video_path = data.get("video_path")
970
+ target_res = data.get("resolution", "1080p")
971
+ prompt = data.get("prompt", "high quality, detailed, 4k")
972
+ strength = data.get("strength", 0.7) # 获取前端传来的重绘幅度
973
+
974
+ if not video_path or not os.path.exists(video_path):
975
+ return JSONResponse(
976
+ status_code=400, content={"error": "Invalid video path"}
977
+ )
978
+
979
+ # 计算目标宽高 (必须是 32 的倍数)
980
+ res_map = {"1080p": (1920, 1088), "720p": (1280, 704), "544p": (960, 544)}
981
+ target_w, target_h = res_map.get(target_res, (1920, 1088))
982
+
983
+ fps, num_frames, _, _ = get_videostream_metadata(video_path)
984
+
985
+ # 校验帧数 8k+1,如果不符则自动调整
986
+ scale = SpatioTemporalScaleFactors.default()
987
+ if (num_frames - 1) % scale.time != 0:
988
+ # 计算需要调整到的最近的有效帧数 (8k+1)
989
+ # 找到最接近的8k+1帧数
990
+ target_k = (num_frames - 1) // scale.time
991
+ # 选择最接近的k值:向下或向上取整
992
+ current_k = (num_frames - 1) // scale.time
993
+ current_remainder = (num_frames - 1) % scale.time
994
+
995
+ # 比较向上和向下取整哪个更接近
996
+ down_k = current_k
997
+ up_k = current_k + 1
998
+
999
+ # 向下取整的帧数
1000
+ down_frames = down_k * scale.time + 1
1001
+ # 向上取整的帧数
1002
+ up_frames = up_k * scale.time + 1
1003
+
1004
+ # 选择差异最小的
1005
+ if abs(num_frames - down_frames) <= abs(num_frames - up_frames):
1006
+ adjusted_frames = down_frames
1007
+ else:
1008
+ adjusted_frames = up_frames
1009
+
1010
+ print(
1011
+ f">>> 帧数调整: {num_frames} -> {adjusted_frames} (符合 8k+1 规则)"
1012
+ )
1013
+
1014
+ # 调整视频帧数 - 截断多余的帧或填充黑帧
1015
+ adjusted_video_path = None
1016
+ try:
1017
+ import cv2
1018
+ import numpy as np
1019
+ import tempfile
1020
+
1021
+ # 使用cv2读取视频
1022
+ cap = cv2.VideoCapture(video_path)
1023
+ if not cap.isOpened():
1024
+ raise Exception("无法打开视频文件")
1025
+
1026
+ frames = []
1027
+ while True:
1028
+ ret, frame = cap.read()
1029
+ if not ret:
1030
+ break
1031
+ frames.append(frame)
1032
+ cap.release()
1033
+
1034
+ original_frame_count = len(frames)
1035
+
1036
+ if adjusted_frames < original_frame_count:
1037
+ # 截断多余的帧
1038
+ frames = frames[:adjusted_frames]
1039
+ print(
1040
+ f">>> 已截断视频: {original_frame_count} -> {len(frames)} 帧"
1041
+ )
1042
+ else:
1043
+ # 填充黑帧 (复制最后一帧)
1044
+ last_frame = frames[-1] if frames else None
1045
+ if last_frame is not None:
1046
+ h, w = last_frame.shape[:2]
1047
+ black_frame = np.zeros((h, w, 3), dtype=np.uint8)
1048
+ while len(frames) < adjusted_frames:
1049
+ frames.append(black_frame.copy())
1050
+ print(
1051
+ f">>> 已填充视频: {original_frame_count} -> {len(frames)} 帧"
1052
+ )
1053
+
1054
+ # 保存调整后的视频到临时文件
1055
+ adjusted_video_fd = tempfile.NamedTemporaryFile(
1056
+ suffix=".mp4", delete=False
1057
+ )
1058
+ adjusted_video_path = adjusted_video_fd.name
1059
+ adjusted_video_fd.close()
1060
+
1061
+ fourcc = cv2.VideoWriter_fourcc(*"mp4v")
1062
+ out = cv2.VideoWriter(
1063
+ adjusted_video_path,
1064
+ fourcc,
1065
+ fps,
1066
+ (frames[0].shape[1], frames[0].shape[0]),
1067
+ )
1068
+ for frame in frames:
1069
+ out.write(frame)
1070
+ out.release()
1071
+
1072
+ video_path = adjusted_video_path
1073
+ num_frames = adjusted_frames
1074
+ print(
1075
+ f">>> 视频帧数调整完成: {original_frame_count} -> {num_frames}"
1076
+ )
1077
+
1078
+ except ImportError:
1079
+ # cv2不可用,尝试使用LTX内置方法
1080
+ try:
1081
+ from ltx_pipelines.utils.media_io import (
1082
+ read_video_stream,
1083
+ write_video_stream,
1084
+ )
1085
+ import numpy as np
1086
+
1087
+ frames, audio_data = read_video_stream(video_path, fps)
1088
+ original_frame_count = len(frames)
1089
+
1090
+ if adjusted_frames < original_frame_count:
1091
+ frames = frames[:adjusted_frames]
1092
+ else:
1093
+ while len(frames) < adjusted_frames:
1094
+ frames = np.concatenate([frames, frames[-1:]], axis=0)
1095
+
1096
+ import tempfile
1097
+
1098
+ adjusted_video_fd = tempfile.NamedTemporaryFile(
1099
+ suffix=".mp4", delete=False
1100
+ )
1101
+ adjusted_video_path = adjusted_video_fd.name
1102
+ adjusted_video_fd.close()
1103
+
1104
+ write_video_stream(adjusted_video_path, frames, fps)
1105
+ video_path = adjusted_video_path
1106
+ num_frames = adjusted_frames
1107
+ print(
1108
+ f">>> 视频帧数调整完成: {original_frame_count} -> {num_frames}"
1109
+ )
1110
+
1111
+ except Exception as e2:
1112
+ print(f">>> 视频帧数自动调整失败: {e2}")
1113
+ return JSONResponse(
1114
+ status_code=400,
1115
+ content={
1116
+ "error": f"视频帧数({num_frames})不符合 8k+1 规则,且自动调整失败。请手动将视频帧数调整为 8k+1 格式(如 9, 17, 25, 33, 41, 49, 57, 65, 73, 81, 89, 97, 105 等)。"
1117
+ },
1118
+ )
1119
+ except Exception as e:
1120
+ print(f">>> 视频帧数自动调整失败: {e}")
1121
+ return JSONResponse(
1122
+ status_code=400,
1123
+ content={
1124
+ "error": f"视频帧数({num_frames})不符合 8k+1 规则,且自动调整失败。请手动将视频帧数调整为 8k+1 格式(如 9, 17, 25, 33, 41, 49, 57, 65, 73, 81, 89, 97, 105 等)。"
1125
+ },
1126
+ )
1127
+
1128
+ # 1. 加载模型
1129
+ pipeline_state = handler.pipelines.load_retake_pipeline(distilled=True)
1130
+
1131
+ # 3. 启动任务
1132
+ generation_id = uuid.uuid4().hex[:8]
1133
+ handler.generation.start_generation(generation_id)
1134
+
1135
+ # 核心修正:确保文件保存在动态的输出目录
1136
+ save_dir = get_dynamic_output_path()
1137
+ filename = f"upscale_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{generation_id}.mp4"
1138
+ full_output_path = save_dir / filename
1139
+
1140
+ # 3. 执行真正的超分逻辑
1141
+ try:
1142
+ # 注入目标分辨率和重绘幅度
1143
+ pipeline_state.pipeline._target_width = target_w
1144
+ pipeline_state.pipeline._target_height = target_h
1145
+ pipeline_state.pipeline._target_strength = strength
1146
+
1147
+ def do_generate():
1148
+ pipeline_state.pipeline.generate(
1149
+ video_path=str(video_path),
1150
+ prompt=prompt,
1151
+ start_time=0.0,
1152
+ end_time=float(num_frames / fps),
1153
+ seed=int(time.time()) % 2147483647,
1154
+ output_path=str(full_output_path),
1155
+ distilled=True,
1156
+ regenerate_video=True,
1157
+ regenerate_audio=False,
1158
+ )
1159
+
1160
+ # 重要修复:放到线程池运行,避免阻塞主循环导致前端拿不到显存数据
1161
+ from starlette.concurrency import run_in_threadpool
1162
+
1163
+ await run_in_threadpool(do_generate)
1164
+
1165
+ handler.generation.complete_generation(str(full_output_path))
1166
+ return {"status": "complete", "video_path": filename}
1167
+ except Exception as e:
1168
+ # OOM 异常逃逸修复:强制返回友好的异常信息
1169
+ try:
1170
+ handler.generation.cancel_generation()
1171
+ except Exception:
1172
+ pass
1173
+ if hasattr(handler.generation, "_generation_id"):
1174
+ handler.generation._generation_id = None
1175
+ if hasattr(handler.generation, "_is_generating"):
1176
+ handler.generation._is_generating = False
1177
+
1178
+ error_msg = str(e)
1179
+ if "CUDA out of memory" in error_msg:
1180
+ error_msg = "🚨 显存不足 (OOM):视频时长过长或目标分辨率超出了当前显卡的承载极限,请降低目标分辨率重试!"
1181
+ raise RuntimeError(error_msg) from e
1182
+ finally:
1183
+ if hasattr(pipeline_state.pipeline, "_target_width"):
1184
+ del pipeline_state.pipeline._target_width
1185
+ if hasattr(pipeline_state.pipeline, "_target_height"):
1186
+ del pipeline_state.pipeline._target_height
1187
+ if hasattr(pipeline_state.pipeline, "_target_strength"):
1188
+ del pipeline_state.pipeline._target_strength
1189
+ import gc
1190
+
1191
+ gc.collect()
1192
+ if (
1193
+ getattr(torch, "cuda", None) is not None
1194
+ and torch.cuda.is_available()
1195
+ ):
1196
+ torch.cuda.empty_cache()
1197
+
1198
+ except Exception as e:
1199
+ import traceback
1200
+
1201
+ traceback.print_exc()
1202
+ return JSONResponse(status_code=500, content={"error": str(e)})
1203
+
1204
+ # ------------------
1205
+
1206
+ @app.post("/api/system/upload-image")
1207
+ async def route_upload_image(request: Request):
1208
+ try:
1209
+ import uuid
1210
+ import base64
1211
+
1212
+ # 接收 JSON 而不是 Multipart,绕过 python-multipart 缺失问题
1213
+ data = await request.json()
1214
+ b64_data = data.get("image")
1215
+ filename = data.get("filename", "image.png")
1216
+
1217
+ if not b64_data:
1218
+ return JSONResponse(
1219
+ status_code=400, content={"error": "No image data provided"}
1220
+ )
1221
+
1222
+ # 处理 base64 头部 (例如 data:image/png;base64,...)
1223
+ if "," in b64_data:
1224
+ b64_data = b64_data.split(",")[1]
1225
+
1226
+ image_bytes = base64.b64decode(b64_data)
1227
+
1228
+ # 确保上传目录存在
1229
+ upload_dir = get_dynamic_output_path() / "uploads"
1230
+ upload_dir.mkdir(parents=True, exist_ok=True)
1231
+
1232
+ safe_filename = "".join([c for c in filename if c.isalnum() or c in "._-"])
1233
+ file_path = upload_dir / f"up_{uuid.uuid4().hex[:6]}_{safe_filename}"
1234
+
1235
+ with file_path.open("wb") as buffer:
1236
+ buffer.write(image_bytes)
1237
+
1238
+ return {"status": "success", "path": str(file_path)}
1239
+ except Exception as e:
1240
+ import traceback
1241
+
1242
+ error_msg = f"{type(e).__name__}: {str(e)}\n{traceback.format_exc()}"
1243
+ print(f"Upload error: {error_msg}")
1244
+ return JSONResponse(
1245
+ status_code=500, content={"error": str(e), "detail": error_msg}
1246
+ )
1247
+
1248
+ # ------------------
1249
+
1250
+ @app.get("/api/system/history")
1251
+ async def route_get_history(request: Request):
1252
+ try:
1253
+ import os
1254
+
1255
+ page = int(request.query_params.get("page", 1))
1256
+ limit = int(request.query_params.get("limit", 20))
1257
+
1258
+ history = []
1259
+ dyn_path = get_dynamic_output_path()
1260
+ if dyn_path.exists():
1261
+ for filename in os.listdir(dyn_path):
1262
+ if filename == "uploads":
1263
+ continue
1264
+ full_path = dyn_path / filename
1265
+ if full_path.is_file() and filename.lower().endswith(
1266
+ (".mp4", ".png", ".jpg", ".webp")
1267
+ ):
1268
+ mtime = os.path.getmtime(full_path)
1269
+ history.append(
1270
+ {
1271
+ "filename": filename,
1272
+ "type": "video"
1273
+ if filename.lower().endswith(".mp4")
1274
+ else "image",
1275
+ "mtime": mtime,
1276
+ "fullpath": str(full_path),
1277
+ }
1278
+ )
1279
+ history.sort(key=lambda x: x["mtime"], reverse=True)
1280
+
1281
+ total_items = len(history)
1282
+ total_pages = (total_items + limit - 1) // limit
1283
+ start_idx = (page - 1) * limit
1284
+ end_idx = start_idx + limit
1285
+
1286
+ return {
1287
+ "status": "success",
1288
+ "history": history[start_idx:end_idx],
1289
+ "total_pages": total_pages,
1290
+ "current_page": page,
1291
+ "total_items": total_items,
1292
+ }
1293
+ except Exception as e:
1294
+ return JSONResponse(status_code=500, content={"error": str(e)})
1295
+
1296
+ @app.post("/api/system/delete-file")
1297
+ async def route_delete_file(request: Request):
1298
+ try:
1299
+ import os
1300
+
1301
+ data = await request.json()
1302
+ filename = data.get("filename", "")
1303
+
1304
+ if not filename:
1305
+ return JSONResponse(
1306
+ status_code=400, content={"error": "Filename is required"}
1307
+ )
1308
+
1309
+ dyn_path = get_dynamic_output_path()
1310
+ file_path = dyn_path / filename
1311
+
1312
+ if file_path.exists() and file_path.is_file():
1313
+ file_path.unlink()
1314
+ return {"status": "success", "message": "File deleted"}
1315
+ else:
1316
+ return JSONResponse(
1317
+ status_code=404, content={"error": "File not found"}
1318
+ )
1319
+ except Exception as e:
1320
+ return JSONResponse(status_code=500, content={"error": str(e)})
1321
+
1322
+ # 路由注册
1323
+ app.include_router(health_router)
1324
+ app.include_router(generation_router)
1325
+ app.include_router(models_router)
1326
+ app.include_router(settings_router)
1327
+ app.include_router(image_gen_router)
1328
+ app.include_router(suggest_gap_prompt_router)
1329
+ app.include_router(retake_router)
1330
+ app.include_router(ic_lora_router)
1331
+ app.include_router(runtime_policy_router)
1332
+
1333
# --- [Safety patch] status-bar display fix ---

# --- Final status-bar patch: as long as the service is running and the GPU
# has not died, report "ready" ---
from handlers.health_handler import HealthHandler

# Guard attribute prevents double-patching if this code runs twice.
if not hasattr(HealthHandler, "_fixed_v2"):
    _orig_get_health = HealthHandler.get_health

    def patched_health_v2(self):
        """Wrap get_health and force models_loaded to True.

        If the backend still reports models as not loaded, treat it as a
        transient False caused by asynchronous loading: as long as the API
        responds and the state service exists, flip the flag so the frontend
        shows "standby ready" and allows interaction.
        """
        resp = _orig_get_health(self)
        if not resp.models_loaded:
            resp.models_loaded = True
        return resp

    HealthHandler.get_health = patched_health_v2
    HealthHandler._fixed_v2 = True
1353
+ # ------------------------------------------------------------
1354
+
1355
# --- Fix the VRAM sampling pointer so the monitor always tracks the GPU
# currently selected for work ---
from services.gpu_info.gpu_info_impl import GpuInfoImpl

# Guard attribute prevents double-patching.
if not hasattr(GpuInfoImpl, "_fixed_vram_patch"):
    _orig_get_gpu_info = GpuInfoImpl.get_gpu_info

    def patched_get_gpu_info(self):
        """Report name/VRAM of the configured CUDA device via NVML.

        Falls back to the stock implementation when CUDA is unavailable or
        NVML fails for any reason.
        """
        import torch

        if self.get_cuda_available():
            # Default to device 0 unless the runtime config pins an index.
            idx = 0
            if (
                hasattr(handler.config.device, "index")
                and handler.config.device.index is not None
            ):
                idx = handler.config.device.index
            try:
                import pynvml

                pynvml.nvmlInit()
                handle = pynvml.nvmlDeviceGetHandleByIndex(idx)
                raw_name = pynvml.nvmlDeviceGetName(handle)
                # NVML may return bytes or str depending on bindings version.
                name = (
                    raw_name.decode("utf-8", errors="replace")
                    if isinstance(raw_name, bytes)
                    else str(raw_name)
                )
                memory = pynvml.nvmlDeviceGetMemoryInfo(handle)
                pynvml.nvmlShutdown()
                return {
                    "name": f"{name} [ID: {idx}]",
                    "vram": memory.total // (1024 * 1024),  # MiB
                    "vramUsed": memory.used // (1024 * 1024),  # MiB
                }
            except Exception:
                # NVML unavailable/failed: fall through to the stock impl.
                pass
        return _orig_get_gpu_info(self)

    GpuInfoImpl.get_gpu_info = patched_get_gpu_info
    GpuInfoImpl._fixed_vram_patch = True
1395
+
1396
+ return app
LTX2.3/patches/handlers/video_generation_handler.py ADDED
@@ -0,0 +1,673 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Video generation orchestration handler."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import logging
6
+ import os
7
+ import tempfile
8
+ import time
9
+ import uuid
10
+ from datetime import datetime
11
+ from pathlib import Path
12
+ from threading import RLock
13
+ from typing import TYPE_CHECKING
14
+
15
+ from PIL import Image
16
+
17
+ from api_types import (
18
+ GenerateVideoRequest,
19
+ GenerateVideoResponse,
20
+ ImageConditioningInput,
21
+ VideoCameraMotion,
22
+ )
23
+ from _routes._errors import HTTPError
24
+ from handlers.base import StateHandlerBase
25
+ from handlers.generation_handler import GenerationHandler
26
+ from handlers.pipelines_handler import PipelinesHandler
27
+ from handlers.text_handler import TextHandler
28
+ from runtime_config.model_download_specs import resolve_model_path
29
+ from server_utils.media_validation import (
30
+ normalize_optional_path,
31
+ validate_audio_file,
32
+ validate_image_file,
33
+ )
34
+ from services.interfaces import LTXAPIClient
35
+ from state.app_state_types import AppState
36
+ from state.app_settings import should_video_generate_with_ltx_api
37
+
38
+ if TYPE_CHECKING:
39
+ from runtime_config.runtime_config import RuntimeConfig
40
+
41
+ logger = logging.getLogger(__name__)
42
+
43
# Local model aliases -> LTX API model identifiers used when generation is
# forced through the hosted API.
FORCED_API_MODEL_MAP: dict[str, str] = {
    "fast": "ltx-2-3-fast",
    "pro": "ltx-2-3-pro",
}
# Resolution label -> {aspect ratio -> "WxH"} strings accepted by the API.
FORCED_API_RESOLUTION_MAP: dict[str, dict[str, str]] = {
    "1080p": {"16:9": "1920x1080", "9:16": "1080x1920"},
    "1440p": {"16:9": "2560x1440", "9:16": "1440x2560"},
    "2160p": {"16:9": "3840x2160", "9:16": "2160x3840"},
}
# Audio-to-video API calls always run at this fixed resolution.
A2V_FORCED_API_RESOLUTION = "1920x1080"
# Only these aspect ratios / frame rates are accepted on the forced-API path.
FORCED_API_ALLOWED_ASPECT_RATIOS = {"16:9", "9:16"}
FORCED_API_ALLOWED_FPS = {24, 25, 48, 50}
55
+
56
+
57
+ def _get_allowed_durations(model_id: str, resolution_label: str, fps: int) -> set[int]:
58
+ if model_id == "ltx-2-3-fast" and resolution_label == "1080p" and fps in {24, 25}:
59
+ return {6, 8, 10, 12, 14, 16, 18, 20}
60
+ return {6, 8, 10}
61
+
62
+
63
+ class VideoGenerationHandler(StateHandlerBase):
64
    def __init__(
        self,
        state: AppState,
        lock: RLock,
        generation_handler: GenerationHandler,
        pipelines_handler: PipelinesHandler,
        text_handler: TextHandler,
        ltx_api_client: LTXAPIClient,
        config: RuntimeConfig,
    ) -> None:
        """Wire the collaborating handlers used during video generation.

        Args:
            state: Shared application state (settings, etc.).
            lock: Re-entrant lock guarding shared-state access.
            generation_handler: Tracks generation lifecycle and progress.
            pipelines_handler: Loads and owns the inference pipelines.
            text_handler: Prepares prompt/text encodings.
            ltx_api_client: Client for the hosted LTX API path.
            config: Runtime configuration.
        """
        super().__init__(state, lock, config)
        self._generation = generation_handler
        self._pipelines = pipelines_handler
        self._text = text_handler
        self._ltx_api_client = ltx_api_client
79
+
80
+ def generate(self, req: GenerateVideoRequest) -> GenerateVideoResponse:
81
+ if should_video_generate_with_ltx_api(
82
+ force_api_generations=self.config.force_api_generations,
83
+ settings=self.state.app_settings,
84
+ ):
85
+ return self._generate_forced_api(req)
86
+
87
+ if self._generation.is_generation_running():
88
+ raise HTTPError(409, "Generation already in progress")
89
+
90
+ resolution = req.resolution
91
+
92
+ duration = int(float(req.duration))
93
+ fps = int(float(req.fps))
94
+
95
+ audio_path = normalize_optional_path(req.audioPath)
96
+ if audio_path:
97
+ return self._generate_a2v(req, duration, fps, audio_path=audio_path)
98
+
99
+ logger.info("Resolution %s - using fast pipeline", resolution)
100
+
101
+ RESOLUTION_MAP_16_9: dict[str, tuple[int, int]] = {
102
+ "540p": (960, 540),
103
+ "720p": (1280, 720),
104
+ "1080p": (1920, 1080),
105
+ }
106
+
107
+ def get_16_9_size(res: str) -> tuple[int, int]:
108
+ return RESOLUTION_MAP_16_9.get(res, (1280, 720))
109
+
110
+ def get_9_16_size(res: str) -> tuple[int, int]:
111
+ w, h = get_16_9_size(res)
112
+ return h, w
113
+
114
+ match req.aspectRatio:
115
+ case "9:16":
116
+ width, height = get_9_16_size(resolution)
117
+ case "16:9":
118
+ width, height = get_16_9_size(resolution)
119
+
120
+ num_frames = self._compute_num_frames(duration, fps)
121
+
122
+ image = None
123
+ image_path = normalize_optional_path(req.imagePath)
124
+ if image_path:
125
+ image = self._prepare_image(image_path, width, height)
126
+ logger.info("Image: %s -> %sx%s", image_path, width, height)
127
+
128
+ generation_id = self._make_generation_id()
129
+ seed = self._resolve_seed()
130
+
131
+ try:
132
+ self._pipelines.load_gpu_pipeline("fast", should_warm=False)
133
+ self._generation.start_generation(generation_id)
134
+
135
+ output_path = self.generate_video(
136
+ prompt=req.prompt,
137
+ image=image,
138
+ height=height,
139
+ width=width,
140
+ num_frames=num_frames,
141
+ fps=fps,
142
+ seed=seed,
143
+ camera_motion=req.cameraMotion,
144
+ negative_prompt=req.negativePrompt,
145
+ )
146
+
147
+ self._generation.complete_generation(output_path)
148
+ return GenerateVideoResponse(status="complete", video_path=output_path)
149
+
150
+ except Exception as e:
151
+ self._generation.fail_generation(str(e))
152
+ if "cancelled" in str(e).lower():
153
+ logger.info("Generation cancelled by user")
154
+ return GenerateVideoResponse(status="cancelled")
155
+
156
+ raise HTTPError(500, str(e)) from e
157
+
158
    def generate_video(
        self,
        prompt: str,
        image: Image.Image | None,
        height: int,
        width: int,
        num_frames: int,
        fps: float,
        seed: int,
        camera_motion: VideoCameraMotion,
        negative_prompt: str,
    ) -> str:
        """Run the local "fast" pipeline and return the output video path.

        Covers both t2v (no image) and i2v (conditioning image at frame 0).
        Emits progress updates via the generation handler, logs per-phase
        timings, and raises RuntimeError on cancellation or missing models.

        NOTE(review): ``negative_prompt`` is accepted but never forwarded to
        the pipeline in this method — confirm whether the fast pipeline
        supports it.
        """
        t_total_start = time.perf_counter()
        gen_mode = "i2v" if image is not None else "t2v"
        logger.info(
            "[%s] Generation started (model=fast, %dx%d, %d frames, %d fps)",
            gen_mode,
            width,
            height,
            num_frames,
            int(fps),
        )

        if self._generation.is_generation_cancelled():
            raise RuntimeError("Generation was cancelled")

        # Refuse to start when the checkpoint has not been downloaded yet.
        if not resolve_model_path(
            self.models_dir, self.config.model_download_specs, "checkpoint"
        ).exists():
            raise RuntimeError(
                "Models not downloaded. Please download the AI models first using the Model Status menu."
            )

        total_steps = 8

        self._generation.update_progress("loading_model", 5, 0, total_steps)
        t_load_start = time.perf_counter()
        pipeline_state = self._pipelines.load_gpu_pipeline("fast", should_warm=False)
        t_load_end = time.perf_counter()
        logger.info("[%s] Pipeline load: %.2fs", gen_mode, t_load_end - t_load_start)

        self._generation.update_progress("encoding_text", 10, 0, total_steps)

        # Append the camera-motion suffix ("" for unknown motions).
        enhanced_prompt = prompt + self.config.camera_motion_prompts.get(
            camera_motion, ""
        )

        # Persist the conditioning image to a temp PNG for the pipeline.
        images: list[ImageConditioningInput] = []
        temp_image_path: str | None = None
        if image is not None:
            temp_image_path = tempfile.NamedTemporaryFile(
                suffix=".png", delete=False
            ).name
            image.save(temp_image_path)
            images = [
                ImageConditioningInput(path=temp_image_path, frame_idx=0, strength=1.0)
            ]

        output_path = self._make_output_path()

        try:
            # Prompt enhancement only applies on the API-encoding path and is
            # gated by separate i2v / t2v settings.
            settings = self.state.app_settings
            use_api_encoding = not self._text.should_use_local_encoding()
            if image is not None:
                enhance = use_api_encoding and settings.prompt_enhancer_enabled_i2v
            else:
                enhance = use_api_encoding and settings.prompt_enhancer_enabled_t2v

            encoding_method = "api" if use_api_encoding else "local"
            t_text_start = time.perf_counter()
            self._text.prepare_text_encoding(enhanced_prompt, enhance_prompt=enhance)
            t_text_end = time.perf_counter()
            logger.info(
                "[%s] Text encoding (%s): %.2fs",
                gen_mode,
                encoding_method,
                t_text_end - t_text_start,
            )

            self._generation.update_progress("inference", 15, 0, total_steps)

            # Snap dimensions to multiples of 64.
            # NOTE(review): other code paths in this patch set align to 32 —
            # confirm which alignment the fast pipeline actually requires.
            height = round(height / 64) * 64
            width = round(width / 64) * 64

            t_inference_start = time.perf_counter()
            pipeline_state.pipeline.generate(
                prompt=enhanced_prompt,
                seed=seed,
                height=height,
                width=width,
                num_frames=num_frames,
                frame_rate=fps,
                images=images,
                output_path=str(output_path),
            )
            t_inference_end = time.perf_counter()
            logger.info(
                "[%s] Inference: %.2fs", gen_mode, t_inference_end - t_inference_start
            )

            # Cancellation during inference: discard the partial output.
            if self._generation.is_generation_cancelled():
                if output_path.exists():
                    output_path.unlink()
                raise RuntimeError("Generation was cancelled")

            t_total_end = time.perf_counter()
            logger.info(
                "[%s] Total generation: %.2fs (load=%.2fs, text=%.2fs, inference=%.2fs)",
                gen_mode,
                t_total_end - t_total_start,
                t_load_end - t_load_start,
                t_text_end - t_text_start,
                t_inference_end - t_inference_start,
            )

            self._generation.update_progress("complete", 100, total_steps, total_steps)
            return str(output_path)
        finally:
            # Always release API embeddings and the temp conditioning image.
            self._text.clear_api_embeddings()
            if temp_image_path and os.path.exists(temp_image_path):
                os.unlink(temp_image_path)
279
+
280
    def _generate_a2v(
        self, req: GenerateVideoRequest, duration: int, fps: int, *, audio_path: str
    ) -> GenerateVideoResponse:
        """Audio-to-video generation on the local "pro" A2V pipeline.

        Supports optional first/last frame conditioning plus a single middle
        conditioning image, in both landscape and portrait orientations.
        """
        if req.model != "pro":
            logger.warning(
                "A2V local requested with model=%s; A2V always uses pro pipeline",
                req.model,
            )
        validated_audio_path = validate_audio_file(audio_path)
        audio_path_str = str(validated_audio_path)

        # Landscape base sizes; portrait is derived by swapping the axes.
        RESOLUTION_MAP: dict[str, tuple[int, int]] = {
            "540p": (960, 540),
            "720p": (1280, 720),
            "1080p": (1920, 1080),
        }

        base_w, base_h = RESOLUTION_MAP.get(req.resolution, (1280, 720))

        # Pick orientation from the requested aspect ratio.
        if req.aspectRatio == "9:16":
            width, height = base_h, base_w  # portrait
        else:
            width, height = base_w, base_h  # landscape

        num_frames = self._compute_num_frames(duration, fps)

        image = None
        temp_image_path: str | None = None
        image_path = normalize_optional_path(req.imagePath)
        if image_path:
            image = self._prepare_image(image_path, width, height)

        # Optional first/last frame conditioning paths.
        start_frame_path = normalize_optional_path(getattr(req, "startFramePath", None))
        end_frame_path = normalize_optional_path(getattr(req, "endFramePath", None))

        seed = self._resolve_seed()

        generation_id = self._make_generation_id()

        temp_image_paths: list[str] = []
        try:
            a2v_state = self._pipelines.load_a2v_pipeline()
            self._generation.start_generation(generation_id)

            enhanced_prompt = req.prompt + self.config.camera_motion_prompts.get(
                req.cameraMotion, ""
            )
            neg = (
                req.negativePrompt
                if req.negativePrompt
                else self.config.default_negative_prompt
            )

            images: list[ImageConditioningInput] = []
            # NOTE(review): re-declared here, shadowing the list created just
            # before the try — redundant but harmless; kept as-is.
            temp_image_paths: list[str] = []

            # First frame (conditions frame 0).
            if start_frame_path:
                start_img = self._prepare_image(start_frame_path, width, height)
                temp_start_path = tempfile.NamedTemporaryFile(
                    suffix=".png", delete=False
                ).name
                start_img.save(temp_start_path)
                temp_image_paths.append(temp_start_path)
                images.append(
                    ImageConditioningInput(
                        path=temp_start_path, frame_idx=0, strength=1.0
                    )
                )

            # Middle image only applies when no explicit first frame exists;
            # it also conditions frame 0.
            if image is not None and not start_frame_path:
                temp_image_path = tempfile.NamedTemporaryFile(
                    suffix=".png", delete=False
                ).name
                image.save(temp_image_path)
                temp_image_paths.append(temp_image_path)
                images.append(
                    ImageConditioningInput(
                        path=temp_image_path, frame_idx=0, strength=1.0
                    )
                )

            # Last frame, pinned to the final latent index.
            # NOTE(review): `(num_frames - 1) // 8 + 1 - 1` assumes 8 frames
            # per latent step — confirm against the pipeline's scale factors.
            if end_frame_path:
                last_latent_idx = (num_frames - 1) // 8 + 1 - 1
                end_img = self._prepare_image(end_frame_path, width, height)
                temp_end_path = tempfile.NamedTemporaryFile(
                    suffix=".png", delete=False
                ).name
                end_img.save(temp_end_path)
                temp_image_paths.append(temp_end_path)
                images.append(
                    ImageConditioningInput(
                        path=temp_end_path, frame_idx=last_latent_idx, strength=1.0
                    )
                )

            output_path = self._make_output_path()

            total_steps = 11  # distilled: 8 steps (stage 1) + 3 steps (stage 2)

            # Prompt-enhancement gating mirrors the fast-pipeline logic.
            a2v_settings = self.state.app_settings
            a2v_use_api = not self._text.should_use_local_encoding()
            if image is not None:
                a2v_enhance = a2v_use_api and a2v_settings.prompt_enhancer_enabled_i2v
            else:
                a2v_enhance = a2v_use_api and a2v_settings.prompt_enhancer_enabled_t2v

            self._generation.update_progress("loading_model", 5, 0, total_steps)
            self._generation.update_progress("encoding_text", 10, 0, total_steps)
            self._text.prepare_text_encoding(
                enhanced_prompt, enhance_prompt=a2v_enhance
            )
            self._generation.update_progress("inference", 15, 0, total_steps)

            a2v_state.pipeline.generate(
                prompt=enhanced_prompt,
                negative_prompt=neg,
                seed=seed,
                height=height,
                width=width,
                num_frames=num_frames,
                frame_rate=fps,
                num_inference_steps=total_steps,
                images=images,
                audio_path=audio_path_str,
                audio_start_time=0.0,
                audio_max_duration=None,
                output_path=str(output_path),
            )

            # Discard partial output if the user cancelled mid-run.
            if self._generation.is_generation_cancelled():
                if output_path.exists():
                    output_path.unlink()
                raise RuntimeError("Generation was cancelled")

            self._generation.update_progress("complete", 100, total_steps, total_steps)
            self._generation.complete_generation(str(output_path))
            return GenerateVideoResponse(status="complete", video_path=str(output_path))

        except Exception as e:
            self._generation.fail_generation(str(e))
            if "cancelled" in str(e).lower():
                logger.info("Generation cancelled by user")
                return GenerateVideoResponse(status="cancelled")
            raise HTTPError(500, str(e)) from e
        finally:
            self._text.clear_api_embeddings()
            # Best-effort removal of every temp conditioning image.
            for tmp_path in temp_image_paths:
                if tmp_path and os.path.exists(tmp_path):
                    try:
                        os.unlink(tmp_path)
                    except Exception:
                        pass
            if temp_image_path and os.path.exists(temp_image_path):
                try:
                    os.unlink(temp_image_path)
                except Exception:
                    pass
444
+
445
def _prepare_image(self, image_path: str, width: int, height: int) -> Image.Image:
    """Load an image, scale it to fully cover (width, height), and center-crop.

    The file is validated, decoded as RGB, resized so that it covers the
    target box (one axis may overflow), then cropped about the center to
    exactly (width, height).

    Raises:
        HTTPError: 400 if the file cannot be opened as an image.
    """
    checked_path = validate_image_file(image_path)
    try:
        source = Image.open(checked_path).convert("RGB")
    except Exception:
        raise HTTPError(400, f"Invalid image file: {image_path}") from None

    src_w, src_h = source.size
    # Cover-scale: match the constrained axis, let the other overflow.
    if src_w / src_h > width / height:
        scaled_h = height
        scaled_w = int(src_w * (height / src_h))
    else:
        scaled_w = width
        scaled_h = int(src_h * (width / src_w))
    scaled = source.resize((scaled_w, scaled_h), Image.Resampling.LANCZOS)

    # Center-crop the overflow down to the exact target size.
    offset_x = (scaled_w - width) // 2
    offset_y = (scaled_h - height) // 2
    return scaled.crop((offset_x, offset_y, offset_x + width, offset_y + height))
464
+
465
+ @staticmethod
466
+ def _make_generation_id() -> str:
467
+ return uuid.uuid4().hex[:8]
468
+
469
+ @staticmethod
470
+ def _compute_num_frames(duration: int, fps: int) -> int:
471
+ n = ((duration * fps) // 8) * 8 + 1
472
+ return max(n, 9)
473
+
474
def _resolve_seed(self) -> int:
    """Return the user-locked seed when enabled, else a time-derived seed."""
    app_settings = self.state.app_settings
    if not app_settings.seed_locked:
        # Fresh pseudo-seed from wall-clock time, kept within int32 range.
        return int(time.time()) % 2147483647
    logger.info("Using locked seed: %s", app_settings.locked_seed)
    return app_settings.locked_seed
480
+
481
def _make_output_path(self) -> Path:
    """Build a unique .mp4 output path under the configured outputs directory."""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"ltx2_video_{stamp}_{self._make_generation_id()}.mp4"
    return self.config.outputs_dir / filename
487
+
488
def _generate_forced_api(self, req: GenerateVideoRequest) -> GenerateVideoResponse:
    """Run a generation entirely through the remote LTX API ("forced API" route).

    Dispatches to one of three API calls based on the request inputs:
    audio (+optional image) -> audio-to-video, image only -> image-to-video,
    neither -> text-to-video. Progress percentages are reported at fixed
    milestones; the resulting video bytes are written to a local output path.

    Raises:
        HTTPError: 409 if a generation is already running; 400 for missing
            API key or invalid model/resolution/aspect-ratio/duration/fps;
            500 for unexpected failures.
    """
    if self._generation.is_generation_running():
        raise HTTPError(409, "Generation already in progress")

    generation_id = self._make_generation_id()
    self._generation.start_api_generation(generation_id)

    # Empty/whitespace-only paths are normalized to falsy values here.
    audio_path = normalize_optional_path(req.audioPath)
    image_path = normalize_optional_path(req.imagePath)
    has_input_audio = bool(audio_path)
    has_input_image = bool(image_path)

    try:
        self._generation.update_progress("validating_request", 5, None, None)

        api_key = self.state.app_settings.ltx_api_key.strip()
        logger.info(
            "Forced API generation route selected (key_present=%s)", bool(api_key)
        )
        if not api_key:
            raise HTTPError(400, "PRO_API_KEY_REQUIRED")

        # Map the user-facing model name / resolution label / aspect ratio
        # onto the API's identifiers, rejecting anything unmapped.
        requested_model = req.model.strip().lower()
        api_model_id = FORCED_API_MODEL_MAP.get(requested_model)
        if api_model_id is None:
            raise HTTPError(400, "INVALID_FORCED_API_MODEL")

        resolution_label = req.resolution
        resolution_by_aspect = FORCED_API_RESOLUTION_MAP.get(resolution_label)
        if resolution_by_aspect is None:
            raise HTTPError(400, "INVALID_FORCED_API_RESOLUTION")

        aspect_ratio = req.aspectRatio.strip()
        if aspect_ratio not in FORCED_API_ALLOWED_ASPECT_RATIOS:
            raise HTTPError(400, "INVALID_FORCED_API_ASPECT_RATIO")

        api_resolution = resolution_by_aspect[aspect_ratio]

        prompt = req.prompt

        if self._generation.is_generation_cancelled():
            raise RuntimeError("Generation was cancelled")

        if has_input_audio:
            # --- Audio-to-video branch ---
            # A2V only supports the 'pro' model and one fixed resolution;
            # silently override (with a warning) rather than reject.
            if requested_model != "pro":
                logger.warning(
                    "A2V requested with model=%s; overriding to 'pro'",
                    requested_model,
                )
                api_model_id = FORCED_API_MODEL_MAP["pro"]
            if api_resolution != A2V_FORCED_API_RESOLUTION:
                logger.warning(
                    "A2V requested with resolution=%s; overriding to '%s'",
                    api_resolution,
                    A2V_FORCED_API_RESOLUTION,
                )
                api_resolution = A2V_FORCED_API_RESOLUTION
            validated_audio_path = validate_audio_file(audio_path)
            validated_image_path: Path | None = None
            if image_path is not None:
                validated_image_path = validate_image_file(image_path)

            self._generation.update_progress("uploading_audio", 20, None, None)
            audio_uri = self._ltx_api_client.upload_file(
                api_key=api_key,
                file_path=str(validated_audio_path),
            )
            image_uri: str | None = None
            if validated_image_path is not None:
                self._generation.update_progress("uploading_image", 35, None, None)
                image_uri = self._ltx_api_client.upload_file(
                    api_key=api_key,
                    file_path=str(validated_image_path),
                )
            self._generation.update_progress("inference", 55, None, None)
            video_bytes = self._ltx_api_client.generate_audio_to_video(
                api_key=api_key,
                prompt=prompt,
                audio_uri=audio_uri,
                image_uri=image_uri,
                model=api_model_id,
                resolution=api_resolution,
            )
            self._generation.update_progress("downloading_output", 85, None, None)
        elif has_input_image:
            # --- Image-to-video branch ---
            validated_image_path = validate_image_file(image_path)

            duration = self._parse_forced_numeric_field(
                req.duration, "INVALID_FORCED_API_DURATION"
            )
            fps = self._parse_forced_numeric_field(
                req.fps, "INVALID_FORCED_API_FPS"
            )
            if fps not in FORCED_API_ALLOWED_FPS:
                raise HTTPError(400, "INVALID_FORCED_API_FPS")
            # Allowed durations depend on model/resolution/fps combination.
            if duration not in _get_allowed_durations(
                api_model_id, resolution_label, fps
            ):
                raise HTTPError(400, "INVALID_FORCED_API_DURATION")

            generate_audio = self._parse_audio_flag(req.audio)
            self._generation.update_progress("uploading_image", 20, None, None)
            image_uri = self._ltx_api_client.upload_file(
                api_key=api_key,
                file_path=str(validated_image_path),
            )
            self._generation.update_progress("inference", 55, None, None)
            video_bytes = self._ltx_api_client.generate_image_to_video(
                api_key=api_key,
                prompt=prompt,
                image_uri=image_uri,
                model=api_model_id,
                resolution=api_resolution,
                duration=float(duration),
                fps=float(fps),
                generate_audio=generate_audio,
                camera_motion=req.cameraMotion,
            )
            self._generation.update_progress("downloading_output", 85, None, None)
        else:
            # --- Text-to-video branch (no audio, no image) ---
            duration = self._parse_forced_numeric_field(
                req.duration, "INVALID_FORCED_API_DURATION"
            )
            fps = self._parse_forced_numeric_field(
                req.fps, "INVALID_FORCED_API_FPS"
            )
            if fps not in FORCED_API_ALLOWED_FPS:
                raise HTTPError(400, "INVALID_FORCED_API_FPS")
            if duration not in _get_allowed_durations(
                api_model_id, resolution_label, fps
            ):
                raise HTTPError(400, "INVALID_FORCED_API_DURATION")

            generate_audio = self._parse_audio_flag(req.audio)
            self._generation.update_progress("inference", 55, None, None)
            video_bytes = self._ltx_api_client.generate_text_to_video(
                api_key=api_key,
                prompt=prompt,
                model=api_model_id,
                resolution=api_resolution,
                duration=float(duration),
                fps=float(fps),
                generate_audio=generate_audio,
                camera_motion=req.cameraMotion,
            )
            self._generation.update_progress("downloading_output", 85, None, None)

        if self._generation.is_generation_cancelled():
            raise RuntimeError("Generation was cancelled")

        output_path = self._write_forced_api_video(video_bytes)
        # Re-check cancellation after the write so a cancelled run leaves no file.
        if self._generation.is_generation_cancelled():
            output_path.unlink(missing_ok=True)
            raise RuntimeError("Generation was cancelled")

        self._generation.update_progress("complete", 100, None, None)
        self._generation.complete_generation(str(output_path))
        return GenerateVideoResponse(status="complete", video_path=str(output_path))
    except HTTPError as e:
        self._generation.fail_generation(e.detail)
        raise
    except Exception as e:
        self._generation.fail_generation(str(e))
        # Cancellation surfaces as RuntimeError; report it as a soft status,
        # not an HTTP failure.
        if "cancelled" in str(e).lower():
            logger.info("Generation cancelled by user")
            return GenerateVideoResponse(status="cancelled")
        raise HTTPError(500, str(e)) from e
655
+
656
def _write_forced_api_video(self, video_bytes: bytes) -> Path:
    """Persist API-returned video bytes to a freshly generated output path."""
    destination = self._make_output_path()
    destination.write_bytes(video_bytes)
    return destination
660
+
661
+ @staticmethod
662
+ def _parse_forced_numeric_field(raw_value: str, error_detail: str) -> int:
663
+ try:
664
+ return int(float(raw_value))
665
+ except (TypeError, ValueError):
666
+ raise HTTPError(400, error_detail) from None
667
+
668
+ @staticmethod
669
+ def _parse_audio_flag(audio_value: str | bool) -> bool:
670
+ if isinstance(audio_value, bool):
671
+ return audio_value
672
+ normalized = audio_value.strip().lower()
673
+ return normalized in {"1", "true", "yes", "on"}
LTX2.3/patches/launcher.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Launcher that boots the LTX backend with patched modules taking priority.

Prepends the local patches directory to sys.path ahead of the stock backend
so patched copies of backend modules shadow the originals, then starts the
uvicorn server.
"""
import sys
import os

# The patches directory defaults to the folder containing this launcher so
# the tool works from any install location (the original hardcoded a single
# user's desktop path). Both directories can be overridden via environment.
patch_dir = os.environ.get(
    "LTX_PATCHES_DIR", os.path.dirname(os.path.abspath(__file__))
)
backend_dir = os.environ.get(
    "LTX_BACKEND_DIR", r"C:\Program Files\LTX Desktop\resources\backend"
)

# Defensive cleanup: strip any pre-existing references to the stock backend
# directory, plus empty/cwd entries, so patched modules cannot be shadowed.
sys.path = [
    p for p in sys.path
    if p and os.path.normpath(p) != os.path.normpath(backend_dir)
]
sys.path = [p for p in sys.path if p and p != "."]

# Priority injection: patched modules are resolved before the stock backend.
sys.path.insert(0, patch_dir)
sys.path.insert(1, backend_dir)

import uvicorn
from ltx2_server import app

if __name__ == '__main__':
    uvicorn.run(app, host="0.0.0.0", port=3000, log_level="info", access_log=False)
LTX2.3/run.bat ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
@echo off
setlocal
title LTX-2 Cinematic Workstation

echo =========================================================
echo LTX-2 Cinematic UI Booting...
echo =========================================================
echo.

:: Bundled interpreter shipped with LTX Desktop (preferred when present),
:: and the local UI endpoint the browser-poller waits for.
set "LTX_PY=%USERPROFILE%\AppData\Local\LTXDesktop\python\python.exe"
set "LTX_UI_URL=http://127.0.0.1:4000/"

if exist "%LTX_PY%" (
    echo [SUCCESS] LTX Bundled Python environment detected!
    echo [INFO] Browser will open automatically when UI is ready...
    call :open_browser_when_ready
    echo [INFO] Starting workspace natively...
    echo ---------------------------------------------------------
    "%LTX_PY%" main.py
    pause
    exit /b
)

:: Bundled Python missing - fall back to whatever python is on PATH.
python --version >nul 2>&1
if %errorlevel% equ 0 (
    echo [WARNING] LTX Bundled Python not found.
    echo [INFO] Browser will open automatically when UI is ready...
    call :open_browser_when_ready
    echo [INFO] Falling back to global Python environment...
    echo ---------------------------------------------------------
    python main.py
    pause
    exit /b
)

echo [ERROR] FATAL: No Python interpreter found on this system.
echo [INFO] Please run install.bat to download and set up Python!
echo.
pause
:: Propagate a failure status so callers can detect the missing interpreter.
exit /b 1

:open_browser_when_ready
:: Background PowerShell poller: opens the UI URL once the server responds,
:: giving up after 60 seconds. Shared by both launch branches.
start "" powershell -NoProfile -WindowStyle Hidden -Command "$ProgressPreference='SilentlyContinue'; $deadline=(Get-Date).AddSeconds(60); while((Get-Date) -lt $deadline){ try { Invoke-WebRequest -UseBasicParsing '%LTX_UI_URL%' -TimeoutSec 2 | Out-Null; Start-Process '%LTX_UI_URL%'; exit 0 } catch { Start-Sleep -Seconds 1 } }"
exit /b 0
+ pause