lalopenguin committed on
Commit
a7df645
·
1 Parent(s): a9849ca

Fix GPU context for Zero GPU

Browse files
Files changed (2) hide show
  1. README.md +1 -1
  2. app.py +16 -24
README.md CHANGED
@@ -7,7 +7,7 @@ sdk: gradio
7
  sdk_version: 5.42.0
8
  app_file: app.py
9
  pinned: false
10
- suggested_hardware: a10g-small
11
  ---
12
 
13
  # LTX Video Studio
 
7
  sdk_version: 5.42.0
8
  app_file: app.py
9
  pinned: false
10
+ suggested_hardware: zero-a10g
11
  ---
12
 
13
  # LTX Video Studio
app.py CHANGED
@@ -152,21 +152,6 @@ input[type="range"] {
152
  }
153
  """
154
 
155
- # ============================================
156
- # MODEL LOADING
157
- # ============================================
158
- pipe = None
159
-
160
- def load_model():
161
- global pipe
162
- if pipe is None:
163
- pipe = LTXPipeline.from_pretrained(
164
- "Lightricks/LTX-Video",
165
- torch_dtype=torch.bfloat16
166
- )
167
- pipe.to("cuda")
168
- return pipe
169
-
170
  # ============================================
171
  # VIDEO GENERATION FUNCTION
172
  # ============================================
@@ -188,32 +173,39 @@ def generate_video(
188
  return None
189
 
190
  progress(0, desc="Loading model...")
191
- pipeline = load_model()
 
 
 
 
 
 
 
192
 
193
  # Handle seed
194
  if seed == -1:
195
  seed = random.randint(0, 2**32 - 1)
196
 
197
- generator = torch.Generator(device="cuda").manual_seed(seed)
198
 
199
  progress(0.1, desc="Generating video frames...")
200
 
201
  # Generate video
202
- output = pipeline(
203
  prompt=prompt,
204
  negative_prompt=negative_prompt if negative_prompt else None,
205
- num_frames=num_frames,
206
- guidance_scale=guidance_scale,
207
- num_inference_steps=num_inference_steps,
208
  generator=generator,
209
- height=height,
210
- width=width,
211
  )
212
 
213
  progress(0.9, desc="Exporting video...")
214
 
215
  # Export to video file
216
- video_path = export_to_video(output.frames[0], fps=fps)
217
 
218
  progress(1.0, desc="Complete!")
219
 
 
152
  }
153
  """
154
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
155
  # ============================================
156
  # VIDEO GENERATION FUNCTION
157
  # ============================================
 
173
  return None
174
 
175
  progress(0, desc="Loading model...")
176
+
177
+ # Load model inside GPU context
178
+ pipe = LTXPipeline.from_pretrained(
179
+ "Lightricks/LTX-Video",
180
+ torch_dtype=torch.bfloat16
181
+ )
182
+ pipe.to("cuda")
183
+ pipe.enable_model_cpu_offload()
184
 
185
  # Handle seed
186
  if seed == -1:
187
  seed = random.randint(0, 2**32 - 1)
188
 
189
+ generator = torch.Generator(device="cuda").manual_seed(int(seed))
190
 
191
  progress(0.1, desc="Generating video frames...")
192
 
193
  # Generate video
194
+ output = pipe(
195
  prompt=prompt,
196
  negative_prompt=negative_prompt if negative_prompt else None,
197
+ num_frames=int(num_frames),
198
+ guidance_scale=float(guidance_scale),
199
+ num_inference_steps=int(num_inference_steps),
200
  generator=generator,
201
+ height=int(height),
202
+ width=int(width),
203
  )
204
 
205
  progress(0.9, desc="Exporting video...")
206
 
207
  # Export to video file
208
+ video_path = export_to_video(output.frames[0], fps=int(fps))
209
 
210
  progress(1.0, desc="Complete!")
211