fuvty committed
Commit 8768ac4 · Parent: 8672996

[debug] zeroGPU

Files changed (1):
  app.py  +24 -47
app.py CHANGED
@@ -51,16 +51,15 @@ class ModelManager:
             c2c_checkpoint_path: Path to C2C checkpoint directory
             device: Device to use (cuda, cpu, or auto)
         """
-        # For ZeroGPU, load models to CPU and move to GPU in decorated functions
+        # For ZeroGPU, models should be loaded to CUDA directly
+        # The @spaces.GPU decorator handles GPU allocation automatically
         if device == "auto":
-            if ZEROGPU_AVAILABLE:
-                self.device = torch.device("cpu")
-                print("ZeroGPU detected: Loading models to CPU (will move to GPU on-demand)")
-            else:
-                self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         else:
             self.device = torch.device(device)
         print(f"Using device: {self.device}")
+        if ZEROGPU_AVAILABLE:
+            print("ZeroGPU detected: Models will be loaded to CUDA (decorator handles allocation)")

         # Model configurations
         self.single_model_name = single_model_name
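For readers unfamiliar with the pattern being adopted here: under ZeroGPU the Space starts with no GPU attached, models are nevertheless placed on CUDA at load time, and the `spaces` runtime attaches a physical device only while a `@spaces.GPU`-decorated function runs. A minimal sketch of that idiom, assuming the `spaces` package of a ZeroGPU Space (the model name is a placeholder, not the one this app loads):

```python
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL = "Qwen/Qwen2.5-0.5B-Instruct"  # placeholder model, not the app's

tokenizer = AutoTokenizer.from_pretrained(MODEL)
# Placed on CUDA at startup; ZeroGPU defers the physical allocation
# until a @spaces.GPU-decorated call is entered.
model = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.float16).to("cuda")

@spaces.GPU(duration=60)  # hold the GPU for at most ~60 s per call
def generate(prompt: str) -> str:
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    output = model.generate(**inputs, max_new_tokens=64)
    return tokenizer.decode(output[0], skip_special_tokens=True)
```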
@@ -221,16 +220,12 @@ class ModelManager:
     @spaces.GPU(duration=60)
     def generate_single(self, user_input: str) -> Generator[str, None, None]:
         """Generate response from single model with streaming."""
-        # For ZeroGPU, move model to GPU on-demand
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        if ZEROGPU_AVAILABLE:
-            self.single_model.to(device)
-
+        # @spaces.GPU decorator handles GPU allocation automatically
         messages = [{"role": "system", "content": ""}, {"role": "user", "content": user_input}]
         text = self.single_tokenizer.apply_chat_template(
             messages, tokenize=False, add_generation_prompt=True, enable_thinking=False
         )
-        inputs = self.single_tokenizer(text, return_tensors="pt").to(device)
+        inputs = self.single_tokenizer(text, return_tensors="pt").to(self.device)

         # Setup streamer
         streamer = TextIteratorStreamer(
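The `streamer = TextIteratorStreamer(...)` call this hunk ends on is the standard transformers streaming recipe: `generate` runs on a worker thread while the caller iterates the streamer. A sketch of the full shape, with illustrative generation kwargs rather than the app's exact values:

```python
from threading import Thread
from transformers import TextIteratorStreamer

# skip_prompt drops the echoed input; extra kwargs are forwarded to decode()
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=512)
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

for chunk in streamer:  # decoded text fragments, in generation order
    print(chunk, end="", flush=True)
```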
@@ -252,23 +247,16 @@ class ModelManager:
         thread.start()

         # Stream tokens
+        generated_text = ""
         for token in streamer:
-            yield token
-        thread.join()
-
-        if ZEROGPU_AVAILABLE:
-            self.single_model.to("cpu")
-
-
+            generated_text += token
+            yield generated_text
+
     @spaces.GPU(duration=90)
     def generate_t2t(self, user_input: str) -> Generator[tuple[str, str], None, None]:
         """Generate response from T2T model with streaming (returns context, answer)."""
-        # For ZeroGPU, move model to GPU on-demand
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        if ZEROGPU_AVAILABLE:
-            self.t2t_model.context_model.to(device)
-            self.t2t_model.answer_model.to(device)
-
+        # @spaces.GPU decorator handles GPU allocation automatically
+
         # Stage 1: Context generation
         context_streamer = TextIteratorStreamer(
             self.t2t_model.context_tokenizer,
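The change from `yield token` to yielding the accumulated string matches how Gradio consumes generator functions: each yielded value replaces the bound output component wholesale, so yielding deltas would display one token at a time instead of growing text. A hypothetical wiring sketch (component names are assumptions, not taken from the app's `create_demo`, and `model_manager` is assumed in scope):

```python
import gradio as gr

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    answer = gr.Textbox(label="Answer")
    # Every value generate_single yields overwrites `answer`, so the
    # generator must yield the full text accumulated so far.
    prompt.submit(model_manager.generate_single, inputs=prompt, outputs=answer)

demo.launch()
```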
@@ -283,7 +271,7 @@ class ModelManager:
             add_generation_prompt=True,
             return_tensors="pt",
             enable_thinking=False
-        ).to(device)
+        ).to(self.device)

         generation_kwargs = {
             'input_ids': inputs,
@@ -332,7 +320,7 @@ class ModelManager:
             add_generation_prompt=True,
             return_tensors="pt",
             enable_thinking=False
-        ).to(device)
+        ).to(self.device)

         generation_kwargs = {
             'input_ids': inputs,
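With the local `device` variable gone from these methods, any leftover `.to(device)` would raise a `NameError` at call time; this hunk and the matching ones below therefore sweep every remaining reference over to `self.device`, which after the constructor change points at CUDA whenever it is available.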
@@ -349,25 +337,16 @@ class ModelManager:
         for token in answer_streamer:
             answer_text += token
             yield context_text, answer_text
-        thread.join()
-
-        if ZEROGPU_AVAILABLE:
-            self.t2t_model.context_model.to("cpu")
-            self.t2t_model.answer_model.to("cpu")
-
+
     @spaces.GPU(duration=60)
     def generate_c2c(self, user_input: str) -> Generator[str, None, None]:
         """Generate response from C2C model with streaming."""
-        # For ZeroGPU, move model to GPU on-demand
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        if ZEROGPU_AVAILABLE:
-            self.c2c_model.to(device)
-
+        # @spaces.GPU decorator handles GPU allocation automatically
         messages = [{"role": "system", "content": ""}, {"role": "user", "content": user_input}]
         text = self.c2c_tokenizer.apply_chat_template(
             messages, tokenize=False, add_generation_prompt=True, enable_thinking=False
         )
-        inputs = self.c2c_tokenizer(text, return_tensors="pt").to(device)
+        inputs = self.c2c_tokenizer(text, return_tensors="pt").to(self.device)

         # Setup streamer
         streamer = TextIteratorStreamer(
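Two details are worth noting across these hunks. The `duration` argument to `@spaces.GPU` is the GPU time requested per call, so the two-stage `generate_t2t` asks for 90 s while the single-stage paths ask for 60 s. And dropping `thread.join()` along with the `.to("cpu")` off-loading looks safe here: the streamer loop only exits once `generate` signals the end of the stream, so the worker thread has effectively finished by then, and ZeroGPU releases the device when the decorated call returns.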
@@ -380,12 +359,12 @@ class ModelManager:
         full_length = inputs.input_ids.shape[1]
         instruction_index = torch.tensor([1, 0], dtype=torch.long).repeat(
             full_length - 1, 1
-        ).unsqueeze(0).to(device)
+        ).unsqueeze(0).to(self.device)
         label_index = torch.tensor([-1, 0], dtype=torch.long).repeat(
             1, 1
-        ).unsqueeze(0).to(device)
+        ).unsqueeze(0).to(self.device)
         position_ids = inputs.attention_mask.long().cumsum(-1) - 1 if inputs.attention_mask is not None else \
-            torch.arange(full_length, dtype=torch.long).unsqueeze(0).to(device)
+            torch.arange(full_length, dtype=torch.long).unsqueeze(0).to(self.device)

         # Generation parameters
         generation_kwargs = {
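A quick sanity check on the `position_ids` expression in this hunk: for a left-padded batch, `attention_mask.long().cumsum(-1) - 1` numbers the real tokens 0, 1, 2, … while padding slots come out as -1 (they are masked out of attention anyway):

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])  # two pad slots, three real tokens
position_ids = attention_mask.long().cumsum(-1) - 1
print(position_ids)  # tensor([[-1, -1,  0,  1,  2]])
```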
@@ -402,12 +381,10 @@ class ModelManager:
         thread.start()

         # Stream tokens
+        generated_text = ""
         for token in streamer:
-            yield token
-        thread.join()
-
-        if ZEROGPU_AVAILABLE:
-            self.c2c_model.to("cpu")
+            generated_text += token
+            yield generated_text


 def create_demo(model_manager: ModelManager):
 