fuvty committed
Commit 9eee2a6 · Parent(s): 5c971e3

[debug] ZeroGPU

Files changed (1)
  1. app.py +17 -32
app.py CHANGED
@@ -7,8 +7,8 @@ This creates a web interface to compare three inference modes simultaneously:
 3. C2C: Rosetta model with projectors
 
 ZeroGPU Support:
-- Models are loaded to CPU at startup
-- @spaces.GPU decorator moves models to GPU on-demand for each inference
+- Models are loaded to CUDA at startup
+- @spaces.GPU decorator handles GPU allocation automatically for each inference
 - Works seamlessly on both ZeroGPU and regular GPU environments
 """
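The docstring change above summarizes the pattern this commit switches to: move models to CUDA once at load time and let the @spaces.GPU decorator attach the real GPU for the duration of each call. A minimal Python sketch of that pattern (the model name is a placeholder, not this Space's actual checkpoints):

    import spaces
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Placeholder model; the Space loads its own single/T2T/C2C checkpoints.
    name = "Qwen/Qwen3-0.6B"
    tokenizer = AutoTokenizer.from_pretrained(name)
    # Calling .to("cuda") at startup is fine on ZeroGPU: the spaces SDK defers the
    # real allocation until a @spaces.GPU-decorated function actually runs.
    model = AutoModelForCausalLM.from_pretrained(name, torch_dtype=torch.float16).to("cuda")

    @spaces.GPU(duration=60)  # GPU is attached only while this function executes
    def generate(prompt: str) -> str:
        inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
        out = model.generate(**inputs, max_new_tokens=64)
        return tokenizer.decode(out[0], skip_special_tokens=True)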
 
@@ -51,16 +51,15 @@ class ModelManager:
             c2c_checkpoint_path: Path to C2C checkpoint directory
             device: Device to use (cuda, cpu, or auto)
         """
-        # For ZeroGPU, load models to CPU and move to GPU in decorated functions
+        # For ZeroGPU, models should be loaded to CUDA directly
+        # The @spaces.GPU decorator handles GPU allocation automatically
         if device == "auto":
-            if ZEROGPU_AVAILABLE:
-                self.device = torch.device("cpu")
-                print("ZeroGPU detected: Loading models to CPU (will move to GPU on-demand)")
-            else:
-                self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         else:
             self.device = torch.device(device)
         print(f"Using device: {self.device}")
+        if ZEROGPU_AVAILABLE:
+            print("ZeroGPU detected: Models will be loaded to CUDA (decorator handles allocation)")
 
         # Model configurations
         self.single_model_name = single_model_name
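The ZEROGPU_AVAILABLE flag itself is defined outside this hunk. One common way to derive such a flag, shown only as an assumption about how it might be set and not as what app.py actually does:

    import os

    try:
        import spaces  # present when the Space runs with the ZeroGPU SDK installed
        # SPACES_ZERO_GPU is typically set in ZeroGPU Spaces; absence means no ZeroGPU.
        ZEROGPU_AVAILABLE = bool(os.environ.get("SPACES_ZERO_GPU"))
    except ImportError:
        ZEROGPU_AVAILABLE = False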
@@ -221,16 +220,12 @@ class ModelManager:
     @spaces.GPU(duration=60)
     def generate_single(self, user_input: str) -> Generator[str, None, None]:
         """Generate response from single model with streaming."""
-        # Move model to GPU for ZeroGPU
-        device = torch.device("cuda" if ZEROGPU_AVAILABLE else self.device)
-        if ZEROGPU_AVAILABLE and self.single_model.device.type != "cuda":
-            self.single_model.to(device)
-
+        # @spaces.GPU decorator handles GPU allocation automatically
         messages = [{"role": "system", "content": ""}, {"role": "user", "content": user_input}]
         text = self.single_tokenizer.apply_chat_template(
             messages, tokenize=False, add_generation_prompt=True, enable_thinking=False
         )
-        inputs = self.single_tokenizer(text, return_tensors="pt").to(device)
+        inputs = self.single_tokenizer(text, return_tensors="pt").to(self.device)
 
         # Setup streamer
         streamer = TextIteratorStreamer(
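generate_single pairs TextIteratorStreamer with a background thread so tokens can be yielded as they are produced. A self-contained sketch of that standard pattern (the helper name and max_new_tokens value are illustrative, not taken from app.py):

    from threading import Thread
    from transformers import TextIteratorStreamer

    def stream_generate(model, tokenizer, text, device, max_new_tokens=512):
        inputs = tokenizer(text, return_tensors="pt").to(device)
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        kwargs = dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens)
        # generate() blocks, so it runs in a worker thread while we consume the streamer.
        Thread(target=model.generate, kwargs=kwargs).start()
        partial = ""
        for chunk in streamer:
            partial += chunk
            yield partial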
@@ -260,13 +255,7 @@ class ModelManager:
     @spaces.GPU(duration=90)
     def generate_t2t(self, user_input: str) -> Generator[tuple[str, str], None, None]:
         """Generate response from T2T model with streaming (returns context, answer)."""
-        # Move models to GPU for ZeroGPU
-        device = torch.device("cuda" if ZEROGPU_AVAILABLE else self.device)
-        if ZEROGPU_AVAILABLE:
-            if self.t2t_model.context_model.device.type != "cuda":
-                self.t2t_model.context_model.to(device)
-            if self.t2t_model.answer_model.device.type != "cuda":
-                self.t2t_model.answer_model.to(device)
+        # @spaces.GPU decorator handles GPU allocation automatically
 
         # Stage 1: Context generation
         context_streamer = TextIteratorStreamer(
@@ -282,7 +271,7 @@ class ModelManager:
             add_generation_prompt=True,
             return_tensors="pt",
             enable_thinking=False
-        ).to(device)
+        ).to(self.device)
 
         generation_kwargs = {
             'input_ids': inputs,
@@ -331,7 +320,7 @@ class ModelManager:
             add_generation_prompt=True,
             return_tensors="pt",
             enable_thinking=False
-        ).to(device)
+        ).to(self.device)
 
         generation_kwargs = {
             'input_ids': inputs,
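Both .to(self.device) changes above sit on the end of apply_chat_template(..., return_tensors="pt") calls, which tokenize straight to a tensor, so one .to() call moves the prompt to the target device. A small illustration (tokenizer is assumed to be an already-loaded Hugging Face chat tokenizer):

    messages = [{"role": "user", "content": "example question"}]
    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",   # returns an input_ids tensor rather than a string
    ).to("cuda")               # so it can be moved to the device in the same expression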
@@ -352,16 +341,12 @@ class ModelManager:
     @spaces.GPU(duration=60)
     def generate_c2c(self, user_input: str) -> Generator[str, None, None]:
         """Generate response from C2C model with streaming."""
-        # Move model to GPU for ZeroGPU
-        device = torch.device("cuda" if ZEROGPU_AVAILABLE else self.device)
-        if ZEROGPU_AVAILABLE and self.c2c_model.device.type != "cuda":
-            self.c2c_model.to(device)
-
+        # @spaces.GPU decorator handles GPU allocation automatically
         messages = [{"role": "system", "content": ""}, {"role": "user", "content": user_input}]
         text = self.c2c_tokenizer.apply_chat_template(
             messages, tokenize=False, add_generation_prompt=True, enable_thinking=False
         )
-        inputs = self.c2c_tokenizer(text, return_tensors="pt").to(device)
+        inputs = self.c2c_tokenizer(text, return_tensors="pt").to(self.device)
 
         # Setup streamer
         streamer = TextIteratorStreamer(
@@ -374,12 +359,12 @@ class ModelManager:
         full_length = inputs.input_ids.shape[1]
         instruction_index = torch.tensor([1, 0], dtype=torch.long).repeat(
             full_length - 1, 1
-        ).unsqueeze(0).to(device)
+        ).unsqueeze(0).to(self.device)
         label_index = torch.tensor([-1, 0], dtype=torch.long).repeat(
             1, 1
-        ).unsqueeze(0).to(device)
+        ).unsqueeze(0).to(self.device)
         position_ids = inputs.attention_mask.long().cumsum(-1) - 1 if inputs.attention_mask is not None else \
-            torch.arange(full_length, dtype=torch.long).unsqueeze(0).to(device)
+            torch.arange(full_length, dtype=torch.long).unsqueeze(0).to(self.device)
 
         # Generation parameters
         generation_kwargs = {
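The position_ids expression in this hunk is the usual mask-cumsum trick; in isolation it behaves like this (values shown for a 5-token example with two pad tokens):

    import torch

    attention_mask = torch.tensor([[0, 0, 1, 1, 1]])  # 2 pad tokens, 3 real tokens
    position_ids = attention_mask.long().cumsum(-1) - 1
    # -> tensor([[-1, -1,  0,  1,  2]]): real tokens get 0-based positions

    # Fallback used when no attention mask is available:
    full_length = attention_mask.shape[1]
    fallback = torch.arange(full_length, dtype=torch.long).unsqueeze(0)
    # -> tensor([[0, 1, 2, 3, 4]])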
 