Nekochu committed on
Commit
153f929
·
1 Parent(s): 9ed24c7

remove accelerate (causes meta tensors), clean up patches

Browse files
Files changed (2) hide show
  1. Dockerfile +1 -1
  2. app.py +0 -9
Dockerfile CHANGED
@@ -72,7 +72,7 @@ RUN curl -fL --retry 3 --retry-delay 5 -o /app/models/vae-BF16.gguf \
72
  # Install Python deps for Gradio UI + training
73
  RUN pip3 install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu \
74
  "gradio[mcp]==5.29.0" requests torch safetensors \
75
- transformers>=4.51.0 peft>=0.18.0 accelerate>=1.12.0 \
76
  loguru "torchaudio==2.4.0" "diffusers==0.30.3" lightning numpy tensorboard soundfile \
77
  einops vector_quantize_pytorch
78
 
 
72
  # Install Python deps for Gradio UI + training
73
  RUN pip3 install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu \
74
  "gradio[mcp]==5.29.0" requests torch safetensors \
75
+ transformers>=4.51.0 peft>=0.18.0 \
76
  loguru "torchaudio==2.4.0" "diffusers==0.30.3" lightning numpy tensorboard soundfile \
77
  einops vector_quantize_pytorch
78
 
app.py CHANGED
@@ -339,15 +339,6 @@ try:
339
  torch.backends.cuda.enable_flash_sdp(False)
340
  os.environ["ATTN_BACKEND"] = "sdpa"
341
 
342
- import transformers
343
- _orig_from_pretrained = transformers.AutoModel.from_pretrained
344
- def _cpu_from_pretrained(*args, **kwargs):
345
- kwargs['low_cpu_mem_usage'] = False
346
- kwargs.setdefault('torch_dtype', torch.float32)
347
- return _orig_from_pretrained(*args, **kwargs)
348
- transformers.AutoModel.from_pretrained = _cpu_from_pretrained
349
- log(" Patched AutoModel.from_pretrained: low_cpu_mem_usage=False, dtype=float32")
350
-
351
  import torchaudio
352
  _orig = torchaudio.load
353
  def _sf(p, *a, **kw):
 
339
  torch.backends.cuda.enable_flash_sdp(False)
340
  os.environ["ATTN_BACKEND"] = "sdpa"
341
 
 
 
 
 
 
 
 
 
 
342
  import torchaudio
343
  _orig = torchaudio.load
344
  def _sf(p, *a, **kw):