Thekingbalxd committed on
Commit
eec8885
·
verified ·
1 Parent(s): 1ca0559

Update inference_codeformer_cpu.py

Browse files
Files changed (1) hide show
  1. inference_codeformer_cpu.py +8 -16
inference_codeformer_cpu.py CHANGED
@@ -23,23 +23,11 @@ pretrain_model_url = {
23
  }
24
 
25
  def set_realesrgan(args):
26
- """
27
- CPU esetén letiltjuk a RealESRGAN-t (visszatér None-nal).
28
- Ha valaki mégis GPU-val futtatná és CUDA elérhető, itt létrehozzuk az upsampler-t.
29
- (A környezetünk CPU-only, így ez a függvény tipikusan None-t ad vissza.)
30
- """
31
- # importok helyben, mert ez a funkció opcionális
32
  from basicsr.archs.rrdbnet_arch import RRDBNet
33
  from basicsr.utils.realesrgan_utils import RealESRGANer
34
 
35
- # CPU környezetben nem használunk fp16-et
36
- use_half = False
37
-
38
- if not torch.cuda.is_available():
39
- warnings.warn('RealESRGAN: CUDA nem elérhető; a háttér feljavítás CPU-n letiltva.', RuntimeWarning)
40
- return None
41
 
42
- # Ha itt valaki GPU-n futtatja (nem jellemző ebben a CPU-only scriptben):
43
  model = RRDBNet(
44
  num_in_ch=3,
45
  num_out_ch=3,
@@ -48,17 +36,21 @@ def set_realesrgan(args):
48
  num_grow_ch=32,
49
  scale=2,
50
  )
 
51
  upsampler = RealESRGANer(
52
  scale=2,
53
  model_path="https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/RealESRGAN_x2plus.pth",
54
  model=model,
55
- tile=args.bg_tile,
56
- tile_pad=40,
57
  pre_pad=0,
58
- half=use_half
 
59
  )
 
60
  return upsampler
61
 
 
62
  if __name__ == '__main__':
63
  # explicit CPU device
64
  device = torch.device("cpu")
 
23
  }
24
 
25
def set_realesrgan(args):
    """Build a RealESRGAN x2 background upsampler pinned to the CPU.

    Args:
        args: Parsed CLI namespace; only ``args.bg_tile`` (requested tile
            size, int) is read here.

    Returns:
        A ``RealESRGANer`` instance configured for CPU inference with
        fp16 disabled.
    """
    # Local imports: RealESRGAN support is optional, so the (heavy) basicsr
    # modules are only imported when an upsampler is actually constructed.
    from basicsr.archs.rrdbnet_arch import RRDBNet
    from basicsr.utils.realesrgan_utils import RealESRGANer

    # This script is CPU-only: pin the upsampler to the CPU device explicitly.
    device = torch.device("cpu")

    # NOTE(review): num_feat/num_block were elided by the diff context this
    # block was recovered from; 64/23 are both the basicsr defaults and the
    # published RealESRGAN_x2plus configuration — confirm against the full file.
    model = RRDBNet(
        num_in_ch=3,
        num_out_ch=3,
        num_feat=64,
        num_block=23,
        num_grow_ch=32,
        scale=2,
    )

    upsampler = RealESRGANer(
        scale=2,
        model_path="https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/RealESRGAN_x2plus.pth",
        model=model,
        tile=min(args.bg_tile, 200),  # COLAB CPU LIMIT: cap tile size to bound memory use
        tile_pad=20,
        pre_pad=0,
        half=False,  # fp16 is not beneficial (and may be unsupported) on CPU
        device=device,
    )

    return upsampler
52
 
53
+
54
  if __name__ == '__main__':
55
  # explicit CPU device
56
  device = torch.device("cpu")