Update app.py
Browse files
app.py
CHANGED
|
@@ -24,8 +24,11 @@ os.makedirs(Ref_path)
|
|
| 24 |
os.makedirs(Ref_path_T)
|
| 25 |
os.makedirs('result')
|
| 26 |
|
| 27 |
-
os.system("wget https://www.dropbox.com/s/xv6inxwy0so4ni0/LR.png -O LR.png")
|
| 28 |
-
os.system("wget https://www.dropbox.com/s/abydd1oczs1163l/Ref.png -O Ref.png")
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
def resize(img):
|
| 31 |
max_side = 512
|
|
@@ -39,18 +42,18 @@ def resize(img):
|
|
| 39 |
return img
|
| 40 |
|
| 41 |
def inference(LR, Ref):
|
| 42 |
-
LR = resize(LR)
|
| 43 |
-
Ref = resize(Ref)
|
| 44 |
|
| 45 |
LR.save(os.path.join(LR_path, '0000.png'))
|
| 46 |
Ref.save(os.path.join(Ref_path, '0000.png'))
|
| 47 |
Ref.save(os.path.join(Ref_path_T, '0000.png'))
|
| 48 |
|
| 49 |
os.system("python -B run.py \
|
| 50 |
-
--mode
|
| 51 |
-
--config
|
| 52 |
--data RealMCVSR \
|
| 53 |
-
--ckpt_abs_name ckpt/
|
| 54 |
--data_offset ./test \
|
| 55 |
--output_offset ./result \
|
| 56 |
--qualitative_only \
|
|
@@ -59,7 +62,7 @@ def inference(LR, Ref):
|
|
| 59 |
|
| 60 |
return "result/0000.png"
|
| 61 |
|
| 62 |
-
title="RefVSR"
|
| 63 |
description="Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively."
|
| 64 |
|
| 65 |
article = "<p style='text-align: center'><b>To check the full capability of the module, we recommend cloning the GitHub repository and running RefVSR models on videos using GPUs.</b></p><p style='text-align: center'>This demo runs on CPUs and only supports RefVSR for a single LR and Ref frame due to computational complexity. Hence, the model will not take advantage of temporal LR and Ref frames.</p><p style='text-align: center'>The model is the small-sized model trained with the proposed two-stage training strategy.</p><p style='text-align: center'>The sample frames are in HD resolution (1920x1080) and in the PNG format.</p><p style='text-align: center'><a href='https://junyonglee.me/projects/RefVSR' target='_blank'>Project</a> | <a href='https://arxiv.org/abs/2203.14537' target='_blank'>arXiv</a> | <a href='https://github.com/codeslake/RefVSR' target='_blank'>Github</a></p>"
|
|
|
|
| 24 |
os.makedirs(Ref_path_T)
|
| 25 |
os.makedirs('result')
|
| 26 |
|
| 27 |
+
#os.system("wget https://www.dropbox.com/s/xv6inxwy0so4ni0/LR.png -O LR.png")
|
| 28 |
+
#os.system("wget https://www.dropbox.com/s/abydd1oczs1163l/Ref.png -O Ref.png")
|
| 29 |
+
os.system("wget https://www.dropbox.com/s/vqekqdz80d85gi4/UW.png -O LR.png")
|
| 30 |
+
os.system("wget https://www.dropbox.com/s/lsopmquhpm87v83/W.png -O Ref.png")
|
| 31 |
+
|
| 32 |
|
| 33 |
def resize(img):
|
| 34 |
max_side = 512
|
|
|
|
| 42 |
return img
|
| 43 |
|
| 44 |
def inference(LR, Ref):
|
| 45 |
+
#LR = resize(LR)
|
| 46 |
+
#Ref = resize(Ref)
|
| 47 |
|
| 48 |
LR.save(os.path.join(LR_path, '0000.png'))
|
| 49 |
Ref.save(os.path.join(Ref_path, '0000.png'))
|
| 50 |
Ref.save(os.path.join(Ref_path_T, '0000.png'))
|
| 51 |
|
| 52 |
os.system("python -B run.py \
|
| 53 |
+
--mode RefVSR_MFID_8K \
|
| 54 |
+
--config config_RefVSR_MFID_8K \
|
| 55 |
--data RealMCVSR \
|
| 56 |
+
--ckpt_abs_name ckpt/RefVSR_MFID_8K.pytorch \
|
| 57 |
--data_offset ./test \
|
| 58 |
--output_offset ./result \
|
| 59 |
--qualitative_only \
|
|
|
|
| 62 |
|
| 63 |
return "result/0000.png"
|
| 64 |
|
| 65 |
+
title="RefVSR (under construction)"
|
| 66 |
description="Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively."
|
| 67 |
|
| 68 |
article = "<p style='text-align: center'><b>To check the full capability of the module, we recommend cloning the GitHub repository and running RefVSR models on videos using GPUs.</b></p><p style='text-align: center'>This demo runs on CPUs and only supports RefVSR for a single LR and Ref frame due to computational complexity. Hence, the model will not take advantage of temporal LR and Ref frames.</p><p style='text-align: center'>The model is the small-sized model trained with the proposed two-stage training strategy.</p><p style='text-align: center'>The sample frames are in HD resolution (1920x1080) and in the PNG format.</p><p style='text-align: center'><a href='https://junyonglee.me/projects/RefVSR' target='_blank'>Project</a> | <a href='https://arxiv.org/abs/2203.14537' target='_blank'>arXiv</a> | <a href='https://github.com/codeslake/RefVSR' target='_blank'>Github</a></p>"
|