Spaces:
Running
Running
Upload 77 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- cores/__init__.py +1 -0
- cores/add.py +130 -0
- cores/clean.py +249 -0
- cores/init.py +31 -0
- cores/options.py +130 -0
- cores/style.py +50 -0
- cpp/CMakeLists.txt +38 -0
- cpp/README.md +3 -0
- cpp/example/CMakeLists.txt +17 -0
- cpp/example/deepmosaic.cpp +52 -0
- cpp/utils/CMakeLists.txt +14 -0
- cpp/utils/include/data.hpp +10 -0
- cpp/utils/include/util.hpp +26 -0
- cpp/utils/src/data.cpp +10 -0
- cpp/utils/src/util.cpp +49 -0
- deepmosaic.py +99 -0
- docs/Release_notes.txt +43 -0
- docs/exe_help.md +112 -0
- docs/exe_help_CN.md +114 -0
- docs/options_introduction.md +41 -0
- docs/options_introduction_CN.md +41 -0
- docs/pre-trained_models_introduction.md +28 -0
- docs/pre-trained_models_introduction_CN.md +28 -0
- docs/training_with_your_own_dataset.md +77 -0
- imgs/GUI.png +0 -0
- imgs/GUI_Instructions.jpg +0 -0
- imgs/example/SZU.jpg +0 -0
- imgs/example/SZU_summer2winter.jpg +0 -0
- imgs/example/SZU_vangogh.jpg +0 -0
- imgs/example/a_dcp.png +3 -0
- imgs/example/b_dcp.png +3 -0
- imgs/example/face_a_clean.jpg +0 -0
- imgs/example/face_a_mosaic.jpg +0 -0
- imgs/example/face_b_clean.jpg +0 -0
- imgs/example/face_b_mosaic.jpg +0 -0
- imgs/example/lena.jpg +0 -0
- imgs/example/lena_add.jpg +0 -0
- imgs/example/lena_clean.jpg +0 -0
- imgs/example/youknow.png +0 -0
- imgs/example/youknow_add.png +0 -0
- imgs/example/youknow_clean.png +0 -0
- imgs/hand.gif +3 -0
- imgs/lena.jpg +0 -0
- imgs/logo.ico +0 -0
- imgs/logo.png +0 -0
- imgs/logo_withwords.png +0 -0
- imgs/ruoruo.jpg +0 -0
- make_datasets/cut_video.py +32 -0
- make_datasets/draw_mask.py +96 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
imgs/example/a_dcp.png filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
imgs/example/b_dcp.png filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
imgs/hand.gif filter=lfs diff=lfs merge=lfs -text
|
cores/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .options import *
|
cores/add.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from queue import Queue
|
| 3 |
+
from threading import Thread
|
| 4 |
+
import time
|
| 5 |
+
import numpy as np
|
| 6 |
+
import cv2
|
| 7 |
+
from models import runmodel
|
| 8 |
+
from util import mosaic,util,ffmpeg,filt
|
| 9 |
+
from util import image_processing as impro
|
| 10 |
+
from .init import video_init
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
'''
|
| 14 |
+
---------------------Add Mosaic---------------------
|
| 15 |
+
'''
|
| 16 |
+
def addmosaic_img(opt, netS):
    """Add mosaic to a single image and save it as <name>_add.jpg in result_dir."""
    source_path = opt.media_path
    print('Add Mosaic:',source_path)
    image = impro.imread(source_path)
    # netS returns (mask, x, y, size, area); only the mask is needed here
    roi_mask = runmodel.get_ROI_position(image, netS, opt)[0]
    mosaic_image = mosaic.addmosaic(image, roi_mask, opt)
    stem = os.path.splitext(os.path.basename(source_path))[0]
    impro.imwrite(os.path.join(opt.result_dir, stem + '_add.jpg'), mosaic_image)
def get_roi_positions(opt,netS,imagepaths,savemask=True):
    """Run netS over every frame to locate the ROI to be mosaicked.

    Writes per-frame masks to <temp_dir>/ROI_mask (when ``savemask``),
    checkpoints progress every 1000 frames (step.json / roi_positions.npy)
    so an interrupted run can resume, and returns ``mask_index`` — the
    median-filtered per-frame mask indices from filt.position_medfilt.
    """
    # resume support: restart from the last checkpoint if one exists
    continue_flag = False
    resume_frame = 0  # fix: was undefined on fresh runs -> NameError at the i%1000 checkpoint
    if os.path.isfile(os.path.join(opt.temp_dir,'step.json')):
        step = util.loadjson(os.path.join(opt.temp_dir,'step.json'))
        resume_frame = int(step['frame'])
        if int(step['step'])>2:
            # this stage already finished on a previous run; reuse its result
            mask_index = np.load(os.path.join(opt.temp_dir,'mask_index.npy'))
            return mask_index
        if int(step['step'])>=2 and resume_frame>0:
            pre_positions = np.load(os.path.join(opt.temp_dir,'roi_positions.npy'))
            continue_flag = True
            imagepaths = imagepaths[resume_frame:]

    positions = []
    t1 = time.time()
    if not opt.no_preview:
        cv2.namedWindow('mask', cv2.WINDOW_NORMAL)
    print('Step:2/4 -- Find mosaic location')

    # background loader keeps a small queue of decoded frames ready for the GPU
    img_read_pool = Queue(4)
    def loader(imagepaths):
        for imagepath in imagepaths:
            img_origin = impro.imread(os.path.join(opt.temp_dir+'/video2image',imagepath))
            img_read_pool.put(img_origin)
    t = Thread(target=loader,args=(imagepaths,))
    t.daemon = True
    t.start()

    for i,imagepath in enumerate(imagepaths,1):
        img_origin = img_read_pool.get()
        mask,x,y,size,area = runmodel.get_ROI_position(img_origin,netS,opt)
        positions.append([x,y,area])
        if savemask:
            # write the mask asynchronously so inference is not blocked by disk I/O
            t = Thread(target=cv2.imwrite,args=(os.path.join(opt.temp_dir+'/ROI_mask',imagepath), mask,))
            t.start()
        if i%1000==0:
            # periodic checkpoint: positions so far + absolute frame number
            save_positions = np.array(positions)
            if continue_flag:
                save_positions = np.concatenate((pre_positions,save_positions),axis=0)
            np.save(os.path.join(opt.temp_dir,'roi_positions.npy'),save_positions)
            step = {'step':2,'frame':i+resume_frame}
            util.savejson(os.path.join(opt.temp_dir,'step.json'),step)

        # preview result and print progress
        if not opt.no_preview:
            cv2.imshow('mask',mask)
            cv2.waitKey(1) & 0xFF
        t2 = time.time()
        print('\r',str(i)+'/'+str(len(imagepaths)),util.get_bar(100*i/len(imagepaths),num=35),util.counttime(t1,t2,i,len(imagepaths)),end='')

    if not opt.no_preview:
        cv2.destroyAllWindows()

    print('\nOptimize ROI locations...')
    if continue_flag:
        positions = np.concatenate((pre_positions,positions),axis=0)
    # median-filter the per-frame positions to suppress detection jitter
    mask_index = filt.position_medfilt(np.array(positions), 7)
    step = {'step':3,'frame':0}
    util.savejson(os.path.join(opt.temp_dir,'step.json'),step)
    np.save(os.path.join(opt.temp_dir,'roi_positions.npy'),positions)
    np.save(os.path.join(opt.temp_dir,'mask_index.npy'),np.array(mask_index))

    return mask_index
def addmosaic_video(opt, netS):
    """Add mosaic to a whole video.

    Pipeline: split into frames (video_init), locate the ROI per frame
    (get_roi_positions), mosaic each frame, then re-encode frames + audio
    into <name>_add.mp4.
    """
    src = opt.media_path
    fps, imagepaths = video_init(opt, src)[:2]
    length = len(imagepaths)
    # frame names look like 'output_%06d.<ext>'; chars 7:13 hold the frame number
    start_frame = int(imagepaths[0][7:13])
    mask_index = get_roi_positions(opt, netS, imagepaths)[(start_frame-1):]

    t1 = time.time()
    if not opt.no_preview:
        cv2.namedWindow('preview', cv2.WINDOW_NORMAL)

    # add mosaic
    print('Step:3/4 -- Add Mosaic:')
    t1 = time.time()
    for idx, frame_name in enumerate(imagepaths, 1):
        # look up which (filtered) mask frame to apply to this frame
        mask_name = imagepaths[np.clip(mask_index[idx-1]-start_frame, 0, 1000000)]
        mask = impro.imread(os.path.join(opt.temp_dir+'/ROI_mask', mask_name), 'gray')
        frame = impro.imread(os.path.join(opt.temp_dir+'/video2image', frame_name))
        if impro.mask_area(mask) > 100:
            try:  # avoid unknown errors
                frame = mosaic.addmosaic(frame, mask, opt)
            except Exception as e:
                print('Warning:',e)
        writer = Thread(target=cv2.imwrite,
                        args=(os.path.join(opt.temp_dir+'/addmosaic_image', frame_name), frame))
        writer.start()
        os.remove(os.path.join(opt.temp_dir+'/video2image', frame_name))

        # preview result and print progress
        if not opt.no_preview:
            cv2.imshow('preview', frame)
            cv2.waitKey(1) & 0xFF
        t2 = time.time()
        print('\r', str(idx)+'/'+str(length), util.get_bar(100*idx/length, num=35),
              util.counttime(t1, t2, idx, length), end='')

    print()
    if not opt.no_preview:
        cv2.destroyAllWindows()
    print('Step:4/4 -- Convert images to video')
    ffmpeg.image2video(fps,
                       opt.temp_dir+'/addmosaic_image/output_%06d.'+opt.tempimage_type,
                       opt.temp_dir+'/voice_tmp.mp3',
                       os.path.join(opt.result_dir,
                                    os.path.splitext(os.path.basename(src))[0]+'_add.mp4'))
cores/clean.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
import numpy as np
|
| 4 |
+
import cv2
|
| 5 |
+
import torch
|
| 6 |
+
from models import runmodel
|
| 7 |
+
from util import data,util,ffmpeg,filt
|
| 8 |
+
from util import image_processing as impro
|
| 9 |
+
from .init import video_init
|
| 10 |
+
from multiprocessing import Queue, Process
|
| 11 |
+
from threading import Thread
|
| 12 |
+
|
| 13 |
+
'''
|
| 14 |
+
---------------------Clean Mosaic---------------------
|
| 15 |
+
'''
|
| 16 |
+
def get_mosaic_positions(opt,netM,imagepaths,savemask=True):
    """Locate the mosaic region (x, y, size) on every frame with netM.

    Saves per-frame mosaic masks to <temp_dir>/mosaic_mask (when ``savemask``),
    checkpoints progress every 1000 frames so an interrupted run can resume,
    and returns the per-frame positions array smoothed with a median filter.
    """
    # resume support: restart from the last checkpoint if one exists
    continue_flag = False
    resume_frame = 0  # fix: was undefined on fresh runs -> NameError at the i%1000 checkpoint
    if os.path.isfile(os.path.join(opt.temp_dir,'step.json')):
        step = util.loadjson(os.path.join(opt.temp_dir,'step.json'))
        resume_frame = int(step['frame'])
        if int(step['step'])>2:
            # this stage already finished on a previous run; reuse its result
            pre_positions = np.load(os.path.join(opt.temp_dir,'mosaic_positions.npy'))
            return pre_positions
        if int(step['step'])>=2 and resume_frame>0:
            pre_positions = np.load(os.path.join(opt.temp_dir,'mosaic_positions.npy'))
            continue_flag = True
            imagepaths = imagepaths[resume_frame:]

    positions = []
    t1 = time.time()
    if not opt.no_preview:
        cv2.namedWindow('mosaic mask', cv2.WINDOW_NORMAL)
    print('Step:2/4 -- Find mosaic location')

    # background loader keeps a small queue of decoded frames ready for the GPU
    img_read_pool = Queue(4)
    def loader(imagepaths):
        for imagepath in imagepaths:
            img_origin = impro.imread(os.path.join(opt.temp_dir+'/video2image',imagepath))
            img_read_pool.put(img_origin)
    t = Thread(target=loader,args=(imagepaths,))
    t.daemon = True  # fix: Thread.setDaemon() is deprecated and removed in recent Python
    t.start()

    for i,imagepath in enumerate(imagepaths,1):
        img_origin = img_read_pool.get()
        x,y,size,mask = runmodel.get_mosaic_position(img_origin,netM,opt)
        positions.append([x,y,size])
        if savemask:
            # write the mask asynchronously so inference is not blocked by disk I/O
            t = Thread(target=cv2.imwrite,args=(os.path.join(opt.temp_dir+'/mosaic_mask',imagepath), mask,))
            t.start()
        if i%1000==0:
            # periodic checkpoint: positions so far + absolute frame number
            save_positions = np.array(positions)
            if continue_flag:
                save_positions = np.concatenate((pre_positions,save_positions),axis=0)
            np.save(os.path.join(opt.temp_dir,'mosaic_positions.npy'),save_positions)
            step = {'step':2,'frame':i+resume_frame}
            util.savejson(os.path.join(opt.temp_dir,'step.json'),step)

        # preview result and print progress
        if not opt.no_preview:
            cv2.imshow('mosaic mask',mask)
            cv2.waitKey(1) & 0xFF
        t2 = time.time()
        print('\r',str(i)+'/'+str(len(imagepaths)),util.get_bar(100*i/len(imagepaths),num=35),util.counttime(t1,t2,i,len(imagepaths)),end='')

    if not opt.no_preview:
        cv2.destroyAllWindows()
    print('\nOptimize mosaic locations...')
    positions = np.array(positions)
    if continue_flag:
        positions = np.concatenate((pre_positions,positions),axis=0)
    # median-filter x, y and size independently to suppress detection jitter
    for i in range(3):
        positions[:,i] = filt.medfilt(positions[:,i],opt.medfilt_num)
    step = {'step':3,'frame':0}
    util.savejson(os.path.join(opt.temp_dir,'step.json'),step)
    np.save(os.path.join(opt.temp_dir,'mosaic_positions.npy'),positions)

    return positions
def cleanmosaic_img(opt, netG, netM):
    """Detect and clean the mosaic region of one image; save <name>_clean.jpg."""
    src = opt.media_path
    print('Clean Mosaic:',src)
    original = impro.imread(src)
    x, y, size, mask = runmodel.get_mosaic_position(original, netM, opt)
    #cv2.imwrite('./mask/'+os.path.basename(path), mask)
    restored = original.copy()
    # a detection smaller than 100 px is treated as "no mosaic found"
    if size > 100:
        crop = original[y-size:y+size, x-size:x+size]
        if opt.traditional:
            cleaned = runmodel.traditional_cleaner(crop, opt)
        else:
            cleaned = runmodel.run_pix2pix(crop, netG, opt)
        restored = impro.replace_mosaic(original, cleaned, mask, x, y, size, opt.no_feather)
    else:
        print('Do not find mosaic')
    out_name = os.path.splitext(os.path.basename(src))[0] + '_clean.jpg'
    impro.imwrite(os.path.join(opt.result_dir, out_name), restored)
def cleanmosaic_img_server(opt, img_origin, netG, netM):
    """Clean the mosaic region of an in-memory image and return the result array.

    Unlike cleanmosaic_img this does no file I/O or printing (server use).
    """
    x, y, size, mask = runmodel.get_mosaic_position(img_origin, netM, opt)
    img_result = img_origin.copy()
    # a detection smaller than 100 px is treated as "no mosaic found"
    if size > 100:
        crop = img_origin[y-size:y+size, x-size:x+size]
        if opt.traditional:
            cleaned = runmodel.traditional_cleaner(crop, opt)
        else:
            cleaned = runmodel.run_pix2pix(crop, netG, opt)
        img_result = impro.replace_mosaic(img_origin, cleaned, mask, x, y, size, opt.no_feather)
    return img_result
def cleanmosaic_video_byframe(opt, netG, netM):
    """Clean mosaic from a video one frame at a time (single-frame generator).

    Pipeline: split into frames, locate the mosaic per frame, clean each
    frame independently, then re-encode frames + audio into <name>_clean.mp4.
    """
    src = opt.media_path
    fps, imagepaths, height, width = video_init(opt, src)
    # frame names look like 'output_%06d.<ext>'; chars 7:13 hold the frame number
    start_frame = int(imagepaths[0][7:13])
    positions = get_mosaic_positions(opt, netM, imagepaths, savemask=True)[(start_frame-1):]

    t1 = time.time()
    if not opt.no_preview:
        cv2.namedWindow('clean', cv2.WINDOW_NORMAL)

    # clean mosaic
    print('Step:3/4 -- Clean Mosaic:')
    length = len(imagepaths)
    for idx, frame_name in enumerate(imagepaths, 0):
        x, y, size = positions[idx][0], positions[idx][1], positions[idx][2]
        frame = impro.imread(os.path.join(opt.temp_dir+'/video2image', frame_name))
        result = frame.copy()
        if size > 100:
            try:  # avoid unknown errors
                patch = frame[y-size:y+size, x-size:x+size]
                if opt.traditional:
                    fake = runmodel.traditional_cleaner(patch, opt)
                else:
                    fake = runmodel.run_pix2pix(patch, netG, opt)
                mask = cv2.imread(os.path.join(opt.temp_dir+'/mosaic_mask', frame_name), 0)
                result = impro.replace_mosaic(frame, fake, mask, x, y, size, opt.no_feather)
            except Exception as e:
                print('Warning:',e)
        saver = Thread(target=cv2.imwrite,
                       args=(os.path.join(opt.temp_dir+'/replace_mosaic', frame_name), result,))
        saver.start()
        os.remove(os.path.join(opt.temp_dir+'/video2image', frame_name))

        # preview result and print progress
        if not opt.no_preview:
            cv2.imshow('clean', result)
            cv2.waitKey(1) & 0xFF
        t2 = time.time()
        print('\r', str(idx+1)+'/'+str(length), util.get_bar(100*idx/length, num=35),
              util.counttime(t1, t2, idx+1, len(imagepaths)), end='')
    print()
    if not opt.no_preview:
        cv2.destroyAllWindows()
    print('Step:4/4 -- Convert images to video')
    ffmpeg.image2video(fps,
                       opt.temp_dir+'/replace_mosaic/output_%06d.'+opt.tempimage_type,
                       opt.temp_dir+'/voice_tmp.mp3',
                       os.path.join(opt.result_dir,
                                    os.path.splitext(os.path.basename(src))[0]+'_clean.mp4'))
def cleanmosaic_video_fusion(opt,netG,netM):
    """Clean mosaic from a video with the temporal-fusion ('video') generator.

    netG consumes a stream of T frames sampled every S frames around the
    current one, plus its own previous prediction, which keeps the output
    temporally stable. Mask blending and frame writing run on a background
    thread so GPU inference is not blocked by disk I/O.
    """
    path = opt.media_path
    N,T,S = 2,5,3                    # T frames sampled with stride S; N indexes the current frame in that stream
    LEFT_FRAME = (N*S)               # frames kept on each side of the current frame
    POOL_NUM = LEFT_FRAME*2+1        # sliding pool size
    INPUT_SIZE = 256                 # side length of the crop fed to netG
    FRAME_POS = np.linspace(0, (T-1)*S,T,dtype=np.int64)  # offsets into img_pool to sample
    img_pool = []
    previous_frame = None
    init_flag = True                 # True -> (re)seed previous_frame from the input stream

    fps,imagepaths,height,width = video_init(opt,path)
    # frame names look like 'output_%06d.<ext>'; chars 7:13 hold the frame number
    start_frame = int(imagepaths[0][7:13])
    positions = get_mosaic_positions(opt,netM,imagepaths,savemask=True)[(start_frame-1):]
    t1 = time.time()
    if not opt.no_preview:
        cv2.namedWindow('clean', cv2.WINDOW_NORMAL)

    # clean mosaic
    print('Step:3/4 -- Clean Mosaic:')
    length = len(imagepaths)
    write_pool = Queue(4)
    show_pool = Queue(4)
    def write_result():
        # consumer thread: blend the generated patch back in (or keep the
        # original frame when no mosaic was found), save it, drop the input
        while True:
            save_ori,imagepath,img_origin,img_fake,x,y,size = write_pool.get()
            if save_ori:
                img_result = img_origin
            else:
                mask = cv2.imread(os.path.join(opt.temp_dir+'/mosaic_mask',imagepath),0)
                img_result = impro.replace_mosaic(img_origin,img_fake,mask,x,y,size,opt.no_feather)
            if not opt.no_preview:
                show_pool.put(img_result.copy())
            cv2.imwrite(os.path.join(opt.temp_dir+'/replace_mosaic',imagepath),img_result)
            os.remove(os.path.join(opt.temp_dir+'/video2image',imagepath))
    t = Thread(target=write_result,args=())
    t.daemon = True  # fix: Thread.setDaemon() is deprecated and removed in recent Python
    # NOTE(review): the writer is a daemon consuming a bounded queue; frames still
    # queued when the main loop finishes may not be flushed before exit -- confirm.
    t.start()

    for i,imagepath in enumerate(imagepaths,0):
        x,y,size = positions[i][0],positions[i][1],positions[i][2]
        input_stream = []
        # image read stream: maintain a sliding pool of POOL_NUM decoded frames
        if i==0 :# init
            for j in range(POOL_NUM):
                img_pool.append(impro.imread(os.path.join(opt.temp_dir+'/video2image',imagepaths[np.clip(i+j-LEFT_FRAME,0,len(imagepaths)-1)])))
        else: # load next frame
            img_pool.pop(0)
            img_pool.append(impro.imread(os.path.join(opt.temp_dir+'/video2image',imagepaths[np.clip(i+LEFT_FRAME,0,len(imagepaths)-1)])))
        img_origin = img_pool[LEFT_FRAME]

        # preview result and print
        if not opt.no_preview:
            if show_pool.qsize()>3:
                cv2.imshow('clean',show_pool.get())
                cv2.waitKey(1) & 0xFF

        if size>50:
            try:#Avoid unknown errors
                # crop the mosaic region from each sampled frame, BGR->RGB
                for pos in FRAME_POS:
                    input_stream.append(impro.resize(img_pool[pos][y-size:y+size,x-size:x+size], INPUT_SIZE,interpolation=cv2.INTER_CUBIC)[:,:,::-1])
                if init_flag:
                    init_flag = False
                    previous_frame = input_stream[N]
                    previous_frame = data.im2tensor(previous_frame,bgr2rgb=True,gpu_id=opt.gpu_id)

                input_stream = np.array(input_stream).reshape(1,T,INPUT_SIZE,INPUT_SIZE,3).transpose((0,4,1,2,3))
                input_stream = data.to_tensor(data.normalize(input_stream),gpu_id=opt.gpu_id)
                with torch.no_grad():
                    unmosaic_pred = netG(input_stream,previous_frame)
                img_fake = data.tensor2im(unmosaic_pred,rgb2bgr = True)
                previous_frame = unmosaic_pred
                write_pool.put([False,imagepath,img_origin.copy(),img_fake.copy(),x,y,size])
            except Exception as e:
                init_flag = True
                print('Error:',e)
        else:
            # no mosaic on this frame: pass the original through and reset fusion state
            write_pool.put([True,imagepath,img_origin.copy(),-1,-1,-1,-1])
            init_flag = True

        t2 = time.time()
        print('\r',str(i+1)+'/'+str(length),util.get_bar(100*i/length,num=35),util.counttime(t1,t2,i+1,len(imagepaths)),end='')
    print()
    write_pool.close()
    show_pool.close()
    if not opt.no_preview:
        cv2.destroyAllWindows()
    print('Step:4/4 -- Convert images to video')
    ffmpeg.image2video( fps,
                opt.temp_dir+'/replace_mosaic/output_%06d.'+opt.tempimage_type,
                opt.temp_dir+'/voice_tmp.mp3',
                os.path.join(opt.result_dir,os.path.splitext(os.path.basename(path))[0]+'_clean.mp4'))
cores/init.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from util import util,ffmpeg
|
| 3 |
+
|
| 4 |
+
'''
|
| 5 |
+
---------------------Video Init---------------------
|
| 6 |
+
'''
|
| 7 |
+
def video_init(opt, path):
    """Prepare a video for processing.

    Probes the video, offers to resume an interrupted run, otherwise splits
    it into frames plus an audio track under temp_dir.

    Returns (fps, sorted frame filenames, height, width).
    """
    fps, endtime, height, width = ffmpeg.get_video_infos(path)
    if opt.fps != 0:
        fps = opt.fps  # caller forced an output fps

    # offer to resume if a previous run left a checkpoint behind
    step_file = os.path.join(opt.temp_dir, 'step.json')
    if os.path.isfile(step_file):
        step = util.loadjson(step_file)
        if int(step['step']) >= 1:
            choose = input('There is an unfinished video. Continue it? [y/n] ')
            if choose.lower() in ('y', 'yes'):
                frames = sorted(os.listdir(opt.temp_dir+'/video2image'))
                return fps, frames, height, width

    print('Step:1/4 -- Convert video to images')
    util.file_init(opt)
    ffmpeg.video2voice(path, opt.temp_dir+'/voice_tmp.mp3', opt.start_time, opt.last_time)
    ffmpeg.video2image(path, opt.temp_dir+'/video2image/output_%06d.'+opt.tempimage_type,
                       fps, opt.start_time, opt.last_time)
    frames = sorted(os.listdir(opt.temp_dir+'/video2image'))
    util.savejson(step_file, {'step': 2, 'frame': 0})

    return fps, frames, height, width
cores/options.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class Options():
    """Command-line option container for DeepMosaics.

    ``initialize`` declares all arguments; ``getparse`` parses them and then
    resolves every 'auto' value (mode, netG, ex_mult, position model path)
    from the model filename.
    """
    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.initialized = False  # set True once arguments are declared

    def initialize(self):
        """Declare every command-line argument (base / add / clean / style groups)."""
        #base
        self.parser.add_argument('--debug', action='store_true', help='if specified, start debug mode')
        self.parser.add_argument('--gpu_id', type=str,default='0', help='if -1, use cpu')
        self.parser.add_argument('--media_path', type=str, default='./imgs/ruoruo.jpg',help='your videos or images path')
        self.parser.add_argument('-ss', '--start_time', type=str, default='00:00:00',help='start position of video, default is the beginning of video')
        self.parser.add_argument('-t', '--last_time', type=str, default='00:00:00',help='duration of the video, default is the entire video')
        self.parser.add_argument('--mode', type=str, default='auto',help='Program running mode. auto | add | clean | style')
        self.parser.add_argument('--model_path', type=str, default='./pretrained_models/mosaic/add_face.pth',help='pretrained model path')
        self.parser.add_argument('--result_dir', type=str, default='./result',help='output media will be saved here')
        self.parser.add_argument('--temp_dir', type=str, default='./tmp', help='Temporary files will go here')
        self.parser.add_argument('--tempimage_type', type=str, default='jpg',help='type of temp image, png | jpg, png is better but occupy more storage space')
        self.parser.add_argument('--netG', type=str, default='auto',
            help='select model to use for netG(Clean mosaic and Transfer style) -> auto | unet_128 | unet_256 | resnet_9blocks | HD | video')
        self.parser.add_argument('--fps', type=int, default=0,help='read and output fps, if 0-> origin')
        self.parser.add_argument('--no_preview', action='store_true', help='if specified,do not preview images when processing video. eg.(when run it on server)')
        self.parser.add_argument('--output_size', type=int, default=0,help='size of output media, if 0 -> origin')
        self.parser.add_argument('--mask_threshold', type=int, default=64,help='Mosaic detection threshold (0~255). The smaller is it, the more likely judged as a mosaic area.')

        #AddMosaic
        self.parser.add_argument('--mosaic_mod', type=str, default='squa_avg',help='type of mosaic -> squa_avg | squa_random | squa_avg_circle_edge | rect_avg | random')
        self.parser.add_argument('--mosaic_size', type=int, default=0,help='mosaic size,if 0 auto size')
        self.parser.add_argument('--mask_extend', type=int, default=10,help='extend mosaic area')

        #CleanMosaic
        self.parser.add_argument('--mosaic_position_model_path', type=str, default='auto',help='name of model use to find mosaic position')
        self.parser.add_argument('--traditional', action='store_true', help='if specified, use traditional image processing methods to clean mosaic')
        self.parser.add_argument('--tr_blur', type=int, default=10, help='ksize of blur when using traditional method, it will affect final quality')
        self.parser.add_argument('--tr_down', type=int, default=10, help='downsample when using traditional method,it will affect final quality')
        self.parser.add_argument('--no_feather', action='store_true', help='if specified, no edge feather and color correction, but run faster')
        self.parser.add_argument('--all_mosaic_area', action='store_true', help='if specified, find all mosaic area, else only find the largest area')
        self.parser.add_argument('--medfilt_num', type=int, default=11,help='medfilt window of mosaic movement in the video')
        self.parser.add_argument('--ex_mult', type=str, default='auto',help='mosaic area expansion')

        #StyleTransfer
        self.parser.add_argument('--preprocess', type=str, default='resize', help='resize and cropping of images at load time [ resize | resize_scale_width | edges | gray] or resize,edges(use comma to split)')
        self.parser.add_argument('--edges', action='store_true', help='if specified, use edges to generate pictures,(input_nc = 1)')
        self.parser.add_argument('--canny', type=int, default=150,help='threshold of canny')
        self.parser.add_argument('--only_edges', action='store_true', help='if specified, output media will be edges')

        self.initialized = True


    def getparse(self, test_flag = False):
        """Parse sys.argv and resolve all 'auto' options; returns the namespace.

        When ``test_flag`` is True, also verifies media_path/model_path exist
        and exits with a prompt if they don't.
        """
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()

        model_name = os.path.basename(self.opt.model_path)
        # keep all temporaries under a dedicated subdirectory
        self.opt.temp_dir = os.path.join(self.opt.temp_dir, 'DeepMosaics_temp')

        if self.opt.gpu_id != '-1':
            os.environ["CUDA_VISIBLE_DEVICES"] = str(self.opt.gpu_id)
            import torch
            if not torch.cuda.is_available():
                # fall back to CPU when CUDA is unavailable
                self.opt.gpu_id = '-1'
        # else:
        #     self.opt.gpu_id = '-1'

        if test_flag:
            if not os.path.exists(self.opt.media_path):
                print('Error: Media does not exist!')
                input('Please press any key to exit.\n')
                sys.exit(0)
            if not os.path.exists(self.opt.model_path):
                print('Error: Model does not exist!')
                input('Please press any key to exit.\n')
                sys.exit(0)

        # infer the running mode from the model filename
        if self.opt.mode == 'auto':
            if 'clean' in model_name or self.opt.traditional:
                self.opt.mode = 'clean'
            elif 'add' in model_name:
                self.opt.mode = 'add'
            elif 'style' in model_name or 'edges' in model_name:
                self.opt.mode = 'style'
            else:
                print('Please check model_path!')
                input('Please press any key to exit.\n')
                sys.exit(0)

        if self.opt.output_size == 0 and self.opt.mode == 'style':
            self.opt.output_size = 512

        if 'edges' in model_name or 'edges' in self.opt.preprocess:
            self.opt.edges = True

        # infer the generator architecture from the model filename
        if self.opt.netG == 'auto' and self.opt.mode =='clean':
            if 'unet_128' in model_name:
                self.opt.netG = 'unet_128'
            elif 'resnet_9blocks' in model_name:
                self.opt.netG = 'resnet_9blocks'
            elif 'HD' in model_name and 'video' not in model_name:
                self.opt.netG = 'HD'
            elif 'video' in model_name:
                self.opt.netG = 'video'
            else:
                print('Type of Generator error!')
                input('Please press any key to exit.\n')
                sys.exit(0)

        if self.opt.ex_mult == 'auto':
            # face models use a tighter expansion around the detected area
            if 'face' in model_name:
                self.opt.ex_mult = 1.1
            else:
                self.opt.ex_mult = 1.5
        else:
            self.opt.ex_mult = float(self.opt.ex_mult)

        if self.opt.mosaic_position_model_path == 'auto' and self.opt.mode == 'clean':
            # look for mosaic_position.pth next to the chosen model
            _path = os.path.join(os.path.split(self.opt.model_path)[0],'mosaic_position.pth')
            if os.path.isfile(_path):
                self.opt.mosaic_position_model_path = _path
            else:
                # fix: message was issued via input(), which blocked for a second keypress
                print('Please check mosaic_position_model_path!')
                input('Please press any key to exit.\n')
                sys.exit(0)

        return self.opt
cores/style.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
import numpy as np
|
| 4 |
+
import cv2
|
| 5 |
+
from models import runmodel
|
| 6 |
+
from util import mosaic,util,ffmpeg,filt
|
| 7 |
+
from util import image_processing as impro
|
| 8 |
+
from .init import video_init
|
| 9 |
+
|
| 10 |
+
'''
|
| 11 |
+
---------------------Style Transfer---------------------
|
| 12 |
+
'''
|
| 13 |
+
def styletransfer_img(opt,netG):
    """Run style transfer on a single image and write the styled result.

    The output file is saved into opt.result_dir, named after the input
    image with a suffix derived from the model file name
    (e.g. style_vangogh.pth -> <name>_vangogh.jpg).
    """
    print('Style Transfer_img:',opt.media_path)
    source = impro.imread(opt.media_path)
    styled = runmodel.run_styletransfer(opt, netG, source)
    # Suffix comes from the model file: strip extension and 'style_' prefix.
    suffix = os.path.basename(opt.model_path).replace('.pth','').replace('style_','')
    base_name = os.path.splitext(os.path.basename(opt.media_path))[0]
    impro.imwrite(os.path.join(opt.result_dir, base_name+'_'+suffix+'.jpg'), styled)
|
| 19 |
+
|
| 20 |
+
def styletransfer_video(opt,netG):
    """Apply style transfer to every frame of a video, then rebuild the video.

    Frames are extracted by video_init() into <temp_dir>/video2image, styled
    one by one into <temp_dir>/style_transfer, and finally re-encoded together
    with the extracted audio track via ffmpeg.
    """
    path = opt.media_path
    # video_init returns several values; only (fps, imagepaths) are needed here.
    fps,imagepaths = video_init(opt,path)[:2]
    print('Step:2/4 -- Transfer')
    t1 = time.time()
    if not opt.no_preview:
        cv2.namedWindow('preview', cv2.WINDOW_NORMAL)
    length = len(imagepaths)

    for i,imagepath in enumerate(imagepaths,1):
        img = impro.imread(os.path.join(opt.temp_dir+'/video2image',imagepath))
        img = runmodel.run_styletransfer(opt, netG, img)
        cv2.imwrite(os.path.join(opt.temp_dir+'/style_transfer',imagepath),img)
        # Delete each source frame as soon as it is styled to limit disk usage.
        os.remove(os.path.join(opt.temp_dir+'/video2image',imagepath))

        #preview result and print
        if not opt.no_preview:
            cv2.imshow('preview',img)
            cv2.waitKey(1) & 0xFF
        t2 = time.time()
        # In-place progress line ('\r') with a progress bar and ETA.
        print('\r',str(i)+'/'+str(length),util.get_bar(100*i/length,num=35),util.counttime(t1,t2,i,len(imagepaths)),end='')

    print()
    if not opt.no_preview:
        cv2.destroyAllWindows()
    # Output suffix comes from the model file, e.g. style_vangogh.pth -> vangogh.
    suffix = os.path.basename(opt.model_path).replace('.pth','').replace('style_','')
    print('Step:4/4 -- Convert images to video')
    ffmpeg.image2video( fps,
                opt.temp_dir+'/style_transfer/output_%06d.'+opt.tempimage_type,
                opt.temp_dir+'/voice_tmp.mp3',
                 os.path.join(opt.result_dir,os.path.splitext(os.path.basename(path))[0]+'_'+suffix+'.mp4'))
|
cpp/CMakeLists.txt
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Top-level build configuration for the C++ port of DeepMosaics.
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
set(CMAKE_CXX_STANDARD 14)

project(DeepMosaics)
set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib) # Output directory for built libraries

# NOTE(review): hard-coded developer paths below — adjust for your machine
# or pass -DTorch_DIR/-DOpenCV_DIR on the command line.
set(Torch_DIR /home/hypo/libtorch/share/cmake/Torch)
find_package(Torch REQUIRED)

set(OpenCV_DIR /home/hypo/opencv-4.4.0)
find_package(OpenCV REQUIRED)

# Add sub directories
add_subdirectory(example)
add_subdirectory(utils)

# --- Historical single-executable configuration, kept for reference ---
# set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 14)
# cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
# project(main)
# set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib) # Output directory for built libraries

# set(Torch_DIR /home/hypo/libtorch/share/cmake/Torch)
# find_package(Torch REQUIRED)

# set(OpenCV_DIR /home/hypo/opencv-4.4.0)
# find_package(OpenCV REQUIRED)

# # Collect all source files in the current directory
# # into the DIR_SRCS variable
# # aux_source_directory(. DIR_SRCS)
# add_subdirectory(utils)

# add_executable(main main.cpp)
# # target_link_libraries(main )
# # include_directories( "${OpenCV_INCLUDE_DIRS}" )
# target_link_libraries( main "${TORCH_LIBRARIES}" "${OpenCV_LIBS}" utils)

# set_property(TARGET main PROPERTY CXX_STANDARD 14)
|
cpp/README.md
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
### C++ version for DeepMosaics
|
| 2 |
+
* I am learning c++ through this project...
|
| 3 |
+
* It is under development...
|
cpp/example/CMakeLists.txt
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# --- Historical single-target configuration, kept for reference ---
# project(example)
# add_executable("${PROJECT_NAME}" deepmosaic.cpp)
# target_link_libraries( "${PROJECT_NAME}"
#                        "${TORCH_LIBRARIES}"
#                        "${OpenCV_LIBS}"
#                        utils)

# Build one executable per .cpp file in this directory, each linked against
# libtorch, OpenCV, and the project's utils library.
file(GLOB_RECURSE srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp")
foreach(sourcefile IN LISTS srcs)
    # Executable name = source file name without the .cpp extension.
    string( REPLACE ".cpp" "" binname ${sourcefile})
    add_executable( ${binname} ${sourcefile} )
    target_link_libraries( ${binname}
                           "${TORCH_LIBRARIES}"
                           "${OpenCV_LIBS}"
                           utils)
    # set_property(TARGET ${binname} PROPERTY CXX_STANDARD 14)
endforeach()
|
cpp/example/deepmosaic.cpp
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <stdio.h>
|
| 2 |
+
#include <string.h>
|
| 3 |
+
#include <unistd.h>
|
| 4 |
+
#include <iostream>
|
| 5 |
+
#include <list>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
#include <torch/script.h>
|
| 9 |
+
#include <torch/torch.h>
|
| 10 |
+
#include <opencv2/opencv.hpp>
|
| 11 |
+
|
| 12 |
+
#include "data.hpp"
|
| 13 |
+
#include "util.hpp"
|
| 14 |
+
|
| 15 |
+
// Experimental driver for the C++ port: loads a test image and converts it
// into a normalized NCHW float tensor. The model loading / inference code
// below is still commented out while the port is under development.
int main() {
  // NOTE(review): currently unused — left over from earlier experiments.
  std::string path = util::current_path();

  std::string net_path = "../res/models/mosaic_position.pth";
  std::string img_path = "../res/test_media/face/d.jpg";

  cv::Mat img = cv::imread(img_path);
  cv::resize(img, img, cv::Size(360, 360), 2);
  // img.convertTo(img, CV_32F);
  // Wrap the OpenCV buffer (HWC, uint8) without copying, then reorder to
  // NCHW and scale values to [0, 1].
  torch::Tensor img_tensor =
      torch::from_blob(img.data, {1, img.rows, img.cols, 3}, torch::kByte);
  img_tensor = img_tensor.permute({0, 3, 1, 2});
  img_tensor = img_tensor.toType(torch::kFloat);
  img_tensor = img_tensor.div(255);
  std::cout << img_tensor.sizes() << "\n";

  // end = clock();
  // dur = (double)(end - start);
  // printf("Use Time:%f\n", (dur / CLOCKS_PER_SEC));

  // std::string net_path = "../res/models/mosaic_position.pt";
  // torch::jit::script::Module net;
  // try{
  //   // if (!isfile(net_path)){
  //   //   std::cerr<<"model does not exist\n";
  //   //   }

  //   net = torch::jit::load(net_path);
  // }
  // catch(const std::exception& e){
  //   std::cerr << "error loading the model\n";
  //   return -1;
  // }

  // torch::Tensor example = torch::ones({1,3,360,360});
  // torch::Tensor output = net.forward({example}).toTensor();
  // std::cout<<"ok"<<std::endl;
}
|
cpp/utils/CMakeLists.txt
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Set the project name
project (utils)
# Collect every source file under ./src into DIR_LIB_SRCS.
aux_source_directory(./src DIR_LIB_SRCS)

add_library(${PROJECT_NAME} SHARED ${DIR_LIB_SRCS})

# Add the header search path (PUBLIC so dependents inherit it).
target_include_directories( ${PROJECT_NAME}
                            PUBLIC ${PROJECT_SOURCE_DIR}/include
                            )

# Link the third-party libraries this library calls.
target_link_libraries( "${PROJECT_NAME}"
                       "${OpenCV_LIBS}")
|
cpp/utils/include/data.hpp
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef DATA_H
#define DATA_H
#include <opencv2/opencv.hpp>

namespace data {
// In-place normalization of an image matrix; with the default
// mean = std = 0.5 this maps pixel values from [0, 255] to [-1, 1].
void normalize(cv::Mat& matrix, double mean = 0.5, double std = 0.5);

}  // namespace data

#endif
|
cpp/utils/include/util.hpp
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef UTIL_H
#define UTIL_H
// Fix: include what this header uses. clock_t needs <ctime> and std::string
// needs <string>; previously both were only available transitively, so the
// header was not self-contained.
#include <ctime>
#include <iostream>
#include <list>
#include <string>
namespace util {

// Measures elapsed CPU time between start() and end();
// end() prints the duration in seconds.
class Timer {
 private:
  clock_t tstart, tend;

 public:
  void start();
  void end();
};

// Return the current working directory.
// std::string path = util::current_path();
std::string current_path();

// Join path components with '/' separators.
// std::string out = util::pathjoin({path, "b", "c"});
std::string pathjoin(const std::list<std::string>& strs);

// True if the path exists (stat() succeeds); directories count too.
bool isfile(const std::string& name);

}  // namespace util

#endif
|
cpp/utils/src/data.cpp
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "data.hpp"
|
| 2 |
+
#include <opencv2/opencv.hpp>
|
| 3 |
+
|
| 4 |
+
namespace data {
// Normalize pixel values in place. With mean = std = 0.5 (the declared
// defaults) this maps [0, 255] to [-1, 1].
void normalize(cv::Mat& matrix, double mean, double std) {
  // matrix = (matrix / 255.0 - mean) / std;
  // Algebraically identical single-expression form of the line above.
  matrix = matrix / (255.0 * std) - mean / std;
}

}  // namespace data
|
cpp/utils/src/util.cpp
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "util.hpp"
|
| 2 |
+
#include <stdio.h>
|
| 3 |
+
#include <sys/stat.h>
|
| 4 |
+
#include <unistd.h>
|
| 5 |
+
#include <iostream>
|
| 6 |
+
#include <list>
|
| 7 |
+
#include <vector>
|
| 8 |
+
|
| 9 |
+
namespace util {
|
| 10 |
+
|
| 11 |
+
// Record the starting CPU clock tick.
void Timer::start() {
  tstart = clock();
}
// Record the ending tick and print the elapsed time, in seconds,
// since the matching start() call.
void Timer::end() {
  tend = clock();
  double dur;
  dur = (double)(tend - tstart);
  std::cout << "Cost Time:" << (dur / CLOCKS_PER_SEC) << "\n";
}
|
| 20 |
+
|
| 21 |
+
// Return the current working directory as a std::string.
// Fix: the original called getcwd(NULL, 0), which mallocs a buffer that was
// never freed — a memory leak on every call — and would dereference NULL on
// failure. A fixed-size stack buffer avoids the allocation entirely; on
// failure (e.g. a path longer than the buffer) an empty string is returned.
std::string current_path() {
  char buffer[4096];
  if (getcwd(buffer, sizeof(buffer)) == NULL) {
    return "";
  }
  return std::string(buffer);
}
|
| 26 |
+
|
| 27 |
+
// Join path components with '/' — a separator is inserted before every
// component after the first unless that component already begins with '/'.
std::string pathjoin(const std::list<std::string>& strs) {
  std::string joined;
  bool first = true;
  for (const std::string& part : strs) {
    if (!first && part[0] != '/') {
      joined += "/";
    }
    joined += part;
    first = false;
  }
  return joined;
}
|
| 44 |
+
|
| 45 |
+
// Return true when `name` refers to an existing filesystem entry.
// Note: stat() also succeeds for directories, so this really tests
// existence rather than "is a regular file".
bool isfile(const std::string& name) {
  struct stat info;
  return stat(name.c_str(), &info) == 0;
}
|
| 49 |
+
} // namespace util
|
deepmosaic.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import traceback
|
| 4 |
+
try:
|
| 5 |
+
from cores import Options,add,clean,style
|
| 6 |
+
from util import util
|
| 7 |
+
from models import loadmodel
|
| 8 |
+
except Exception as e:
|
| 9 |
+
print(e)
|
| 10 |
+
input('Please press any key to exit.\n')
|
| 11 |
+
sys.exit(0)
|
| 12 |
+
|
| 13 |
+
# Parse command-line options once at module load; test_flag=True enables the
# extra validation path used by the packaged build. Then make sure the
# temp-file directory structure exists before main() runs.
opt = Options().getparse(test_flag = True)
if not os.path.isdir(opt.temp_dir):
    util.file_init(opt)
|
| 16 |
+
|
| 17 |
+
def main():
    """Dispatch each input file to the pipeline chosen by opt.mode.

    opt.mode is one of 'add' (add mosaic), 'clean' (remove mosaic) or
    'style' (style transfer); opt.media_path may be a single file or a
    directory (every file inside is processed).
    """
    # NOTE(review): indentation of the trailing clean_tempfiles() calls was
    # reconstructed from a mangled paste — confirm against upstream.
    if os.path.isdir(opt.media_path):
        files = util.Traversal(opt.media_path)
    else:
        files = [opt.media_path]
    if opt.mode == 'add':
        # netS segments the region of interest that should receive the mosaic.
        netS = loadmodel.bisenet(opt,'roi')
        for file in files:
            opt.media_path = file
            if util.is_img(file):
                add.addmosaic_img(opt,netS)
            elif util.is_video(file):
                add.addmosaic_video(opt,netS)
                util.clean_tempfiles(opt, tmp_init = False)
            else:
                print('This type of file is not supported')
        util.clean_tempfiles(opt, tmp_init = False)

    elif opt.mode == 'clean':
        # netM locates existing mosaic regions; netG restores them, unless the
        # traditional (non-learning) method was requested.
        netM = loadmodel.bisenet(opt,'mosaic')
        if opt.traditional:
            netG = None
        elif opt.netG == 'video':
            netG = loadmodel.video(opt)
        else:
            netG = loadmodel.pix2pix(opt)

        for file in files:
            opt.media_path = file
            if util.is_img(file):
                clean.cleanmosaic_img(opt,netG,netM)
            elif util.is_video(file):
                # The 'video' generator fuses neighbouring frames; any other
                # generator (and the traditional method) works frame by frame.
                if opt.netG == 'video' and not opt.traditional:
                    clean.cleanmosaic_video_fusion(opt,netG,netM)
                else:
                    clean.cleanmosaic_video_byframe(opt,netG,netM)
                util.clean_tempfiles(opt, tmp_init = False)
            else:
                print('This type of file is not supported')

    elif opt.mode == 'style':
        netG = loadmodel.style(opt)
        for file in files:
            opt.media_path = file
            if util.is_img(file):
                style.styletransfer_img(opt,netG)
            elif util.is_video(file):
                style.styletransfer_video(opt,netG)
                util.clean_tempfiles(opt, tmp_init = False)
            else:
                print('This type of file is not supported')

    util.clean_tempfiles(opt, tmp_init = False)
|
| 71 |
+
|
| 72 |
+
if __name__ == '__main__':
    # In debug mode let exceptions propagate with a normal traceback.
    if opt.debug:
        main()
        sys.exit(0)
    try:
        main()
        print('Finished!')
    except Exception as ex:
        # Packaged-build error report: dump environment info plus the
        # traceback so users can paste it into a bug report, then wait for a
        # key press so the console window does not close immediately.
        print('--------------------ERROR--------------------')
        print('--------------Environment--------------')
        print('DeepMosaics: 0.5.1')
        print('Python:',sys.version)
        import torch
        print('Pytorch:',torch.__version__)
        import cv2
        print('OpenCV:',cv2.__version__)
        import platform
        print('Platform:',platform.platform())

        print('--------------BUG--------------')
        ex_type, ex_val, ex_stack = sys.exc_info()
        print('Error Type:',ex_type)
        print(ex_val)
        for stack in traceback.extract_tb(ex_stack):
            print(stack)
        input('Please press any key to exit.\n')
        #util.clean_tempfiles(tmp_init = False)
        sys.exit(0)
|
docs/Release_notes.txt
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
DeepMosaics: 0.5.1
|
| 2 |
+
Core building with:
|
| 3 |
+
Python: 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
|
| 4 |
+
Pytorch: 1.7.1
|
| 5 |
+
OpenCV: 4.1.2
|
| 6 |
+
Platform: Windows-10-10.0.19041-SP0
|
| 7 |
+
Driver Version: 461.40
|
| 8 |
+
CUDA:11.0
|
| 9 |
+
GUI building with C#
|
| 10 |
+
For more detail, please view on github: https://github.com/HypoX64/DeepMosaics
|
| 11 |
+
|
| 12 |
+
Releases History
|
| 13 |
+
V0.5.1
|
| 14 |
+
Fix:
|
| 15 |
+
1.Fix Some BUGs when restore unfinished tasks.
|
| 16 |
+
2.Fix that audio and video are not synchronized when the video is too long.
|
| 17 |
+
New:
|
| 18 |
+
1.Speed up video processing by Asynchronous.
|
| 19 |
+
V0.5.0
|
| 20 |
+
1.New video model (Perform better)
|
| 21 |
+
V0.4.1
|
| 22 |
+
1.Allow unfinished tasks to be restored.
|
| 23 |
+
2.Clean cache during processing.
|
| 24 |
+
3.Support CUDA 11.0.
|
| 25 |
+
V0.4.0
|
| 26 |
+
1.Support GPU.
|
| 27 |
+
2.Preview images when processing video.
|
| 28 |
+
3.Choose start position of video.
|
| 29 |
+
V0.3.0
|
| 30 |
+
1. Support BiSeNet(Better recognition of mosaics).
|
| 31 |
+
2. New videoHD model.
|
| 32 |
+
3. Better feathering method.
|
| 33 |
+
V0.2.0
|
| 34 |
+
1. Add video model.
|
| 35 |
+
2. Now you can input Chinese paths
|
| 36 |
+
3. Support style transfer
|
| 37 |
+
4. Support fps limit
|
| 38 |
+
V0.1.2
|
| 39 |
+
1. Support pix2pixHD model
|
| 40 |
+
V0.1.1
|
| 41 |
+
1. Check path, can't input illegal path
|
| 42 |
+
V0.1.0
|
| 43 |
+
1. Initial release.
|
docs/exe_help.md
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## DeepMosaics.exe Instructions
|
| 2 |
+
**[[中文版]](./exe_help_CN.md)**
|
| 3 |
+
This is a GUI version compiled in Windows.<br>
|
| 4 |
+
Download this version and pre-trained model via [[Google Drive]](https://drive.google.com/open?id=1LTERcN33McoiztYEwBxMuRjjgxh4DEPs) [[百度云,提取码1x0a]](https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ) <br>
|
| 5 |
+
Video tutorial => [[youtube]](https://www.youtube.com/watch?v=1kEmYawJ_vk) [[bilibili]](https://www.bilibili.com/video/BV1QK4y1a7Av)<br>
|
| 6 |
+
|
| 7 |
+
Attentions:<br>
|
| 8 |
+
|
| 9 |
+
- Require Windows_x86_64, Windows10 is better.<br>
|
| 10 |
+
- Different pre-trained models are suitable for different effects.<br>
|
| 11 |
+
- Run time depends on computer performance.<br>
|
| 12 |
+
- If output video cannot be played, you can try with [potplayer](https://daumpotplayer.com/download/).<br>
|
| 13 |
+
- GUI version update slower than source.<br>
|
| 14 |
+
|
| 15 |
+
### How to install
|
| 16 |
+
#### CPU version
|
| 17 |
+
* 1.Download and install Microsoft Visual C++
|
| 18 |
+
https://aka.ms/vs/16/release/vc_redist.x64.exe
|
| 19 |
+
#### GPU version
|
| 20 |
+
Only supports NVIDIA GPUs at GTX 1060 level or above (Driver: above 460 & CUDA: 11.0)
|
| 21 |
+
* 1.Download and install Microsoft Visual C++
|
| 22 |
+
https://aka.ms/vs/16/release/vc_redist.x64.exe
|
| 23 |
+
* 2.Update your GPU driver to 460 (or above)
|
| 24 |
+
https://www.nvidia.com/en-us/geforce/drivers/
|
| 25 |
+
* 3.Download and install CUDA 11.0:
|
| 26 |
+
https://developer.nvidia.com/cuda-toolkit-archive
|
| 27 |
+
|
| 28 |
+
You can also download them on BaiduNetdisk
|
| 29 |
+
https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ
|
| 30 |
+
Password: 1x0a
|
| 31 |
+
|
| 32 |
+
### How to use
|
| 33 |
+
* step 1: Choose image or video.
|
| 34 |
+
* step 2: Choose model(Different pre-trained models are suitable for different effects)
|
| 35 |
+
* step 3: Run program and wait.
|
| 36 |
+
* step 4: Check the result in './result'.
|
| 37 |
+
|
| 38 |
+
### Introduction to pre-trained models
|
| 39 |
+
* Mosaic
|
| 40 |
+
|
| 41 |
+
| Name | Description |
|
| 42 |
+
| :------------------------------: | :---------------------------------------------------------: |
|
| 43 |
+
| add_face.pth | Add mosaic to all faces in images/videos. |
|
| 44 |
+
| clean_face_HD.pth | Clean mosaic to all faces in images/video.<br>(RAM > 8GB). |
|
| 45 |
+
| add_youknow.pth | Add mosaic to ... in images/videos. |
|
| 46 |
+
| clean_youknow_resnet_9blocks.pth | Clean mosaic to ... in images/videos. |
|
| 47 |
+
| clean_youknow_video.pth | Clean mosaic to ... in videos. It is better for processing video mosaics |
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
* Style Transfer
|
| 51 |
+
|
| 52 |
+
| Name | Description |
|
| 53 |
+
| :---------------------: | :-------------------------------------------------------: |
|
| 54 |
+
| style_apple2orange.pth | Convert apples to oranges. |
|
| 55 |
+
| style_orange2apple.pth | Convert oranges to apples |
|
| 56 |
+
| style_summer2winter.pth | Convert summer to winter. |
|
| 57 |
+
| style_winter2summer.pth | Convert winter to summer. |
|
| 58 |
+
| style_cezanne.pth | Convert photos/video to Paul Cézanne style. |
|
| 59 |
+
| style_monet.pth | Convert photos/video to Claude Monet style. |
|
| 60 |
+
| style_ukiyoe.pth | Convert photos/video to Ukiyoe style. |
|
| 61 |
+
| style_vangogh.pth | Convert photos/video to Van Gogh style. |
|
| 62 |
+
### Annotation
|
| 63 |
+
<br>
|
| 64 |
+
* 1. Choose image or video.
|
| 65 |
+
* 2. Choose model(Different pre-trained models are suitable for different effects).
|
| 66 |
+
* 3. Program running mode. (auto | add | clean | style)
|
| 67 |
+
* 4. Use GPU to run deep learning model. (The current version does not support gpu, if you need to use gpu please run source).
|
| 68 |
+
* 5. Limit the fps of the output video(0->original fps).
|
| 69 |
+
* 6. More options.
|
| 70 |
+
* 7. More options can be input.
|
| 71 |
+
* 8. Run program.
|
| 72 |
+
* 9. Open help file.
|
| 73 |
+
* 10. Sponsor our project.
|
| 74 |
+
* 11. Version information.
|
| 75 |
+
* 12. Open the URL on github.
|
| 76 |
+
|
| 77 |
+
### Introduction to options
|
| 78 |
+
If you need more effects, use '--option your-parameters' to enter what you need.
|
| 79 |
+
* Base
|
| 80 |
+
|
| 81 |
+
| Option | Description | Default |
|
| 82 |
+
| :----------: | :----------------------------------------: | :-------------------------------------: |
|
| 83 |
+
| --gpu_id | if -1, do not use gpu | 0 |
|
| 84 |
+
| --media_path | your videos or images path | ./imgs/ruoruo.jpg |
|
| 85 |
+
| --mode | program running mode(auto/clean/add/style) | 'auto' |
|
| 86 |
+
| --model_path | pretrained model path | ./pretrained_models/mosaic/add_face.pth |
|
| 87 |
+
| --result_dir | output media will be saved here | ./result |
|
| 88 |
+
| --fps | read and output fps, if 0-> origin | 0 |
|
| 89 |
+
|
| 90 |
+
* AddMosaic
|
| 91 |
+
|
| 92 |
+
| Option | Description | Default |
|
| 93 |
+
| :--------------: | :----------------------------------------------------------: | :------: |
|
| 94 |
+
| --mosaic_mod | type of mosaic -> squa_avg/ squa_random/ squa_avg_circle_edge/ rect_avg/random | squa_avg |
|
| 95 |
+
| --mosaic_size | mosaic size,if 0 -> auto size | 0 |
|
| 96 |
+
| --mask_extend | extend mosaic area | 10 |
|
| 97 |
+
| --mask_threshold | threshold of recognize mosaic position 0~255 | 64 |
|
| 98 |
+
|
| 99 |
+
* CleanMosaic
|
| 100 |
+
|
| 101 |
+
| Option | Description | Default |
|
| 102 |
+
| :-----------: | :----------------------------------------------------------: | :-----: |
|
| 103 |
+
| --traditional | if specified, use traditional image processing methods to clean mosaic | |
|
| 104 |
+
| --tr_blur | ksize of blur when using traditional method, it will affect final quality | 10 |
|
| 105 |
+
| --tr_down | downsample when using traditional method,it will affect final quality | 10 |
|
| 106 |
+
| --medfilt_num | medfilt window of mosaic movement in the video | 11 |
|
| 107 |
+
|
| 108 |
+
* Style Transfer
|
| 109 |
+
|
| 110 |
+
| Option | Description | Default |
|
| 111 |
+
| :-----------: | :----------------------------------: | :-----: |
|
| 112 |
+
| --output_size | size of output media, if 0 -> origin | 512 |
|
docs/exe_help_CN.md
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## DeepMosaics.exe 使用说明
|
| 2 |
+
下载程序以及预训练模型 [[Google Drive]](https://drive.google.com/open?id=1LTERcN33McoiztYEwBxMuRjjgxh4DEPs) [[百度云,提取码1x0a]](https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ) <br>
|
| 3 |
+
[视频教程](https://www.bilibili.com/video/BV1QK4y1a7Av)<br>
|
| 4 |
+
|
| 5 |
+
注意事项:<br>
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
- 程序的运行要求在64位Windows操作系统,我们仅在Windows10运行过,其他版本暂未经过测试<br>
|
| 9 |
+
- 请根据需求选择合适的预训练模型进行测试<br>
|
| 10 |
+
- 运行时间取决于电脑性能,对于视频文件,我们建议使用GPU运行<br>
|
| 11 |
+
- 如果输出的视频无法播放,这边建议您尝试[potplayer](https://daumpotplayer.com/download/).<br>
|
| 12 |
+
- 相比于源码,该版本的更新将会延后.
|
| 13 |
+
|
| 14 |
+
### 如何安装
|
| 15 |
+
#### CPU version
|
| 16 |
+
* 1.下载安装 Microsoft Visual C++
|
| 17 |
+
https://aka.ms/vs/16/release/vc_redist.x64.exe
|
| 18 |
+
#### GPU version
|
| 19 |
+
仅支持gtx1060及以上的NVidia显卡(要求460版本以上的驱动以及11.0版本的CUDA, 注意只能是11.0)
|
| 20 |
+
* 1.Download and install Microsoft Visual C++
|
| 21 |
+
https://aka.ms/vs/16/release/vc_redist.x64.exe
|
| 22 |
+
* 2.Update your GPU driver to 460 (or above)
|
| 23 |
+
https://www.nvidia.com/en-us/geforce/drivers/
|
| 24 |
+
* 3.Download and install CUDA 11.0:
|
| 25 |
+
https://developer.nvidia.com/cuda-toolkit-archive
|
| 26 |
+
|
| 27 |
+
当然这些也能在百度云上下载
|
| 28 |
+
https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ
|
| 29 |
+
提取码: 1x0a
|
| 30 |
+
|
| 31 |
+
### 如何使用
|
| 32 |
+
|
| 33 |
+
* step 1: 选择需要处理的图片或视频
|
| 34 |
+
* step 2: 选择预训练模型(不同的预训练模型有不同的效果)
|
| 35 |
+
* step 3: 运行程序并等待
|
| 36 |
+
* step 4: 查看结果(储存在result文件夹下)
|
| 37 |
+
|
| 38 |
+
## 预训练模型说明
|
| 39 |
+
当前的预训练模型分为两类——添加/移除马赛克以及风格转换.
|
| 40 |
+
|
| 41 |
+
* 马赛克
|
| 42 |
+
|
| 43 |
+
| 文件名 | 描述 |
|
| 44 |
+
| :------------------------------: | :-------------------------------------------: |
|
| 45 |
+
| add_face.pth | 对图片或视频中的脸部打码 |
|
| 46 |
+
| clean_face_HD.pth | 对图片或视频中的脸部去码<br>(要求内存 > 8GB). |
|
| 47 |
+
| add_youknow.pth | 对图片或视频中的...内容打码 |
|
| 48 |
+
| clean_youknow_resnet_9blocks.pth | 对图片或视频中的...内容去码 |
|
| 49 |
+
| clean_youknow_video.pth | 对视频中的...内容去码,推荐使用带有'video'的模型去除视频中的马赛克 |
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
* 风格转换
|
| 53 |
+
|
| 54 |
+
| 文件名 | 描述 |
|
| 55 |
+
| :---------------------: | :-------------------------------------------------------: |
|
| 56 |
+
| style_apple2orange.pth | 苹果变橙子 |
|
| 57 |
+
| style_orange2apple.pth | 橙子变苹果 |
|
| 58 |
+
| style_summer2winter.pth | 夏天变冬天 |
|
| 59 |
+
| style_winter2summer.pth | 冬天变夏天 |
|
| 60 |
+
| style_cezanne.pth | 转化为Paul Cézanne 的绘画风格 |
|
| 61 |
+
| style_monet.pth | 转化为Claude Monet的绘画风格 |
|
| 62 |
+
| style_ukiyoe.pth | 转化为Ukiyoe的绘画风格 |
|
| 63 |
+
| style_vangogh.pth | 转化为Van Gogh的绘画风格 |
|
| 64 |
+
|
| 65 |
+
### GUI界面注释
|
| 66 |
+
<br>
|
| 67 |
+
* 1. 选择需要处理的图片或视频
|
| 68 |
+
* 2. 选择预训练模型
|
| 69 |
+
* 3. 程序运行模式 (auto | add | clean | style)
|
| 70 |
+
* 4. 使用GPU (该版本目前不支持GPU,若需要使用GPU请使用源码运行).
|
| 71 |
+
* 5. 限制输出的视频帧率(0->原始帧率).
|
| 72 |
+
* 6. 更多的选项以及参数
|
| 73 |
+
* 7. 自行输入更多参数,详见下文
|
| 74 |
+
* 8. 运行
|
| 75 |
+
* 9. 打开帮助文件
|
| 76 |
+
* 10. 支持我们
|
| 77 |
+
* 11. 版本信息
|
| 78 |
+
* 12. 打开项目的github页面
|
| 79 |
+
|
| 80 |
+
### 参数说明
|
| 81 |
+
如果需要更多的效果, 请按照 '--option your-parameters' 输入所需要的参数
|
| 82 |
+
* 基本
|
| 83 |
+
|
| 84 |
+
| 选项 | 描述 | 默认 |
|
| 85 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 86 |
+
| --gpu_id | if -1, do not use gpu | 0 |
|
| 87 |
+
| --media_path | 需要处理的视频或者照片的路径 | ./imgs/ruoruo.jpg |
|
| 88 |
+
| --mode | 运行模式(auto/clean/add/style) | 'auto' |
|
| 89 |
+
| --model_path | 预训练模型的路径 | ./pretrained_models/mosaic/add_face.pth |
|
| 90 |
+
| --result_dir | 保存路径 | ./result |
|
| 91 |
+
| --fps | 限制视频输出的fps,0则为默认 | 0 |
|
| 92 |
+
* 添加马赛克
|
| 93 |
+
|
| 94 |
+
| 选项 | 描述 | 默认 |
|
| 95 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 96 |
+
| --mosaic_mod | 马赛克类型 -> squa_avg/ squa_random/ squa_avg_circle_edge/ rect_avg/random | squa_avg |
|
| 97 |
+
| --mosaic_size | 马赛克大小,0则为自动 | 0 |
|
| 98 |
+
| --mask_extend | 拓展马赛克区域 | 10 |
|
| 99 |
+
| --mask_threshold | 马赛克区域识别阈值 0~255,越小越容易被判断为马赛克区域 | 64 |
|
| 100 |
+
|
| 101 |
+
* 去除马赛克
|
| 102 |
+
|
| 103 |
+
| 选项 | 描述 | 默认 |
|
| 104 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 105 |
+
| --traditional | 如果输入这个参数则使用传统方法清除马赛克 | |
|
| 106 |
+
| --tr_blur | 传统方法模糊尺寸 | 10 |
|
| 107 |
+
| --tr_down | 传统方法下采样尺寸 | 10 |
|
| 108 |
+
| --medfilt_num | medfilt window of mosaic movement in the video | 11 |
|
| 109 |
+
|
| 110 |
+
* 风格转换
|
| 111 |
+
|
| 112 |
+
| 选项 | 描述 | 默认 |
|
| 113 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 114 |
+
| --output_size | 输出媒体的尺寸,如果是0则为原始尺寸 |512|
|
docs/options_introduction.md
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## Introduction to options
|
| 2 |
+
If you need more effects, use '--option your-parameters' to enter what you need.
|
| 3 |
+
|
| 4 |
+
### Base
|
| 5 |
+
|
| 6 |
+
| Option | Description | Default |
|
| 7 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 8 |
+
| --gpu_id | if -1, do not use gpu | 0 |
|
| 9 |
+
| --media_path | your videos or images path | ./imgs/ruoruo.jpg |
|
| 10 |
+
| --start_time | start position of video, default is the beginning of video | '00:00:00' |
|
| 11 |
+
| --last_time | limit the duration of the video, default is the entire video | '00:00:00' |
|
| 12 |
+
| --mode | program running mode(auto/clean/add/style) | 'auto' |
|
| 13 |
+
| --model_path | pretrained model path | ./pretrained_models/mosaic/add_face.pth |
|
| 14 |
+
| --result_dir | output media will be saved here| ./result |
|
| 15 |
+
| --temp_dir | Temporary files will go here | ./tmp |
|
| 16 |
+
| --fps | read and output fps, if 0-> origin | 0 |
|
| 17 |
+
| --no_preview | if specified, do not preview images when processing video, e.g. when running it on a server | False |
|
| 18 |
+
|
| 19 |
+
### AddMosaic
|
| 20 |
+
|
| 21 |
+
| Option | Description | Default |
|
| 22 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 23 |
+
| --mosaic_mod | type of mosaic -> squa_avg/ squa_random/ squa_avg_circle_edge/ rect_avg/random | squa_avg |
|
| 24 |
+
| --mosaic_size | mosaic size,if 0 -> auto size | 0 |
|
| 25 |
+
| --mask_extend | extend mosaic area | 10 |
|
| 26 |
+
| --mask_threshold | threshold of recognize mosaic position 0~255 | 64 |
|
| 27 |
+
|
| 28 |
+
### CleanMosaic
|
| 29 |
+
|
| 30 |
+
| Option | Description | Default |
|
| 31 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 32 |
+
| --traditional | if specified, use traditional image processing methods to clean mosaic | |
|
| 33 |
+
| --tr_blur | ksize of blur when using traditional method, it will affect final quality | 10 |
|
| 34 |
+
| --tr_down | downsample when using traditional method,it will affect final quality | 10 |
|
| 35 |
+
| --medfilt_num | medfilt window of mosaic movement in the video | 11 |
|
| 36 |
+
|
| 37 |
+
### Style Transfer
|
| 38 |
+
|
| 39 |
+
| Option | Description | Default |
|
| 40 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 41 |
+
| --output_size | size of output media, if 0 -> origin |512|
|
docs/options_introduction_CN.md
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## 参数说明
|
| 2 |
+
如果需要更多的效果, 请按照 '--option your-parameters' 输入所需要的参数
|
| 3 |
+
|
| 4 |
+
### 基本
|
| 5 |
+
|
| 6 |
+
| 选项 | 描述 | 默认 |
|
| 7 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 8 |
+
| --gpu_id | if -1, do not use gpu | 0 |
|
| 9 |
+
| --media_path | 需要处理的视频或者照片的路径 | ./imgs/ruoruo.jpg |
|
| 10 |
+
| --start_time | 视频开始处理的位置,默认从头开始 | '00:00:00' |
|
| 11 |
+
| --last_time | 处理的视频时长,默认是整个视频 | '00:00:00' |
|
| 12 |
+
| --mode | 运行模式(auto/clean/add/style) | 'auto' |
|
| 13 |
+
| --model_path | 预训练模型的路径 | ./pretrained_models/mosaic/add_face.pth |
|
| 14 |
+
| --result_dir | 保存路径 | ./result |
|
| 15 |
+
| --temp_dir | 临时文件存储目录 | ./tmp |
|
| 16 |
+
| --fps | 限制视频输出的fps,0则为默认 | 0 |
|
| 17 |
+
| --no_preview | 如果输入,将不会在处理视频时播放实时预览.比如当你在服务器运行的时候 | False |
|
| 18 |
+
|
| 19 |
+
### 添加马赛克
|
| 20 |
+
|
| 21 |
+
| 选项 | 描述 | 默认 |
|
| 22 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 23 |
+
| --mosaic_mod | 马赛克类型 -> squa_avg/ squa_random/ squa_avg_circle_edge/ rect_avg/random | squa_avg |
|
| 24 |
+
| --mosaic_size | 马赛克大小,0则为自动 | 0 |
|
| 25 |
+
| --mask_extend | 拓展马赛克区域 | 10 |
|
| 26 |
+
| --mask_threshold | 马赛克区域识别阈值 0~255 | 64 |
|
| 27 |
+
|
| 28 |
+
### 去除马赛克
|
| 29 |
+
|
| 30 |
+
| 选项 | 描述 | 默认 |
|
| 31 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 32 |
+
| --traditional | 如果输入这个参数则使用传统方法清除马赛克 | |
|
| 33 |
+
| --tr_blur | 传统方法模糊尺寸 | 10 |
|
| 34 |
+
| --tr_down | 传统方法下采样尺寸 | 10 |
|
| 35 |
+
| --medfilt_num | medfilt window of mosaic movement in the video | 11 |
|
| 36 |
+
|
| 37 |
+
### 风格转换
|
| 38 |
+
|
| 39 |
+
| 选项 | 描述 | 默认 |
|
| 40 |
+
| :----------: | :------------------------: | :-------------------------------------: |
|
| 41 |
+
| --output_size | 输出媒体的尺寸,如果是0则为原始尺寸 |512|
|
docs/pre-trained_models_introduction.md
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## Introduction to pre-trained models
|
| 2 |
+
The current pre-trained models are divided into two categories(Add/Clean mosaic and StyleTransfer).
|
| 3 |
+
Download pre-trained model via [[Google Drive]](https://drive.google.com/open?id=1LTERcN33McoiztYEwBxMuRjjgxh4DEPs) [[百度云,提取码1x0a]](https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ) <br>
|
| 4 |
+
|
| 5 |
+
### Mosaic
|
| 6 |
+
|
| 7 |
+
| Name | Description |
|
| 8 |
+
| :------------------------------: | :-----------------------------------------------------: |
|
| 9 |
+
| add_face.pth | Add mosaic to faces in images/videos. |
|
| 10 |
+
| clean_face_HD.pth | Remove mosaic from faces in images/videos.<br>(requires RAM > 8GB). |
|
| 11 |
+
| add_youknow.pth | Add mosaic to ... in images/videos. |
|
| 12 |
+
| clean_youknow_resnet_9blocks.pth | Clean mosaic to ... in images/videos. |
|
| 13 |
+
| clean_youknow_video.pth | Clean mosaic to ... in videos. It is better for processing video mosaics |
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
### Style Transfer
|
| 17 |
+
|
| 18 |
+
| Name | Description |
|
| 19 |
+
| :---------------------: | :-------------------------------------------------------: |
|
| 20 |
+
| style_apple2orange.pth | Convert apples to oranges. |
|
| 21 |
+
| style_orange2apple.pth | Convert oranges to apples |
|
| 22 |
+
| style_summer2winter.pth | Convert summer to winter. |
|
| 23 |
+
| style_winter2summer.pth | Convert winter to summer. |
|
| 24 |
+
| style_cezanne.pth | Convert photos/video to Paul Cézanne style. |
|
| 25 |
+
| style_monet.pth | Convert photos/video to Claude Monet style. |
|
| 26 |
+
| style_ukiyoe.pth | Convert photos/video to Ukiyoe style. |
|
| 27 |
+
| style_vangogh.pth | Convert photos/video to Van Gogh style. |
|
| 28 |
+
|
docs/pre-trained_models_introduction_CN.md
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## 预训练模型说明
|
| 2 |
+
当前的预训练模型分为两类——添加/移除马赛克以及风格转换.
|
| 3 |
+
可以通过以下方式下载预训练模型 [[Google Drive]](https://drive.google.com/open?id=1LTERcN33McoiztYEwBxMuRjjgxh4DEPs) [[百度云,提取码1x0a]](https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ) <br>
|
| 4 |
+
|
| 5 |
+
### 马赛克
|
| 6 |
+
|
| 7 |
+
| 文件名 | 描述 |
|
| 8 |
+
| :------------------------------: | :-------------------------------------------: |
|
| 9 |
+
| add_face.pth | 对图片或视频中的脸部打码 |
|
| 10 |
+
| clean_face_HD.pth | 对图片或视频中的脸部去码<br>(要求内存 > 8GB). |
|
| 11 |
+
| add_youknow.pth | 对图片或视频中的...内容打码 |
|
| 12 |
+
| clean_youknow_resnet_9blocks.pth | 对图片或视频中的...内容去码 |
|
| 13 |
+
| clean_youknow_video.pth | 对视频中的...内容去码,推荐使用带有'video'的模型去除视频中的马赛克 |
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
### 风格转换
|
| 17 |
+
|
| 18 |
+
| 文件名 | 描述 |
|
| 19 |
+
| :---------------------: | :-------------------------------------------------------: |
|
| 20 |
+
| style_apple2orange.pth | 苹果变橙子 |
|
| 21 |
+
| style_orange2apple.pth | 橙子变苹果 |
|
| 22 |
+
| style_summer2winter.pth | 夏天变冬天 |
|
| 23 |
+
| style_winter2summer.pth | 冬天变夏天 |
|
| 24 |
+
| style_cezanne.pth | 转化为Paul Cézanne 的绘画风格 |
|
| 25 |
+
| style_monet.pth | 转化为Claude Monet的绘画风格 |
|
| 26 |
+
| style_ukiyoe.pth | 转化为Ukiyoe的绘画风格 |
|
| 27 |
+
| style_vangogh.pth | 转化为Van Gogh的绘画风格 |
|
| 28 |
+
|
docs/training_with_your_own_dataset.md
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Training with your own dataset
|
| 2 |
+
Training with your own dataset requires a GPU with 6G memory (above GTX1060).<br>
|
| 3 |
+
We will make "face" as an example. If you don't have any picture, you can download [CelebA](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) or [WIDER](http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/WiderFace_Results.html).
|
| 4 |
+
|
| 5 |
+
## Getting Started
|
| 6 |
+
#### Prerequisites
|
| 7 |
+
- Linux, Mac OS, Windows
|
| 8 |
+
- Python 3.6+
|
| 9 |
+
- [ffmpeg 3.4.6](http://ffmpeg.org/)
|
| 10 |
+
- [Pytorch 1.0+](https://pytorch.org/)
|
| 11 |
+
- NVIDIA GPU(with more than 6G memory) + CUDA CuDNN<br>
|
| 12 |
+
#### Dependencies
|
| 13 |
+
This code depends on opencv-python, torchvision, matplotlib, tensorboardX, scikit-image available via conda install.
|
| 14 |
+
```bash
|
| 15 |
+
# or
|
| 16 |
+
pip install -r requirements.txt
|
| 17 |
+
```
|
| 18 |
+
#### Clone this repo
|
| 19 |
+
```bash
|
| 20 |
+
git clone https://github.com/HypoX64/DeepMosaics
|
| 21 |
+
cd DeepMosaics
|
| 22 |
+
```
|
| 23 |
+
## Make training datasets
|
| 24 |
+
```bash
|
| 25 |
+
cd make_datasets
|
| 26 |
+
```
|
| 27 |
+
### Add mosaic dataset
|
| 28 |
+
Please generate masks for the images to which you want to add mosaic (the number of images should be above 1000). Then put the images in ```face/origin_image``` and the masks in ```face/mask```.<br>
|
| 29 |
+
* You can use ```draw_mask.py```to generate them.
|
| 30 |
+
```bash
|
| 31 |
+
python draw_mask.py --datadir 'dir for your pictures' --savedir ../datasets/draw/face
|
| 32 |
+
#Press the left mouse button to draw the mask . Press 'S' to save mask, 'A' to reduce brush size, 'D' to increase brush size, 'W' to cancel drawing.
|
| 33 |
+
```
|
| 34 |
+
* If you want to get images from videos, you can use ```get_image_from_video.py```
|
| 35 |
+
```bash
|
| 36 |
+
python get_image_from_video.py --datadir 'dir for your videos' --savedir ../datasets/video2image --fps 1
|
| 37 |
+
```
|
| 38 |
+
### Clean mosaic dataset
|
| 39 |
+
We provide several methods for generating clean-mosaic datasets. However, for better results, we recommend training an addmosaic model on a small dataset first and using it to automatically generate datasets from a large one. (recommended: Method 2 (for images) & Method 4 (for videos))
|
| 40 |
+
* Method 1: Use drawn mask to make pix2pix(HD) datasets (Require``` origin_image``` and ```mask```)
|
| 41 |
+
```bash
|
| 42 |
+
python make_pix2pix_dataset.py --datadir ../datasets/draw/face --hd --outsize 512 --fold 1 --name face --savedir ../datasets/pix2pix/face --mod drawn --minsize 128 --square
|
| 43 |
+
```
|
| 44 |
+
* Method 2: Use addmosaic model to make pix2pix(HD) datasets (Require addmosaic pre-trained model)
|
| 45 |
+
```bash
|
| 46 |
+
python make_pix2pix_dataset.py --datadir 'dir for your pictures' --hd --outsize 512 --fold 1 --name face --savedir ../datasets/pix2pix/face --mod network --model_path ../pretrained_models/mosaic/add_face.pth --minsize 128 --square --mask_threshold 128
|
| 47 |
+
```
|
| 48 |
+
* Method 3: Use Irregular Masks to make pix2pix(HD) datasets (Require [Irregular Masks](https://nv-adlr.github.io/publication/partialconv-inpainting))
|
| 49 |
+
```bash
|
| 50 |
+
python make_pix2pix_dataset.py --datadir 'dir for your pictures' --hd --outsize 512 --fold 1 --name face --savedir ../datasets/pix2pix/face --mod irregular --irrholedir ../datasets/Irregular_Holes_mask --square
|
| 51 |
+
```
|
| 52 |
+
* Method 4: Use addmosaic model to make video datasets (Require addmosaic pre-trained model. This is better for processing video mosaics)
|
| 53 |
+
```bash
|
| 54 |
+
python make_video_dataset.py --model_path ../pretrained_models/mosaic/add_face.pth --gpu_id 0 --datadir 'dir for your videos' --savedir ../datasets/video/face
|
| 55 |
+
```
|
| 56 |
+
## Training
|
| 57 |
+
### Add
|
| 58 |
+
```bash
|
| 59 |
+
cd train/add
|
| 60 |
+
python train.py --gpu_id 0 --dataset ../../datasets/draw/face --savename face --loadsize 512 --finesize 360 --batchsize 16
|
| 61 |
+
```
|
| 62 |
+
### Clean
|
| 63 |
+
* For image datasets (generated by ```make_pix2pix_dataset.py```)
|
| 64 |
+
We use [pix2pix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) or [pix2pixHD](https://github.com/NVIDIA/pix2pixHD) to train model. We just take pix2pixHD as an example.
|
| 65 |
+
```bash
|
| 66 |
+
git clone https://github.com/NVIDIA/pix2pixHD
|
| 67 |
+
cd pix2pixHD
|
| 68 |
+
pip install dominate
|
| 69 |
+
python train.py --name face --resize_or_crop resize_and_crop --loadSize 563 --fineSize 512 --label_nc 0 --no_instance --dataroot ../datasets/pix2pix/face
|
| 70 |
+
```
|
| 71 |
+
* For video datasets (generated by ```make_video_dataset.py```)
|
| 72 |
+
```bash
|
| 73 |
+
cd train/clean
|
| 74 |
+
python train.py --dataset ../../datasets/video/face --savename face --n_blocks 4 --lambda_GAN 0.01 --loadsize 286 --finesize 256 --batchsize 16 --n_layers_D 2 --num_D 3 --n_epoch 200 --gpu_id 4,5,6,7 --load_thread 16
|
| 75 |
+
```
|
| 76 |
+
## Testing
|
| 77 |
+
Put saved network to ```./pretrained_models/mosaic/``` and rename it as ```add_face.pth``` or ```clean_face_HD.pth``` or ```clean_face_video_HD.pth```and then run ```deepmosaic.py --model_path ./pretrained_models/mosaic/your_model_name```
|
imgs/GUI.png
ADDED
|
imgs/GUI_Instructions.jpg
ADDED
|
imgs/example/SZU.jpg
ADDED
|
imgs/example/SZU_summer2winter.jpg
ADDED
|
imgs/example/SZU_vangogh.jpg
ADDED
|
imgs/example/a_dcp.png
ADDED
|
Git LFS Details
|
imgs/example/b_dcp.png
ADDED
|
Git LFS Details
|
imgs/example/face_a_clean.jpg
ADDED
|
imgs/example/face_a_mosaic.jpg
ADDED
|
imgs/example/face_b_clean.jpg
ADDED
|
imgs/example/face_b_mosaic.jpg
ADDED
|
imgs/example/lena.jpg
ADDED
|
imgs/example/lena_add.jpg
ADDED
|
imgs/example/lena_clean.jpg
ADDED
|
imgs/example/youknow.png
ADDED
|
imgs/example/youknow_add.png
ADDED
|
imgs/example/youknow_clean.png
ADDED
|
imgs/hand.gif
ADDED
|
Git LFS Details
|
imgs/lena.jpg
ADDED
|
imgs/logo.ico
ADDED
|
|
imgs/logo.png
ADDED
|
imgs/logo_withwords.png
ADDED
|
imgs/ruoruo.jpg
ADDED
|
make_datasets/cut_video.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import numpy as np
import cv2
import random
import csv

import sys
sys.path.append("..")
from util import util,ffmpeg
from util import image_processing as impro

# Collect every video file found under the download directory.
files = util.Traversal('/media/hypo/Media/download')
videos = util.is_videos(files)

# The CSV maps a video's basename to the start times of its usable clips.
useable_videos = []
video_dict = {}
reader = csv.reader(open('./csv/video_used_time.csv'))
for row in reader:
    useable_videos.append(row[0])
    video_dict[row[0]] = row[1:]

in_cnt = 0   # number of source videos that had usable clips
out_cnt = 1  # running index used to name the output clips
for video in videos:
    name = os.path.basename(video)
    if name in useable_videos:
        # Cut a fixed-length (5 second) clip starting at each recorded time.
        for start_time in video_dict[name]:
            ffmpeg.cut_video(video, start_time, '00:00:05', './video/' + '%04d' % out_cnt + '.mp4')
            out_cnt += 1
        in_cnt += 1
|
make_datasets/draw_mask.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
import numpy as np
import datetime
import os
import random

import sys
sys.path.append("..")
from cores import Options
from util import util
from util import image_processing as impro


# Command-line options: where to read images from and where to save results.
opt = Options()
opt.parser.add_argument('--datadir',type=str,default=' ', help='your images dir')
opt.parser.add_argument('--savedir',type=str,default='../datasets/draw/face', help='')
opt = opt.getparse()

# Output layout: <savedir>/mask for masks, <savedir>/origin_image for sources.
mask_savedir = os.path.join(opt.savedir,'mask')
img_savedir = os.path.join(opt.savedir,'origin_image')
util.makedirs(mask_savedir)
util.makedirs(img_savedir)

# Gather the image files and visit them in a random order.
filepaths = util.is_imgs(util.Traversal(opt.datadir))
random.shuffle(filepaths)
print('find image:',len(filepaths))
|
| 28 |
+
|
| 29 |
+
# Mouse-callback state for interactive mask drawing.
drawing = False  # True while the left mouse button is held down
ix,iy = -1,-1    # position where the current stroke began
brushsize = 20   # brush radius in pixels
def draw_circle(event,x,y,flags,param):
    """OpenCV mouse callback: paint green circles onto img_drawn while dragging."""
    global ix,iy,drawing,brushsize

    if event == cv2.EVENT_LBUTTONDOWN:
        # Begin a stroke and remember where it started.
        drawing = True
        ix,iy = x,y
    elif event == cv2.EVENT_MOUSEMOVE:
        # Only paint while the button is held.
        if drawing:
            cv2.circle(img_drawn,(x,y),brushsize,(0,255,0),-1)
    elif event == cv2.EVENT_LBUTTONUP:
        # Finish the stroke with one final dab at the release point.
        drawing = False
        cv2.circle(img_drawn,(x,y),brushsize,(0,255,0),-1)
|
| 47 |
+
|
| 48 |
+
def makemask(img_drawn):
    """Build a binary mask from the hand-drawn image.

    Every pixel painted pure green (BGR == (0, 255, 0)) by draw_circle
    becomes white (255, 255, 255); everything else stays black.

    Args:
        img_drawn: H x W x 3 uint8 BGR image containing green brush strokes.

    Returns:
        H x W x 3 uint8 mask image, same shape as the input.
    """
    mask = np.zeros(img_drawn.shape, np.uint8)
    # Vectorized replacement of the original per-pixel Python double loop
    # (the original comment noted it was "too slow"); identical selection:
    # channel 0 == 0 AND channel 1 == 255 AND channel 2 == 0.
    green = (img_drawn[:, :, 0] == 0) & (img_drawn[:, :, 1] == 255) & (img_drawn[:, :, 2] == 0)
    mask[green] = 255
    return mask
|
| 59 |
+
|
| 60 |
+
cnt = 0
for file in filepaths:
    try:
        cnt += 1
        img = impro.imread(file,loadsize=512)
        img_drawn = img.copy()
        cv2.namedWindow('image')
        cv2.setMouseCallback('image',draw_circle)  # register the drawing callback
        while True:
            cv2.imshow('image',img_drawn)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('s'):
                # Save: shrink the drawn image, extract the green mask,
                # then write the mask and the untouched original.
                img_drawn = impro.resize(img_drawn,256)
                mask = makemask(img_drawn)
                mask_path = os.path.join(mask_savedir,os.path.splitext(os.path.basename(file))[0]+'.png')
                cv2.imwrite(mask_path,mask)
                cv2.imwrite(os.path.join(img_savedir,os.path.basename(file)),img)
                print('Saved:',mask_path,mask)
                print('remain:',len(filepaths)-cnt)
                brushsize = 20  # reset brush for the next image
                break
            elif key == ord('a'):
                # Shrink brush, clamped to a minimum of 5.
                brushsize = max(5, brushsize - 5)
                print('brushsize:',brushsize)
            elif key == ord('d'):
                brushsize += 5
                print('brushsize:',brushsize)
            elif key == ord('w'):
                # Skip this image without saving anything.
                print('remain:',len(filepaths)-cnt)
                break
    except Exception as e:
        # Best-effort: report the failing file and move on to the next one.
        print(file,e)
|
| 96 |
+
|