|
|
import tqdm |
|
|
|
|
|
import numpy as np |
|
|
|
|
|
|
|
|
import gradio as gr |
|
|
import os |
|
|
|
|
|
|
|
|
|
|
|
# Runtime dependency bootstrap (Hugging Face Spaces-style workaround):
# install OpenCV before the `import cv2` further down this file.
# Improvements over `os.system("pip install opencv-python")`:
#   * `sys.executable -m pip` installs into the interpreter actually
#     running this script, not whatever `pip` is first on PATH;
#   * argument list with no shell avoids shell-string execution;
#   * `check=False` keeps the original best-effort behavior (os.system's
#     exit status was ignored too).
import subprocess
import sys

subprocess.run([sys.executable, "-m", "pip", "install", "opencv-python"], check=False)
|
|
|
|
|
from PIL import Image |
|
|
|
|
|
|
|
|
|
|
|
import numpy as np |
|
|
import collections |
|
|
import cv2 |
|
|
|
|
|
|
|
|
|
|
|
# Inference device: CPU only — no CUDA selection anywhere in this script.
# NOTE(review): `device` is never read by the visible code (the `test`
# helper builds tensors on the default device) — confirm before removing.
device='cpu'
|
|
|
|
|
def test(gpu_id, net, img_list, group_size, img_size):
    """Run `net` on a group of images and return stacked preview images.

    Each returned array is the (un-normalized) input image, a 2-pixel white
    separator, and the CRF-refined predicted mask concatenated vertically.

    Parameters
    ----------
    gpu_id : unused; inference runs on the default (CPU) device.
    net : callable returning a 2-tuple whose second element is the mask
        batch. NOTE(review): contract inferred from the unpacking below —
        confirm against the model definition.
    img_list : list of H x W x 3 uint8 arrays (fed to PIL.Image.fromarray).
    group_size : int
        Number of images processed as one group. Was hard-coded to 5;
        now honors the parameter, capped at len(img_list).
    img_size : int
        Side length inputs are resized to. Was hard-coded to 224 in the
        buffer allocation, which crashed for any other value because the
        transform resizes to (img_size, img_size).

    Returns
    -------
    list of uint8 numpy arrays, one per processed image.
    """
    print('test')
    # Standard ImageNet normalization after resizing to img_size x img_size.
    img_transform = transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    # Honor group_size instead of the previous hard-coded 5; never read
    # past the end of img_list.
    n = min(group_size, len(img_list))
    with torch.no_grad():
        # Buffer shape now follows img_size (was torch.rand(5, 3, 224, 224));
        # every slot is overwritten below, so zeros is fine.
        group_img = torch.zeros(n, 3, img_size, img_size)
        for i in range(n):
            group_img[i] = img_transform(Image.fromarray(img_list[i]))
        # (the original passed `group_img * 1`, a no-op copy)
        _, pred_mask = net(group_img)
        pred_mask = pred_mask.detach().squeeze() * 255
        # Un-normalize each input back to a displayable H x W x 3 uint8 array
        # via per-image min/max scaling.
        img_resize = [
            ((group_img[i] - group_img[i].min())
             / (group_img[i].max() - group_img[i].min()) * 255)
            .permute(1, 2, 0).contiguous().numpy().astype(np.uint8)
            for i in range(n)
        ]
        # CRF post-processing of each predicted mask against its input image.
        pred_mask = [
            crf_refine(img_resize[i], pred_mask[i].numpy().astype(np.uint8))
            for i in range(n)
        ]
        print(pred_mask[0].shape)
        # 2-pixel-high white separator strip between image and mask.
        white = (torch.ones(2, pred_mask[0].shape[1], 3) * 255).long()
        result = [
            torch.cat([torch.from_numpy(img_resize[i]),
                       white,
                       torch.from_numpy(pred_mask[i]).unsqueeze(2).repeat(1, 1, 3)],
                      dim=0).numpy()
            for i in range(n)
        ]
    print('done')
    return result
|
|
|
|
|
|
|
|
# Default output paths.
# NOTE(review): all three point at the same file and none of them is read
# by the visible code (sepia() builds its own filenames) — confirm they
# are dead before removing.
outputpath1='img2.png'
outputpath2='img2.png'
outputpath3='img2.png'
|
|
def sepia(opt, img1):
    """Return the pre-rendered crop preview for the selected aspect ratio.

    Parameters
    ----------
    opt : str
        Aspect-ratio choice from the radio widget, e.g. "3:4" or "circle".
        Colons are not valid in the stored filenames, hence the replace.
    img1 : ndarray
        Source image from the Gradio image widget. NOTE(review): currently
        unused — the preview is read from a pre-rendered file on disk, not
        computed from img1. Kept for interface compatibility with the
        `bottom1.click(...)` wiring below.

    Returns
    -------
    ndarray
        RGB uint8 image scaled to a fixed height of 256 px with preserved
        aspect ratio.

    Raises
    ------
    FileNotFoundError
        If the pre-rendered file for `opt` is missing or unreadable.
    """
    # Build the filename once (the original computed the same expression
    # twice and left `name` unused).
    name = 'bike' + opt.replace(':', '_') + '.png'
    output = cv2.imread(name)  # BGR, uint8
    if output is None:
        # cv2.imread signals failure by returning None instead of raising;
        # fail loudly here rather than crashing opaquely on `.shape` below.
        raise FileNotFoundError(name)
    # Fixed display height of 256 px; width scaled to keep the aspect ratio.
    output = cv2.resize(output, (output.shape[1] * 256 // output.shape[0], 256))
    # OpenCV loads BGR; Gradio expects RGB, so reverse the channel axis.
    return output[:, :, ::-1]
|
|
# Gradio UI: a radio of target shapes plus a source image; clicking the
# button shows the corresponding pre-rendered crop (see sepia()).
with gr.Blocks() as demo:
    gr.Markdown("image cropping")

    with gr.Row():
        with gr.Column():
            # Target shape of the crop; default matches the shipped demo asset.
            radio2 = gr.Radio(["9:16", "3:4","1:1","4:3", "16:9","circle"], value="3:4",label="Shape", info="The shape of cropped image")
            # Source image input plus clickable example thumbnails.
            src_img1 = gr.Image()
            exp=gr.Examples(["bike.png","img2.png"],src_img1)
            # NOTE(review): gr.Button takes `value=` for its caption;
            # `label=` here likely does not render as button text — confirm.
            bottom1 = gr.Button(label="cropping component")

        # Result preview pane.
        out1 = gr.Image()

    # Wire the button: (shape choice, source image) -> preview image.
    # NOTE(review): sepia() ignores src_img1 and reads a pre-rendered file.
    bottom1.click(sepia, inputs=[radio2,src_img1], outputs=out1)

# debug=True surfaces tracebacks in the UI/console while developing.
demo.launch(debug=True)