# PGBPGB — "Add print to app.py" (commit 891c1bc)
import os
import io
import datetime
import sys
import torch
import cv2
import numpy as np
from PIL import Image
import anvil.server
from anvil.tables import app_tables
from lib import *
from utils.misc import *
from data.dataloader import *
from data.custom_transforms import *
# Torch stuff
# Disable TF32 matmul/conv kernels so inference matches the full-fp32
# reference results on Ampere+ GPUs.
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
# Load the model config and instantiate the architecture named in the YAML.
# NOTE(review): eval() of a config-supplied class name — acceptable only for
# trusted, repo-local config files.
opt = load_config('configs/InSPyReNet_SwinB.yaml')
model = eval(opt.Model.name)(**opt.Model)
if torch.cuda.is_available():
    model.load_state_dict(torch.load(os.path.join(
        opt.Test.Checkpoint.checkpoint_dir, 'latest.pth')), strict=True)
    model.cuda()
else:
    # CPU fallback: remap CUDA-saved tensors onto the CPU.
    model.load_state_dict(torch.load(os.path.join(
        opt.Test.Checkpoint.checkpoint_dir, 'latest.pth'), map_location=torch.device('cpu')), strict=True)
model.eval()  # inference mode: freezes dropout / batch-norm statistics
transform = get_transform(opt.Test.Dataset.transforms)
# Connect Anvil server
# SECURITY NOTE(review): hard-coded uplink key — move it to an environment
# variable or secret store before publishing this file.
anvil.server.connect("server_LZQ5VMURCIQEEAQE25QYFO2C-IKWA75JBE6UUUDAV")
@anvil.server.callable
def remove_background_notebook_v2(job_number, user_settings):
    """Anvil-callable: remove the background from every image of one job.

    Fetches all rows of the `images` table matching the caller's user and
    job number, stores the RGBA cut-out into each row's 'Processed' column,
    and marks the row 'Ready'. Returns True once the whole batch is done.
    """
    # Settings are passed in rather than re-read: the user may change them
    # for a new task while this background task is still queued, and a
    # delayed re-read could pick up the wrong values.
    print("Server call", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    user = user_settings['User']
    # get all the rows from user and job_number
    images_database = app_tables.images.search(User=user, JobNumber=job_number)
    # loop over the matched rows, processing each image in place
    for row in images_database:
        image_pil = media_object2pil(row['Original'])
        transparent_background = remove_background(image_pil)
        row['Processed'] = pil2media_object(transparent_background, row['ImageName'])
        row['Ready'] = True
    return True
def remove_background(image):
    """Run the segmentation model on a PIL image and return an RGBA image
    whose alpha channel is the predicted foreground mask.

    :param image: PIL image in RGB or RGBA mode (see media_object2pil).
    :return: PIL RGBA image; background pixels get low/zero alpha.
    """
    print("Got background to remove, at", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    shape = image.size[::-1]  # PIL size is (w, h); the model expects (h, w)
    sample = {'image': image, 'shape': shape, 'original': image}
    sample = transform(sample)
    sample['image'] = sample['image'].unsqueeze(0)  # add batch dimension
    if 'image_resized' in sample.keys():
        sample['image_resized'] = sample['image_resized'].unsqueeze(0)
    if torch.cuda.is_available():
        sample = to_cuda(sample)
    with torch.no_grad():
        out = model(sample)
    pred = to_numpy(out['pred'], sample['shape'])
    img = np.array(sample['original'])
    # BUG FIX: media_object2pil preserves RGBA inputs, so img may have four
    # channels here. The original 3-way unpack of cv2.split() crashed with
    # "too many values to unpack" on RGBA uploads. Drop any pre-existing
    # alpha channel — the model's mask replaces it below.
    if img.ndim == 3 and img.shape[2] == 4:
        img = img[:, :, :3]
    r, g, b = cv2.split(img)
    pred = (pred * 255).astype(np.uint8)  # mask in [0,1] -> uint8 alpha
    img = cv2.merge([r, g, b, pred])
    result = Image.fromarray(img)
    print('Completed removing at', datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    return result
# UTIL
def pil2media_object(image, filename):
    """Serialize a PIL image as PNG and wrap it in an Anvil BlobMedia.

    :param image: PIL image to serialize.
    :param filename: original file name; its extension is stripped before
        being used as the media object's name.
    """
    base_name, _ = os.path.splitext(filename)
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    png_bytes = buffer.getvalue()
    return anvil.BlobMedia(content_type="image/png", content=png_bytes, name=base_name)
def media_object2pil(image):
    """Decode an Anvil media object into a PIL image.

    RGB and RGBA images are returned as-is; any other mode (grayscale,
    palette, ...) is converted to RGB.
    """
    raw = io.BytesIO(image.get_bytes())
    pil_image = Image.open(raw)
    if pil_image.mode not in ('RGB', 'RGBA'):
        pil_image = pil_image.convert('RGB')
    return pil_image
def get_transform(tfs):
    """Build a transforms.Compose pipeline from a config mapping.

    :param tfs: mapping whose keys name transform classes available in this
        module's namespace (star-imported from data.custom_transforms) and
        whose values are kwargs dicts for the constructor, or None for a
        no-argument construction.
    :return: transforms.Compose over the instantiated transforms, in
        mapping order.
    """
    print(tfs)
    # NOTE(review): eval() resolves config-supplied class names — acceptable
    # for trusted local YAML configs only; never feed untrusted input here.
    comp = []
    # Idiom fix: iterate items() instead of zip(tfs.keys(), tfs.values()).
    for name, kwargs in tfs.items():
        if kwargs is not None:
            comp.append(eval(name)(**kwargs))
        else:
            comp.append(eval(name)())
    return transforms.Compose(comp)
print("starting server", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
# Block the main thread and keep serving Anvil calls until the process dies.
anvil.server.wait_forever()
# NOTE(review): unreachable under normal operation — wait_forever() never
# returns; left in place as-is.
print("ending", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))