# Author: Subha Nawer Pushpita
# update
# commit a2e570d
import torch
import requests
from PIL import Image
from torchvision import transforms
import gradio as gr
import io
import requests
import torch
import numpy as np
from torch.autograd import Variable
import torchvision.models as models
import torchvision.transforms as transforms
import math
import uuid
import numpy as np
import PIL.Image
import PIL.ImageDraw
import cv2
# Load a pretrained VGG-16 classifier for image prediction; eval() freezes
# dropout/batch-norm. Runs on GPU when available, otherwise CPU.
model = torch.hub.load('pytorch/vision:v0.6.0', 'vgg16', pretrained=True).eval()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# Download human-readable labels for ImageNet.
response = requests.get("https://git.io/JJkYN")
labels = response.text.split("\n")
import itertools
# Compare two binary strings; the Quine-McCluskey pairing condition is
# that they differ in exactly one bit.
def compBinary(s1, s2):
    """Return (True, pos) when s1 and s2 differ at exactly one index,
    otherwise (False, None)."""
    diff_positions = [i for i in range(len(s1)) if s1[i] != s2[i]]
    if len(diff_positions) == 1:
        return True, diff_positions[0]
    return False, None
# Check whether a minterm is covered by an implicant term; '-' in the term
# is a don't-care that matches either bit.
def compBinarySame(term, number):
    """True when every fixed (non-'-') position of `term` equals `number`."""
    return all(t == '-' or t == number[i] for i, t in enumerate(term))
# Merge adjacent one-count groups: two terms that differ in a single bit
# combine into a new term with '-' at that bit. Terms that never combine
# are prime-implicant candidates and are collected into `unchecked`.
def combinePairs(group, unchecked):
    """Combine pairs between consecutive groups.

    Returns (next_group, unchecked): the next generation of groups and the
    (mutated) list of terms that participated in no merge.
    """
    num_merges = len(group) - 1
    used = []  # every term that was merged at least once
    next_group = [[] for _ in range(num_merges)]
    for gi in range(num_merges):
        for left in group[gi]:
            for right in group[gi + 1]:
                # positions where the two terms disagree
                diffs = [k for k in range(len(left)) if left[k] != right[k]]
                if len(diffs) == 1:
                    used.append(left)
                    used.append(right)
                    # replace the differing bit with the don't-care marker
                    merged = left[:diffs[0]] + '-' + left[diffs[0] + 1:]
                    next_group[gi].append(merged)
    for bucket in group:
        for term in bucket:
            if term not in used:
                unchecked.append(term)
    return next_group, unchecked
# Deduplicate the items inside each sub-list of a 2-D list.
def remove_redundant(group):
    """Return a new 2-D list with duplicates dropped per sub-list,
    preserving first-occurrence order."""
    deduped = []
    for sub in group:
        seen = []
        for item in sub:
            if item not in seen:
                seen.append(item)
        deduped.append(seen)
    return deduped
# Deduplicate a flat list while keeping order.
def remove_redundant_list(list):
    """Return a new list keeping only the first occurrence of each item.

    NOTE(review): the parameter name shadows the builtin `list`; kept
    unchanged for interface compatibility.
    """
    unique = []
    for item in list:
        if item not in unique:
            unique.append(item)
    return unique
# True when there is nothing left to combine.
def check_empty(group):
    """Return True when `group` is empty or every sub-list in it is empty."""
    return not any(group)
# Essential prime implicants: rows that are the sole cover (single 1) of
# some column of the chart.
def find_prime(Chart):
    """Return the row indices that are the only 1 in some column.
    The result may contain duplicate indices."""
    essential_rows = []
    for col in range(len(Chart[0])):
        covering = [row for row in range(len(Chart)) if Chart[row][col] == 1]
        if len(covering) == 1:
            essential_rows.append(covering[0])
    return essential_rows
def check_all_zero(Chart):
    """Return True when every entry of the 2-D chart is 0."""
    return all(cell == 0 for row in Chart for cell in row)
#find the index of the max value in a list
def find_max(l):
    """Return the index of the first maximum element of `l`.

    Behavior preserved from the original: returns 0 for an empty list and
    also when every element is <= -1 (the scan starts from -1).
    Fixed: the original shadowed the builtin `max` with a local variable.
    """
    best_value = -1
    best_index = 0
    for index, value in enumerate(l):
        # strict '>' keeps the first occurrence on ties
        if value > best_value:
            best_value = value
            best_index = index
    return best_index
#multiply two sum terms (ex. (p1 + p2)(p1 + p4 + p5)) -- returns the product
def multiplication(list1, list2):
    """Distribute two sums of products for Petrick's method.

    Each argument is a list of product terms; a product term is a list of
    prime-implicant row indices. Returns the expanded product with duplicate
    product terms removed.
    """
    list_result = []
    # both factors empty -> empty product
    if len(list1) == 0 and len(list2) == 0:
        return list_result
    # one factor empty -> the other factor is the product unchanged
    elif len(list1) == 0:
        return list2
    elif len(list2) == 0:
        return list1
    else:
        for i in list1:
            for j in list2:
                # X * X = X (AND is idempotent)
                if i == j:
                    list_result.append(i)
                else:
                    # union of the two index sets
                    # NOTE(review): set() iteration order is unspecified in
                    # general; deterministic here for small CPython ints.
                    list_result.append(list(set(i + j)))
        # sort, then drop adjacent duplicates via groupby
        list_result.sort()
        return list(list_result for list_result, _ in itertools.groupby(list_result))
#Petrick's method
def petrick_method(Chart):
    """Solve the cyclic cover: return every minimum-length selection of rows
    (prime implicants) that covers all columns of `Chart`."""
    # initial P: one sum per column, listing the rows that cover it
    P = []
    for col in range(len(Chart[0])):
        p = []
        for row in range(len(Chart)):
            if Chart[row][col] == 1:
                p.append([row])
        P.append(p)
    # multiply the sums together left to right; the last entry is the full product
    for l in range(len(P) - 1):
        P[l + 1] = multiplication(P[l], P[l + 1])
    P = sorted(P[len(P) - 1], key=len)
    final = []
    # keep only the shortest terms (fewest implicants); the list is sorted
    # by length, so stop at the first longer one
    # NOTE(review): `min` shadows the builtin here.
    min = len(P[0])
    for i in P:
        if len(i) == min:
            final.append(i)
        else:
            break
    # final is the result of Petrick's method
    return final
#Chart rows = prime implicants (indices into unchecked), cols = minterms
def find_minimum_cost(Chart, unchecked):
    """Pick a minimum-cost cover: essential prime implicants first, then
    Petrick's method weighted by literal count for what remains.

    Mutates `Chart` in place (columns covered by essentials are zeroed).
    Returns a list of solutions; each solution is a list of row indices
    into `unchecked`.
    """
    P_final = []
    # essential primes: rows that are the only cover of some column
    essential_prime = find_prime(Chart)
    essential_prime = remove_redundant_list(essential_prime)
    # build a printable listing of the essential prime implicants
    if len(essential_prime) > 0:
        s = "\nEssential Prime Implicants :\n"
        for i in range(len(unchecked)):
            for j in essential_prime:
                if j == i:
                    s = s + binary_to_letter(unchecked[i]) + ' , '
    # zero out every column already covered by an essential implicant
    for i in range(len(essential_prime)):
        for col in range(len(Chart[0])):
            if Chart[essential_prime[i]][col] == 1:
                for row in range(len(Chart)):
                    Chart[row][col] = 0
    # if everything is covered, the essentials alone are the answer
    if check_all_zero(Chart) == True:
        P_final = [essential_prime]
    else:
        # Petrick's method on the remaining (cyclic) part of the chart
        P = petrick_method(Chart)
        # see "Introduction to Logic Design" - Alan B. Marcovitz, Example 4.6 p. 213
        '''
        Although Petrick's method gives the minimum terms that cover all,
        it does not mean that it is the solution for minimum cost!
        '''
        # weight each candidate selection by its total number of literals
        P_cost = []
        for prime in P:
            count = 0
            for i in range(len(unchecked)):
                for j in prime:
                    if j == i:
                        count = count + cal_efficient(unchecked[i])
            P_cost.append(count)
        for i in range(len(P_cost)):
            if P_cost[i] == min(P_cost):
                P_final.append(P[i])
    # add the essential implicants to every selected solution
    for i in P_final:
        for j in essential_prime:
            if j not in i:
                i.append(j)
    return P_final
# Cost of an implicant term = its number of literals.
def cal_efficient(s):
    """Count the non-'-' characters of `s` (the implicant's literal count)."""
    return len(s) - s.count('-')
#render a binary/implicant string as a product of letters
def binary_to_letter(s):
    """Map each position of `s` to a variable name: a..z, then A..Z, then
    numbered names (a0, a1, ...).  '1' -> letter, '0' -> letter + "'",
    '-' -> position skipped."""
    out = ''
    c = 'a'
    more = False  # True once the single-letter alphabet a..z, A..Z is exhausted
    n = 0         # numeric suffix used after the alphabet runs out
    for i in range(len(s)):
        # single-letter range a-z / A-Z
        if more == False:
            if s[i] == '1':
                out = out + c
            elif s[i] == '0':
                out = out + c + '\''
        if more == True:
            if s[i] == '1':
                out = out + c + str(n)
            elif s[i] == '0':
                out = out + c + str(n) + '\''
            n += 1
        # advance to the next variable name
        if c == 'z' and more == False:
            c = 'A'
        elif c == 'Z':
            c = 'a'
            more = True
        elif more == False:
            c = chr(ord(c) + 1)
    return out
def binary_to_letter_final(s, dictionary):
    """Like binary_to_letter, but substitute each generated letter with its
    human-readable region name from `dictionary`, wrapped in parentheses;
    complement marks (') pass through unchanged.

    NOTE(review): the final substitution loop looks up single characters
    only, so numbered names (a0, ...) produced after the alphabet is
    exhausted would KeyError on the digit -- confirm n_var stays <= 52.
    """
    out = ''
    c = 'a'
    more = False  # True once a..z, A..Z are exhausted
    n = 0
    for i in range(len(s)):
        # single-letter range a-z / A-Z
        if more == False:
            if s[i] == '1':
                out = out + c
            elif s[i] == '0':
                out = out + c + '\''
        if more == True:
            if s[i] == '1':
                out = out + c + str(n)
            elif s[i] == '0':
                out = out + c + str(n) + '\''
            n += 1
        # advance to the next variable name
        if c == 'z' and more == False:
            c = 'A'
        elif c == 'Z':
            c = 'a'
            more = True
        elif more == False:
            c = chr(ord(c) + 1)
    # replace each letter with "(region name)", keeping complement marks
    return_string = ''
    for char in out:
        if char == "'":
            return_string += "'"
        else:
            return_string += '(' + dictionary[char] + ')'
    return return_string
#main function: Quine-McCluskey minimization
def quin_macluskey(n_var, minterms, dictionary):
    """Minimize the boolean function given by `minterms` over `n_var`
    variables and render it with region names from `dictionary`.

    NOTE(review): mutates `minterms` in place (ints become binary strings).
    Returns the minimized sum-of-products string, or None when a minterm
    does not fit in `n_var` bits.
    """
    a = minterms
    # bucket the minterms by their number of 1-bits
    group = [[] for x in range(n_var + 1)]
    for i in range(len(a)):
        # convert to a zero-padded n_var-bit binary string
        a[i] = bin(a[i])[2:]
        if len(a[i]) < n_var:
            for j in range(n_var - len(a[i])):
                a[i] = '0' + a[i]
        # minterm out of range for the chosen variable count
        elif len(a[i]) > n_var:
            print('\nError : Choose the correct number of variables(bits)\n')
            return
        index = a[i].count('1')
        group[index].append(a[i])
    all_group = []
    unchecked = []
    # combine pairs repeatedly until nothing new can be combined;
    # terms that never combine accumulate in `unchecked` (prime implicants)
    while check_empty(group) == False:
        all_group.append(group)
        next_group, unchecked = combinePairs(group, unchecked)
        group = remove_redundant(next_group)
    s = "\nPrime Implicants :\n"
    for i in unchecked:
        s = s + binary_to_letter(i) + " , "
    # prime implicant chart: Chart[j][i] = 1 when implicant j covers minterm i
    Chart = [[0 for x in range(len(a))] for x in range(len(unchecked))]
    for i in range(len(a)):
        for j in range(len(unchecked)):
            if compBinarySame(unchecked[j], a[i]):
                Chart[j][i] = 1
    # primes: the minimum-cost selections of implicant indices
    primes = find_minimum_cost(Chart, unchecked)
    primes = remove_redundant(primes)
    print("\n-- Answers --\n")
    result = ''
    for prime in primes:
        s = ''
        for i in range(len(unchecked)):
            for j in prime:
                if j == i:
                    s = s + binary_to_letter_final(unchecked[i], dictionary) + ' + '
        # strip the trailing " + "
        result += s[:(len(s) - 3)]
    return result
#This part shortens boolean formulas even more than the last step, applicable only in the context of image prediction boolean formulas
def pre_image(res_image, model):
    """Classify a masked image array with `model`; return the argmax class id.

    NOTE(review): assumes `res_image` is an H x W x 3 uint8 array in BGR
    channel order (the [:, :, ::-1] flip converts to RGB) -- confirm.
    """
    model.eval()
    res_image = res_image[:, :, ::-1]  # BGR -> RGB before handing to PIL
    img = Image.fromarray(res_image)
    # Standard ImageNet preprocessing for torchvision pretrained models.
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    tensor = pipeline(img)
    # Add the batch axis: (C, H, W) -> (1, C, H, W), wrap as a Variable and
    # move to the module-level device.
    batch = Variable(tensor.unsqueeze(0)).to(device)
    scores = model(batch)  # (batch, num class labels)
    _, preds = torch.max(scores, 1)
    return preds.detach().cpu().numpy()[0]
def pre_image2(img, model):
    """Classify a PIL image with `model`; return the argmax class id."""
    model.eval()
    # Standard ImageNet preprocessing for torchvision pretrained models.
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    # Add the batch axis: (C, H, W) -> (1, C, H, W), wrap as a Variable and
    # move to the module-level device.
    batch = Variable(pipeline(img).unsqueeze(0)).to(device)
    logits = model(batch)  # (batch, num class labels)
    _, preds = torch.max(logits, 1)
    return preds.detach().cpu().numpy()[0]
def findsubsets(s, n):
    """Return all size-`n` subsets of `s` as a list of tuples, in
    itertools.combinations order."""
    return [*itertools.combinations(s, n)]
def find_all_subsets(s):
    """Return every subset of `s` ordered by increasing size (1..len),
    with the empty tuple appended last."""
    subsets = []
    for size in range(1, len(s) + 1):
        subsets += list(itertools.combinations(s, size))
    return subsets + [()]
def bitwise_general_or(comb, elt_mask_list):
    """Combine the masks indexed by `comb` into a single mask.

    Masks are stored inverted (255 = outside the region), so the union of
    regions is realized by bitwise AND of the inverted masks -- hence the
    "or" in the name despite the AND call.
    """
    combined = elt_mask_list[comb[0]]
    for key in comb[1:]:
        combined = cv2.bitwise_and(combined, elt_mask_list[key])
    return combined
def overlap(res_mask, main_img):
    """Apply a 3-channel mask to `main_img`: average the mask channels to
    grayscale, cast to uint8, and AND it with the image pixels.
    Returns the masked image array."""
    gray = np.mean(res_mask, axis=2)
    bw_mask = np.array(gray, dtype=np.uint8)
    img = np.array(main_img)
    rows, cols, _ = img.shape
    region = img[0:rows, 0:cols]
    return cv2.bitwise_and(region, region, mask=bw_mask)
def mask_generation(main_img, painted_img):
    """Build a 255/0 mask from two image files on disk: pixels where the
    painted copy differs from the original in any channel become 0.

    NOTE(review): np.full defaults to float64 here -- confirm downstream
    consumers accept a float mask.
    """
    base = cv2.imread(main_img)
    painted = cv2.imread(painted_img)
    h, w, ch = base.shape
    mask = np.full((h, w, ch), 255)
    changed = ((base[:, :, 0] != painted[:, :, 0])
               | (base[:, :, 1] != painted[:, :, 1])
               | (base[:, :, 2] != painted[:, :, 2]))
    mask[changed] = 0
    return mask
def calculate_weight(a_list):
    """Return the integer whose set bits are the positions in `a_list`
    (i.e. sum of 2**e over the list)."""
    return sum(2 ** e for e in a_list)
def shape_to_mask(
    img_shape, points, shape_type=None, line_width=10, point_size=5
):
    """Rasterize a labelme-style shape into a boolean mask of img_shape[:2].

    points: list of (x, y) pairs; their required count depends on
    shape_type (circle/rectangle/line: 2, point: 1, polygon: > 2).
    NOTE: validation uses `assert`, so the checks vanish under `python -O`.
    """
    mask = np.zeros(img_shape[:2], dtype=np.uint8)
    mask = PIL.Image.fromarray(mask)
    draw = PIL.ImageDraw.Draw(mask)
    xy = [tuple(point) for point in points]
    if shape_type == "circle":
        assert len(xy) == 2, "Shape of shape_type=circle must have 2 points"
        # first point is the center, second a point on the circumference
        (cx, cy), (px, py) = xy
        d = math.sqrt((cx - px) ** 2 + (cy - py) ** 2)
        draw.ellipse([cx - d, cy - d, cx + d, cy + d], outline=1, fill=1)
    elif shape_type == "rectangle":
        assert len(xy) == 2, "Shape of shape_type=rectangle must have 2 points"
        draw.rectangle(xy, outline=1, fill=1)
    elif shape_type == "line":
        assert len(xy) == 2, "Shape of shape_type=line must have 2 points"
        draw.line(xy=xy, fill=1, width=line_width)
    elif shape_type == "linestrip":
        draw.line(xy=xy, fill=1, width=line_width)
    elif shape_type == "point":
        assert len(xy) == 1, "Shape of shape_type=point must have 1 points"
        cx, cy = xy[0]
        r = point_size
        draw.ellipse([cx - r, cy - r, cx + r, cy + r], outline=1, fill=1)
    else:
        # default: treat the points as a polygon
        assert len(xy) > 2, "Polygon must have points more than 2"
        draw.polygon(xy=xy, outline=1, fill=1)
    mask = np.array(mask, dtype=bool)
    return mask
def combine_similar_masks(dj, shape):
    """Merge every annotation in `dj` whose label equals `shape` into one
    inverted 3-channel mask (0 inside the region, 255 outside).

    Fixes: `np.int` was removed in NumPy 1.24 (use builtin `int`), and a
    missing label previously surfaced as UnboundLocalError.
    """
    matches = 0
    result_mask = None
    for entry in dj['shapes']:
        if entry["label"] == shape:
            matches += 1
            curr_mask = shape_to_mask((dj['imageHeight'], dj['imageWidth']),
                                      entry['points'], shape_type=None,
                                      line_width=1, point_size=1)
            mask_img = curr_mask.astype(int)  # boolean -> 0/1 (np.int removed in NumPy 1.24)
            mask_img = 255 * mask_img
            mask_img = np.stack((mask_img, mask_img, mask_img), axis=2)
            mask_img = 255 - mask_img  # invert: region -> 0, background -> 255
            if matches == 1:
                result_mask = mask_img
            else:
                result_mask = cv2.bitwise_and(mask_img, result_mask)
    if result_mask is None:
        # previously an UnboundLocalError; fail with a clear message instead
        raise ValueError("no shapes labelled %r in annotation" % (shape,))
    return result_mask
def combine_all_masks(dj):
    """Merge every annotation in `dj` (regardless of label) into one
    inverted 3-channel mask (0 inside any region, 255 outside).

    Fixes: `np.int` was removed in NumPy 1.24 (use builtin `int`), and an
    empty shape list previously surfaced as UnboundLocalError.
    """
    result_mask = None
    for idx, entry in enumerate(dj['shapes']):
        curr_mask = shape_to_mask((dj['imageHeight'], dj['imageWidth']),
                                  entry['points'], shape_type=None,
                                  line_width=1, point_size=1)
        mask_img = curr_mask.astype(int)  # boolean -> 0/1 (np.int removed in NumPy 1.24)
        mask_img = 255 * mask_img
        mask_img = np.stack((mask_img, mask_img, mask_img), axis=2)
        mask_img = 255 - mask_img  # invert: region -> 0, background -> 255
        if idx == 0:
            result_mask = mask_img
        else:
            result_mask = cv2.bitwise_and(mask_img, result_mask)
    if result_mask is None:
        # previously an UnboundLocalError; fail with a clear message instead
        raise ValueError("annotation contains no shapes")
    return result_mask
def get_key(my_dict, val):
    """Reverse lookup: return the first key whose value equals `val`,
    or None when no value matches."""
    for key, stored in my_dict.items():
        if stored == val:
            return key
    return None
import json
import tempfile
#TODO: Add a region for bg: add a 'bg' shape to unique_shape_set, write a function for combining all masks and negating the result to obtain the
#bg mask; the rest of the pipeline then stays the same
def generate_predictions(path, main_img):
    """Gradio entry point: given a labelme JSON annotation and the original
    image, determine which annotated regions the classifier's prediction
    depends on, and return that dependency as a minimized boolean formula.

    path -- list of uploaded file objects; only path[0] (the JSON) is read.
    main_img -- PIL image of the unmodified picture.
    """
    print(main_img)
    # read and parse the uploaded labelme JSON annotation
    path[0].seek(0)
    my_bytes = path[0].read()
    string_data = my_bytes.decode('utf8')
    dj = json.loads(string_data)
    bit_assignment = {}   # label -> bit position
    elt_mask_dict = {}    # bit position -> inverted region mask
    unique_shape_set = set()
    for elt in dj["shapes"]:
        unique_shape_set.add(elt["label"])
    l = len(unique_shape_set)
    # assign bit positions from high to low; the 'bg' label (if present)
    # gets the complement of the union of all annotated regions
    i = l - 1
    for shape in unique_shape_set:
        if shape == 'bg':
            intermed_mask = combine_all_masks(dj)
            mask_img = 255 - intermed_mask
            bit_assignment[shape] = i
            elt_mask_dict[i] = mask_img
            i = i - 1
        else:
            mask_img = combine_similar_masks(dj, shape)
            bit_assignment[shape] = i
            elt_mask_dict[i] = mask_img
            i = i - 1
    # prediction on the untouched image is the reference class
    true_prediction = pre_image2(main_img, model)
    all_file_comb_list = find_all_subsets(elt_mask_dict)
    minterms = []
    bit_list = list(elt_mask_dict.keys())
    print(len(all_file_comb_list))
    # mask out every subset of regions; when the prediction survives, the
    # regions still present form a minterm of the boolean function
    for comb in all_file_comb_list:
        print('ho')
        if comb == ():
            # empty subset: nothing masked, prediction is trivially preserved
            res_image = np.array(main_img)
            prediction = true_prediction
        else:
            res_mask = bitwise_general_or(comb, elt_mask_dict)
            res_image = overlap(res_mask, main_img)
            prediction = pre_image(res_image, model)
        if prediction == true_prediction:
            region_present = [e for e in bit_list if not e in list(comb)]
            minterms.append(calculate_weight(region_present))
    # letter -> region-name mapping for rendering the final formula
    # NOTE(review): chr(ord('a') - i + l - 1) assigns letters in reverse bit
    # order -- confirm this matches binary_to_letter's position ordering.
    dictionary = {}
    ch = 'a'
    for i in range(l):
        dictionary[chr(ord(ch) - i + l - 1)] = get_key(bit_assignment, i)
    print('no')
    result = quin_macluskey(l, minterms, dictionary)
    print(result)
    return result
# Launch the Gradio UI: inputs are the annotation JSON file(s) and the PIL
# image; share=True exposes a public link, debug=True streams server logs.
gr.Interface(fn=generate_predictions,
             inputs=["files", "pil"],
             outputs="text").launch(share=True, debug=True)