rawanessam committed on
Commit
d359351
·
verified ·
1 Parent(s): 12bb8ac

Update deepfloorplan_inference.py

Browse files
Files changed (1) hide show
  1. deepfloorplan_inference.py +57 -55
deepfloorplan_inference.py CHANGED
@@ -1,56 +1,58 @@
1
- import os
2
- import numpy as np
3
- import tensorflow.compat.v1 as tf
4
- tf.disable_v2_behavior()
5
- from PIL import Image
6
- import imageio
7
- from net import Network
8
- from utils.rgb_ind_convertor import ind2rgb, floorplan_fuse_map
9
-
10
- class DeepFloorPlanModel:
11
- def __init__(self, model_dir='pretrained', input_size=(512, 512)):
12
- self.input_size = input_size
13
- self.model_dir = model_dir
14
- self._build_graph()
15
- self._load_weights()
16
-
17
- def _build_graph(self):
18
- tf.compat.v1.reset_default_graph()
19
- self.sess = tf.compat.v1.Session()
20
- self.x = tf.compat.v1.placeholder(shape=[1, self.input_size[0], self.input_size[1], 3], dtype=tf.float32, name='inputs')
21
- self.network = Network()
22
- logits1, logits2 = self.network.forward(self.x, init_with_pretrain_vgg=False)
23
- self.rooms = self.network.convert_one_hot_to_image(logits1, act='softmax', dtype='int')
24
- self.close_walls = self.network.convert_one_hot_to_image(logits2, act='softmax', dtype='int')
25
- self.sess.run(tf.compat.v1.global_variables_initializer())
26
- self.sess.run(tf.compat.v1.local_variables_initializer())
27
- self.saver = tf.compat.v1.train.Saver()
28
-
29
- def _load_weights(self):
30
- ckpt = tf.train.latest_checkpoint(self.model_dir)
31
- if ckpt is None:
32
- raise FileNotFoundError(f"No checkpoint found in {self.model_dir}")
33
- self.saver.restore(self.sess, ckpt)
34
-
35
- def predict(self, image):
36
- # Accepts a numpy array or PIL image, returns a numpy array (segmentation mask)
37
- if isinstance(image, Image.Image):
38
- image = np.array(image)
39
- if image.shape[-1] == 4:
40
- image = image[..., :3]
41
- im_resized = np.array(Image.fromarray(image).resize(self.input_size, Image.BICUBIC)) / 255.0
42
- im_resized = im_resized.astype(np.float32)
43
- im_resized = np.reshape(im_resized, (1, self.input_size[0], self.input_size[1], 3))
44
- out1, out2 = self.sess.run([self.rooms, self.close_walls], feed_dict={self.x: im_resized})
45
- out1 = np.squeeze(out1)
46
- out2 = np.squeeze(out2)
47
- # Merge logic: set out1 pixels to 9/10 where out2==1/2
48
- out1[out2==2] = 10
49
- out1[out2==1] = 9
50
- # Convert to RGB for visualization
51
- out_rgb = ind2rgb(out1, color_map=floorplan_fuse_map)
52
- out_rgb = out_rgb.astype(np.uint8)
53
- return out_rgb
54
-
55
- def close(self):
 
 
56
  self.sess.close()
 
1
+ import os
2
+ import numpy as np
3
+ import tensorflow.compat.v1 as tf
4
+ tf.disable_v2_behavior()
5
+ from PIL import Image
6
+ import imageio
7
+ from net import Network
8
+ from utils.rgb_ind_convertor import ind2rgb, floorplan_fuse_map
9
+
10
class DeepFloorPlanModel:
    """TF1-compat inference wrapper for the DeepFloorPlan network.

    Builds the graph and a session once at construction time, restores
    pretrained weights from a checkpoint directory, and exposes
    ``predict`` to turn a floor-plan image into an RGB-coded
    segmentation mask.
    """

    def __init__(self, model_dir='pretrained', input_size=(512, 512)):
        # input_size is (height, width) — the placeholder and the reshape
        # in predict() both interpret it that way.
        self.input_size = input_size
        self.model_dir = model_dir
        self._build_graph()
        self._load_weights()

    def _build_graph(self):
        """Construct the inference graph, session, and saver."""
        tf.compat.v1.reset_default_graph()
        self.sess = tf.compat.v1.Session()
        # Fixed batch size of 1: predict() processes one image at a time.
        self.x = tf.compat.v1.placeholder(
            shape=[1, self.input_size[0], self.input_size[1], 3],
            dtype=tf.float32, name='inputs')
        self.network = Network()
        logits1, logits2 = self.network.forward(self.x, init_with_pretrain_vgg=False)
        # Two heads: logits1 -> room-type map, logits2 -> close-wall map
        # (naming per the variables below; exact class semantics live in
        # net.Network — confirm there if extending).
        self.rooms = self.network.convert_one_hot_to_image(
            logits1, act='softmax', dtype='int')
        self.close_walls = self.network.convert_one_hot_to_image(
            logits2, act='softmax', dtype='int')
        self.sess.run(tf.compat.v1.global_variables_initializer())
        self.sess.run(tf.compat.v1.local_variables_initializer())
        self.saver = tf.compat.v1.train.Saver()

    def _load_weights(self):
        """Restore the latest checkpoint from ``self.model_dir``.

        Raises:
            FileNotFoundError: if the directory contains no checkpoint.
        """
        ckpt = tf.train.latest_checkpoint(self.model_dir)
        if ckpt is None:
            print(f"[ERROR] No checkpoint found in {self.model_dir}")
            raise FileNotFoundError(f"No checkpoint found in {self.model_dir}")
        print(f"[INFO] Restoring model weights from {ckpt}")
        self.saver.restore(self.sess, ckpt)

    def predict(self, image):
        """Segment a single floor-plan image.

        Args:
            image: numpy array or PIL image; grayscale (H, W), RGB
                (H, W, 3), or RGBA (H, W, 4) are all accepted.

        Returns:
            np.uint8 array of shape (input_size[0], input_size[1], 3):
            the fused segmentation rendered as RGB via
            ``floorplan_fuse_map``.
        """
        if isinstance(image, Image.Image):
            image = np.array(image)
        if image.ndim == 2:
            # Grayscale: replicate to three channels so the reshape to
            # (1, H, W, 3) below is always valid.
            image = np.stack([image] * 3, axis=-1)
        if image.shape[-1] == 4:
            image = image[..., :3]  # drop alpha channel
        # PIL's resize() takes (width, height), while input_size is
        # (height, width) — swap so non-square sizes resize correctly.
        target_wh = (self.input_size[1], self.input_size[0])
        im_resized = np.array(
            Image.fromarray(image).resize(target_wh, Image.BICUBIC)) / 255.0
        im_resized = im_resized.astype(np.float32)
        im_resized = np.reshape(
            im_resized, (1, self.input_size[0], self.input_size[1], 3))
        out1, out2 = self.sess.run(
            [self.rooms, self.close_walls], feed_dict={self.x: im_resized})
        out1 = np.squeeze(out1)
        out2 = np.squeeze(out2)
        # Merge logic: set out1 pixels to 9/10 where out2==1/2
        # (presumably boundary class indices in floorplan_fuse_map —
        # confirm against utils.rgb_ind_convertor).
        out1[out2 == 2] = 10
        out1[out2 == 1] = 9
        # Convert to RGB for visualization
        out_rgb = ind2rgb(out1, color_map=floorplan_fuse_map)
        out_rgb = out_rgb.astype(np.uint8)
        return out_rgb

    def close(self):
        """Release the underlying TensorFlow session."""
        self.sess.close()