Dharini Baskaran committed on
Commit
0872418
·
1 Parent(s): f419f09

code cleanup

Browse files
Dockerfile CHANGED
@@ -12,7 +12,7 @@ RUN apt-get update && apt-get install -y \
12
  ffmpeg \
13
  && rm -rf /var/lib/apt/lists/*
14
 
15
- # Set Matplotlib temp folder
16
  ENV MPLCONFIGDIR=/tmp/matplotlib
17
 
18
  # Set working directory
@@ -33,4 +33,4 @@ RUN pip install 'git+https://github.com/facebookresearch/detectron2.git'
33
  COPY . .
34
 
35
  # Run
36
- CMD ["python3", "app.py"]
 
12
  ffmpeg \
13
  && rm -rf /var/lib/apt/lists/*
14
 
15
+ # Set Matplotlib temp folder. This is needed to avoid warnings for missing fonts
16
  ENV MPLCONFIGDIR=/tmp/matplotlib
17
 
18
  # Set working directory
 
33
  COPY . .
34
 
35
  # Run
36
+ CMD ["python3", "app.py"]
app.py CHANGED
@@ -5,15 +5,9 @@ import sys
5
  import json
6
  import shutil
7
  import gdown
8
- import time
9
  from PIL import Image
10
- from io import BytesIO
11
 
12
- # ==================================
13
- # SETUP
14
- # ==================================
15
-
16
- print("🚀 Gradio App Starting...")
17
 
18
  BASE_DIR = os.path.dirname(os.path.abspath(__file__))
19
 
@@ -34,22 +28,14 @@ os.makedirs(UPLOAD_DIR, exist_ok=True)
34
  os.makedirs(JSON_DIR, exist_ok=True)
35
  os.makedirs(OUTPUT_DIR, exist_ok=True)
36
 
37
- # Copy logo.png to /tmp
38
- logo_src = os.path.join(BASE_DIR, "public", "logo.png")
39
- logo_dst = "/tmp/logo.png"
40
-
41
- # Only copy if not already there
42
- if not os.path.exists(logo_dst):
43
- shutil.copy(logo_src, logo_dst)
44
-
45
  # Download model if missing
46
  if not os.path.exists(model_path):
47
- print("🚀 Model file not found! Downloading...")
48
  try:
49
  gdown.download(GDRIVE_URL, model_path, quiet=False, use_cookies=False)
50
- print("Model downloaded successfully.")
51
  except Exception as e:
52
- print(f"Failed to download model: {e}")
53
 
54
  # Import model
55
  sys.path.append(MODEL_DIR)
@@ -57,16 +43,11 @@ from rcnn_model.scripts.rcnn_run import main, write_config
57
 
58
  cfg = write_config()
59
 
60
- # ==================================
61
- # MAIN PREDICTION FUNCTION
62
- # ==================================
63
-
64
  def predict(uploaded_file_path):
65
- print("Inside Predict:" + uploaded_file_path)
66
  if uploaded_file_path is None:
67
  return None, None, "No file uploaded.", None
68
 
69
- # Save uploaded file to temp
70
  uploaded_path = os.path.join(UPLOAD_DIR, "input_image.png")
71
  shutil.copy(uploaded_file_path, uploaded_path)
72
 
@@ -95,17 +76,11 @@ def predict(uploaded_file_path):
95
 
96
  return result_img, json.dumps(result_json, indent=2), None, download_json_path, uploaded_path
97
 
98
- # ==================================
99
- # GRADIO UI
100
- # ==================================
101
-
102
  with gr.Blocks() as demo:
103
- # Header
104
  with gr.Row():
105
  gr.Markdown(
106
  f"""
107
  <div style='display: flex; align-items: center; justify-content: center;'>
108
- <img src='file/tmp/logo.png' style='height: 50px; margin-right: 10px;'/>
109
  <h1>Inovonics 2D Floorplan Vectorizer</h1>
110
  </div>
111
  """
@@ -118,19 +93,12 @@ with gr.Blocks() as demo:
118
  run_button = gr.Button("Run Vectorizer 🔥")
119
 
120
  with gr.Column():
121
- output_image = gr.Image(label="🖼 Output Vectorized Image")
122
- download_button = gr.File(label="⬇️ Download JSON", visible=True)
123
- output_json = gr.JSON(label="🧾 Output JSON")
124
 
125
  error_output = gr.Textbox(label="Error Message", visible=False)
126
 
127
- # Logic binding
128
- # run_button.click(
129
- # predict,
130
- # inputs=[uploaded_file],
131
- # outputs=[output_image, output_json, error_output, download_button, uploaded_image_display]
132
- # )
133
-
134
  run_button.click(
135
  lambda x: (x, gr.update(interactive=False)),
136
  inputs=[uploaded_file],
 
5
  import json
6
  import shutil
7
  import gdown
 
8
  from PIL import Image
 
9
 
10
+ print("Gradio App Starting...")
 
 
 
 
11
 
12
  BASE_DIR = os.path.dirname(os.path.abspath(__file__))
13
 
 
28
  os.makedirs(JSON_DIR, exist_ok=True)
29
  os.makedirs(OUTPUT_DIR, exist_ok=True)
30
 
 
 
 
 
 
 
 
 
31
  # Download model if missing
32
  if not os.path.exists(model_path):
33
+ print("Model file not found! Downloading...")
34
  try:
35
  gdown.download(GDRIVE_URL, model_path, quiet=False, use_cookies=False)
36
+ print("Model downloaded successfully.")
37
  except Exception as e:
38
+ print(f"Failed to download model: {e}")
39
 
40
  # Import model
41
  sys.path.append(MODEL_DIR)
 
43
 
44
  cfg = write_config()
45
 
 
 
 
 
46
  def predict(uploaded_file_path):
 
47
  if uploaded_file_path is None:
48
  return None, None, "No file uploaded.", None
49
 
50
+ # Save uploaded file to tmp folder
51
  uploaded_path = os.path.join(UPLOAD_DIR, "input_image.png")
52
  shutil.copy(uploaded_file_path, uploaded_path)
53
 
 
76
 
77
  return result_img, json.dumps(result_json, indent=2), None, download_json_path, uploaded_path
78
 
 
 
 
 
79
  with gr.Blocks() as demo:
 
80
  with gr.Row():
81
  gr.Markdown(
82
  f"""
83
  <div style='display: flex; align-items: center; justify-content: center;'>
 
84
  <h1>Inovonics 2D Floorplan Vectorizer</h1>
85
  </div>
86
  """
 
93
  run_button = gr.Button("Run Vectorizer 🔥")
94
 
95
  with gr.Column():
96
+ output_image = gr.Image(label="Output Vectorized Image")
97
+ download_button = gr.File(label="Download JSON", visible=True)
98
+ output_json = gr.JSON(label="Output JSON")
99
 
100
  error_output = gr.Textbox(label="Error Message", visible=False)
101
 
 
 
 
 
 
 
 
102
  run_button.click(
103
  lambda x: (x, gr.update(interactive=False)),
104
  inputs=[uploaded_file],
app_streamlit.py DELETED
@@ -1,200 +0,0 @@
1
- import streamlit as st
2
- import json
3
- import time
4
- from PIL import Image
5
- import os
6
- import sys
7
- import shutil
8
- import gdown
9
- from io import BytesIO
10
-
11
- # ==================================
12
- # SETUP
13
- # ==================================
14
-
15
- print("🚀 Streamlit App Starting...")
16
-
17
- BASE_DIR = os.path.dirname(os.path.abspath(__file__))
18
-
19
- # Setup Paths
20
- UPLOAD_DIR = "/tmp/uploads/"
21
- MODEL_DIR = os.path.join(BASE_DIR, "rcnn_model", "scripts")
22
- JSON_DIR = "/tmp/results/"
23
- OUTPUT_DIR = "/tmp/output/"
24
- SAMPLE_DIR = os.path.join(BASE_DIR, "rcnn_model", "sample")
25
- logo_path = os.path.join(BASE_DIR, "public", "logo.png")
26
- model_path = os.path.join(OUTPUT_DIR, "model_final.pth")
27
-
28
- # Google Drive file download link
29
- GOOGLE_DRIVE_FILE_ID = "1yr64AOgaYZPTcQzG6cxG6lWBENHR9qjW"
30
- GDRIVE_URL = f"https://drive.google.com/uc?id={GOOGLE_DRIVE_FILE_ID}"
31
-
32
- # Create necessary folders
33
- os.makedirs(UPLOAD_DIR, exist_ok=True)
34
- os.makedirs(JSON_DIR, exist_ok=True)
35
- os.makedirs(OUTPUT_DIR, exist_ok=True)
36
-
37
- # ==================================
38
- # DOWNLOAD MODEL IF MISSING
39
- # ==================================
40
-
41
- if not os.path.exists(model_path):
42
- print("🚀 Model file not found! Downloading from Google Drive...")
43
- try:
44
- gdown.download(GDRIVE_URL, model_path, quiet=False)
45
- print("✅ Model downloaded successfully.")
46
- except Exception as e:
47
- print(f"❌ Failed to download model: {e}")
48
-
49
- # ==================================
50
- # IMPORT MODEL RUNNER
51
- # ==================================
52
-
53
- sys.path.append(MODEL_DIR)
54
- from rcnn_model.scripts.rcnn_run import main, write_config
55
-
56
- # ==================================
57
- # PAGE CONFIG
58
- # ==================================
59
-
60
- st.set_page_config(
61
- page_title="2D Floorplan Vectorizer",
62
- layout="wide",
63
- initial_sidebar_state="collapsed"
64
- )
65
-
66
- # ==================================
67
- # HEADER
68
- # ==================================
69
-
70
- st.image(logo_path, width=250)
71
- st.markdown("<div class='header-title'>2D Floorplan Vectorizer</div>", unsafe_allow_html=True)
72
-
73
- # ==================================
74
- # FILE UPLOAD SECTION
75
- # ==================================
76
-
77
- st.subheader("Upload your Floorplan Image")
78
- uploaded_file = st.file_uploader("Choose an image", type=["png", "jpg", "jpeg"])
79
-
80
- # Initialize session state
81
- if "processing_complete" not in st.session_state:
82
- st.session_state.processing_complete = False
83
- if "json_output" not in st.session_state:
84
- st.session_state.json_output = None
85
-
86
- # ==================================
87
- # IMAGE + JSON Layout
88
- # ==================================
89
-
90
- col1, col2 = st.columns([1, 2])
91
-
92
- # ==================================
93
- # MAIN LOGIC
94
- # ==================================
95
-
96
- if uploaded_file is not None:
97
- print("📤 File Uploaded:", uploaded_file.name)
98
-
99
- image_bytes = uploaded_file.read()
100
- img = Image.open(BytesIO(image_bytes)).convert("RGB")
101
-
102
- uploaded_path = os.path.join(UPLOAD_DIR, uploaded_file.name)
103
- with open(uploaded_path, "wb") as f:
104
- f.write(uploaded_file.getbuffer())
105
- print("✅ Uploaded file saved at:", uploaded_path)
106
-
107
- with col1:
108
- st.markdown("<div class='upload-container'>", unsafe_allow_html=True)
109
- st.image(Image.open(uploaded_path), caption="Uploaded Image", use_container_width=True)
110
- st.markdown("</div>", unsafe_allow_html=True)
111
-
112
- with col2:
113
- if not st.session_state.processing_complete:
114
- status_placeholder = st.empty()
115
- status_placeholder.info("⏳ Model is processing the uploaded image...")
116
- progress_bar = st.progress(0)
117
- status_text = st.empty()
118
-
119
- # === 🔥 Model Run Here ===
120
- input_image = uploaded_path
121
- output_json_name = uploaded_file.name.replace(".png", "_result.json").replace(".jpg", "_result.json").replace(".jpeg", "_result.json")
122
- output_image_name = uploaded_file.name.replace(".png", "_result.png").replace(".jpg", "_result.png").replace(".jpeg", "_result.png")
123
-
124
- output_json_path = os.path.join(JSON_DIR, output_json_name)
125
- output_image_path = os.path.join(JSON_DIR, output_image_name)
126
-
127
- cfg = write_config()
128
- print("⚙️ Model config created. Running model...")
129
-
130
- # Simulate progress
131
- for i in range(1, 30):
132
- time.sleep(0.01)
133
- progress_bar.progress(i)
134
- status_text.text(f"Preprocessing: {i}%")
135
-
136
- # Run model
137
- main(cfg, input_image, output_json_path, output_image_path)
138
- print("✅ Model run complete.")
139
-
140
- while not os.path.exists(output_json_path):
141
- print("Waiting for JSON output...")
142
- time.sleep(0.5)
143
-
144
- for i in range(30, 100):
145
- time.sleep(0.01)
146
- progress_bar.progress(i)
147
- status_text.text(f"Postprocessing: {i}%")
148
-
149
- progress_bar.empty()
150
- status_text.text("✅ Processing Complete!")
151
- status_placeholder.success("✅ Model finished and JSON is ready!")
152
-
153
- # Read generated JSON
154
- if os.path.exists(output_json_path):
155
- with open(output_json_path, "r") as jf:
156
- st.session_state.json_output = json.load(jf)
157
- print("📄 JSON Output Loaded Successfully.")
158
- else:
159
- st.session_state.json_output = {"error": "JSON output not generated."}
160
- print("❌ JSON output missing.")
161
-
162
- st.session_state.processing_complete = True
163
-
164
- # ==================================
165
- # DISPLAY OUTPUTS
166
- # ==================================
167
-
168
- out_col1, out_col2 = st.columns(2)
169
-
170
- with out_col1:
171
- if os.path.exists(output_image_path):
172
- with open(output_image_path, "rb") as img_file:
173
- image = Image.open(img_file)
174
- st.image(image, caption="🖼 Output Vectorized Image", use_container_width=True)
175
-
176
- img_file.seek(0)
177
- st.download_button(
178
- label="Download Output Image",
179
- data=img_file,
180
- file_name="floorplan_output.png",
181
- mime="image/png"
182
- )
183
-
184
- if os.path.exists(output_json_path):
185
- json_str = json.dumps(st.session_state.json_output, indent=4)
186
- st.download_button(
187
- label="Download JSON",
188
- data=json_str,
189
- file_name="floorplan_output.json",
190
- mime="application/json"
191
- )
192
-
193
- with out_col2:
194
- st.markdown("<div class='json-container'>", unsafe_allow_html=True)
195
- st.json(st.session_state.json_output)
196
- st.markdown("</div>", unsafe_allow_html=True)
197
-
198
- else:
199
- st.warning("⚠️ No image uploaded yet.")
200
- st.session_state.processing_complete = False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
rcnn_model/extraction/floorplan_sampler.py CHANGED
@@ -13,13 +13,6 @@ import pylab
13
  pylab.rcParams['figure.figsize'] = (128.0, 160.0)
14
  from rcnn_model.utils.floorplan_vectorizer_utils import get_image_size, draw_from_coco
15
 
16
-
17
- # sys.path.append(str(from_root("utils")))
18
- # from floorplan_vectorizer_utils import get_image_size, draw_from_coco
19
-
20
-
21
- ### Main functionality ###
22
-
23
  data_directory_root = str(from_root("dataset/"))+"/"
24
  category_filter = [2]
25
  image_sample_room_count_threshold = 4
 
13
  pylab.rcParams['figure.figsize'] = (128.0, 160.0)
14
  from rcnn_model.utils.floorplan_vectorizer_utils import get_image_size, draw_from_coco
15
 
 
 
 
 
 
 
 
16
  data_directory_root = str(from_root("dataset/"))+"/"
17
  category_filter = [2]
18
  image_sample_room_count_threshold = 4
rcnn_model/extraction/svg_to_json.py CHANGED
@@ -13,13 +13,6 @@ from from_root import from_root
13
  from rcnn_model.preprocessing.cleaning_single_image import preprocess_image
14
  from rcnn_model.utils.floorplan_vectorizer_utils import get_image_size, draw_from_coco
15
 
16
-
17
-
18
- # sys.path.append(str(from_root("preprocessing")))
19
- # from cleaning_images import preprocessing
20
- # sys.path.append(str(from_root("utils")))
21
- # from floorplan_vectorizer_utils import get_image_size, draw_from_coco
22
-
23
  ### After running, it's split with https://github.com/akarazniewicz/cocosplit
24
  ### This may or may not be temporary
25
 
 
13
  from rcnn_model.preprocessing.cleaning_single_image import preprocess_image
14
  from rcnn_model.utils.floorplan_vectorizer_utils import get_image_size, draw_from_coco
15
 
 
 
 
 
 
 
 
16
  ### After running, it's split with https://github.com/akarazniewicz/cocosplit
17
  ### This may or may not be temporary
18
 
rcnn_model/preprocessing/cleaning_single_image.py CHANGED
@@ -1,37 +1,20 @@
1
- # single_image_cleaning.py
2
-
3
  import cv2
4
  import numpy as np
5
- import os
6
 
7
  def preprocess_image(image_path):
8
- """
9
- Preprocess a single floorplan image: denoising, CLAHE, edge enhancement.
10
- """
11
- print(f"🧹 Preprocessing image: {image_path}")
12
 
13
  image = cv2.imread(image_path)
14
  if image is None:
15
- print(f"Error: Could not read image from {image_path}")
16
  return None
17
-
18
- # Convert to grayscale
19
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
20
-
21
- # Apply Gaussian blur
22
  denoisy_img = cv2.GaussianBlur(gray, (5, 5), 0)
23
-
24
- # Apply CLAHE (Contrast Limited Adaptive Histogram Equalization)
25
  clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
26
  enhanced = clahe.apply(denoisy_img)
27
-
28
- # Apply threshold
29
  _, thresholded = cv2.threshold(enhanced, 150, 255, cv2.THRESH_BINARY)
30
-
31
- # Detect edges
32
  edges = cv2.Canny(thresholded, 100, 220, apertureSize=3)
33
-
34
- # Detect lines and draw them
35
  output_img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
36
  lines = cv2.HoughLinesP(edges, rho=1, theta=np.pi / 180, threshold=50,
37
  minLineLength=35, maxLineGap=5)
@@ -41,8 +24,7 @@ def preprocess_image(image_path):
41
  x1, y1, x2, y2 = line[0]
42
  cv2.line(output_img, (x1, y1), (x2, y2), (210, 210, 210), 1)
43
 
44
- # Blend the original image with line-enhanced version
45
  blended_image = cv2.addWeighted(image, 0.7, output_img, 0.3, 0)
46
 
47
- print(f"Preprocessing complete for: {image_path}")
48
  return blended_image
 
1
+ # This file is a version of the original cleaning_images.py file, but it has been modified to only process a single image.
 
2
  import cv2
3
  import numpy as np
 
4
 
5
  def preprocess_image(image_path):
6
+ print(f" Preprocessing image: {image_path}")
 
 
 
7
 
8
  image = cv2.imread(image_path)
9
  if image is None:
10
+ print(f"Error: Could not read image from {image_path}")
11
  return None
 
 
12
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
 
13
  denoisy_img = cv2.GaussianBlur(gray, (5, 5), 0)
 
 
14
  clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
15
  enhanced = clahe.apply(denoisy_img)
 
 
16
  _, thresholded = cv2.threshold(enhanced, 150, 255, cv2.THRESH_BINARY)
 
 
17
  edges = cv2.Canny(thresholded, 100, 220, apertureSize=3)
 
 
18
  output_img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
19
  lines = cv2.HoughLinesP(edges, rho=1, theta=np.pi / 180, threshold=50,
20
  minLineLength=35, maxLineGap=5)
 
24
  x1, y1, x2, y2 = line[0]
25
  cv2.line(output_img, (x1, y1), (x2, y2), (210, 210, 210), 1)
26
 
 
27
  blended_image = cv2.addWeighted(image, 0.7, output_img, 0.3, 0)
28
 
29
+ print(f"Preprocessing complete for: {image_path}")
30
  return blended_image
rcnn_model/preprocessing/svg_to_yolo.py CHANGED
@@ -19,7 +19,6 @@ def extract_svg_elements(svg_file):
19
  svg_width = float(root.get("width", "1"))
20
  svg_height = float(root.get("height", "1"))
21
 
22
- # floorplans = {}
23
  floorplans = {"Door": [], "Window": [], "Space": []}
24
  for floorplan in root.findall(".//svg:g[@class]", namespaces=namespace):
25
  class_attr = floorplan.get("class", "").strip()
@@ -54,11 +53,8 @@ def extract_svg_elements(svg_file):
54
  return floorplans, svg_width, svg_height
55
 
56
  def get_bounding_box(polygons, svg_width, svg_height):
57
- """Compute YOLO bounding box from polygon points."""
58
  all_x, all_y = [], []
59
  print(polygons)
60
- # for polygon in polygons:
61
- # print(polygon)
62
  points = polygons.strip().split(" ")
63
  for point in points:
64
  x, y = map(float, point.split(","))
@@ -78,7 +74,6 @@ def get_bounding_box(polygons, svg_width, svg_height):
78
  return (x_center, y_center, width, height)
79
 
80
  def save_yolo_annotations(floorplans, output_dir, filename):
81
- """Save extracted bounding boxes in YOLO format."""
82
  os.makedirs("dataset", exist_ok=True)
83
  os.makedirs(output_dir, exist_ok=True)
84
 
 
19
  svg_width = float(root.get("width", "1"))
20
  svg_height = float(root.get("height", "1"))
21
 
 
22
  floorplans = {"Door": [], "Window": [], "Space": []}
23
  for floorplan in root.findall(".//svg:g[@class]", namespaces=namespace):
24
  class_attr = floorplan.get("class", "").strip()
 
53
  return floorplans, svg_width, svg_height
54
 
55
  def get_bounding_box(polygons, svg_width, svg_height):
 
56
  all_x, all_y = [], []
57
  print(polygons)
 
 
58
  points = polygons.strip().split(" ")
59
  for point in points:
60
  x, y = map(float, point.split(","))
 
74
  return (x_center, y_center, width, height)
75
 
76
  def save_yolo_annotations(floorplans, output_dir, filename):
 
77
  os.makedirs("dataset", exist_ok=True)
78
  os.makedirs(output_dir, exist_ok=True)
79
 
rcnn_model/scripts/rcnn_config.py CHANGED
@@ -26,8 +26,6 @@ def write_config():
26
  cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
27
  cfg.MODEL.ROI_MASK_HEAD.NUM_CONV = 3
28
  cfg.TEST.DETECTIONS_PER_IMAGE = 120
29
- # Added this extra line
30
- # cfg.OUTPUT_DIR = str(from_root("rcnn_model/output"))
31
 
32
  return cfg
33
 
 
26
  cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
27
  cfg.MODEL.ROI_MASK_HEAD.NUM_CONV = 3
28
  cfg.TEST.DETECTIONS_PER_IMAGE = 120
 
 
29
 
30
  return cfg
31
 
rcnn_model/scripts/rcnn_eval.py CHANGED
@@ -12,14 +12,10 @@ from detectron2.utils.visualizer import Visualizer
12
  import random
13
  import matplotlib.pyplot as plt
14
  import time
15
- from rcnn_config import write_config
16
  from from_root import from_root
17
  from rcnn_model.utils.floorplan_vectorizer_utils import check_image_size_thresh
18
 
19
 
20
- # sys.path.append(str(from_root("utils")))
21
- # from floorplan_vectorizer_utils import check_image_size_thresh
22
-
23
  results_directory = str(from_root("results"))+"/"
24
  max_image_size = 700*500
25
 
 
12
  import random
13
  import matplotlib.pyplot as plt
14
  import time
 
15
  from from_root import from_root
16
  from rcnn_model.utils.floorplan_vectorizer_utils import check_image_size_thresh
17
 
18
 
 
 
 
19
  results_directory = str(from_root("results"))+"/"
20
  max_image_size = 700*500
21
 
rcnn_model/scripts/rcnn_run.py CHANGED
@@ -6,24 +6,11 @@ import matplotlib.pyplot as plt
6
  import torch
7
  from datetime import datetime
8
  from rcnn_config import write_config
9
- import sys
10
  from from_root import from_root
11
  from rcnn_model.preprocessing.cleaning_single_image import preprocess_image
12
  from rcnn_model.utils.floorplan_vectorizer_utils import draw_from_coco, bitmask_to_polygon
13
  from rcnn_model.extraction.annotation_builder import AnnotationBuilder as AnnBuild
14
 
15
-
16
-
17
- # sys.path.append(str(from_root("preprocessing")))
18
- # from cleaning_images import preprocessing
19
- # sys.path.append(str(from_root("utils")))
20
- # from floorplan_vectorizer_utils import draw_from_coco, bitmask_to_polygon
21
- # sys.path.append(str(from_root("dataset/extraction_scripts")))
22
- # from annotation_builder import AnnotationBuilder as AnnBuild
23
-
24
- # results_directory = str(from_root("results"))+"/"
25
- # sample_data_directory = str(from_root("models/rcnn/sample_data"))+"/"
26
-
27
  results_directory = "/tmp/results/"
28
  sample_data_directory = "rcnn_model/sample/"
29
  model_directory = "/tmp/output/"
@@ -34,15 +21,13 @@ def main(cfg,img_source_path, coco_dest_filename, val_img_dest_filename):
34
  print("Inside main: " + img_source_path)
35
  #configure model
36
  cfg.DATALOADER.NUM_WORKERS = 1
37
- cfg.SOLVER.IMS_PER_BATCH = 1
38
- # Changed this file here
39
  cfg.MODEL.WEIGHTS = os.path.join(model_directory, "model_final.pth")
40
  cfg.MODEL.DEVICE = "cpu"
41
  predictor = DefaultPredictor(cfg)
42
 
43
  #run
44
  prediction_runner(img_source_path, results_directory+coco_dest_filename, results_directory+val_img_dest_filename, predictor)
45
- #prediction_runner(img_source_path, results_directory+coco_dest_filename, results_directory+val_img_dest_filename, predictor, segmented_prediction=True, scale_factor=.5)
46
  print("SAVED to "+results_directory+coco_dest_filename+" and "+results_directory+val_img_dest_filename)
47
 
48
 
@@ -174,4 +159,5 @@ if __name__ == "__main__":
174
  print("Inside Main Function Call")
175
  cfg = write_config()
176
  run_index=-1
 
177
  main(cfg,sample_data_directory+"F1_original.png","cubicasa_result_run_"+str(run_index)+".json","cubicasa_result_run_"+str(run_index)+".png")
 
6
  import torch
7
  from datetime import datetime
8
  from rcnn_config import write_config
 
9
  from from_root import from_root
10
  from rcnn_model.preprocessing.cleaning_single_image import preprocess_image
11
  from rcnn_model.utils.floorplan_vectorizer_utils import draw_from_coco, bitmask_to_polygon
12
  from rcnn_model.extraction.annotation_builder import AnnotationBuilder as AnnBuild
13
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  results_directory = "/tmp/results/"
15
  sample_data_directory = "rcnn_model/sample/"
16
  model_directory = "/tmp/output/"
 
21
  print("Inside main: " + img_source_path)
22
  #configure model
23
  cfg.DATALOADER.NUM_WORKERS = 1
24
+ cfg.SOLVER.IMS_PER_BATCH = 1
 
25
  cfg.MODEL.WEIGHTS = os.path.join(model_directory, "model_final.pth")
26
  cfg.MODEL.DEVICE = "cpu"
27
  predictor = DefaultPredictor(cfg)
28
 
29
  #run
30
  prediction_runner(img_source_path, results_directory+coco_dest_filename, results_directory+val_img_dest_filename, predictor)
 
31
  print("SAVED to "+results_directory+coco_dest_filename+" and "+results_directory+val_img_dest_filename)
32
 
33
 
 
159
  print("Inside Main Function Call")
160
  cfg = write_config()
161
  run_index=-1
162
+ #Sample run for a single image
163
  main(cfg,sample_data_directory+"F1_original.png","cubicasa_result_run_"+str(run_index)+".json","cubicasa_result_run_"+str(run_index)+".png")
rcnn_model/utils/coco_to_inovonics_json.py CHANGED
@@ -2,7 +2,6 @@
2
  from pycocotools.coco import COCO
3
  import json
4
  from inovonics_ann_builder import InovonicsAnnotationBuilder as InovAnnBuild
5
- from from_root import from_root
6
 
7
  def main(coco_source_path, inovonics_anns_dest_path, img_ids=[1]):
8
  coco = COCO(coco_source_path)
 
2
  from pycocotools.coco import COCO
3
  import json
4
  from inovonics_ann_builder import InovonicsAnnotationBuilder as InovAnnBuild
 
5
 
6
  def main(coco_source_path, inovonics_anns_dest_path, img_ids=[1]):
7
  coco = COCO(coco_source_path)
rcnn_model/utils/floorplan_vectorizer_utils.py CHANGED
@@ -39,7 +39,6 @@ def draw_from_coco(id,coco,annotated_img_dest_path,category_filter = [0,1,2,3],
39
 
40
  def get_blank_image(width,height):
41
  blank = io.imread(str(from_root("models/rcnn/westmoor_check/white_bg.png")))
42
- #return cv2.resize(blank, (0,0), fx=width, fy=height)
43
  return blank[:width,:height,:]
44
 
45
 
 
39
 
40
  def get_blank_image(width,height):
41
  blank = io.imread(str(from_root("models/rcnn/westmoor_check/white_bg.png")))
 
42
  return blank[:width,:height,:]
43
 
44
 
rcnn_model/utils/inovonics_ann_builder.py CHANGED
@@ -1,6 +1,4 @@
1
  import json
2
- import numpy as np
3
- from datetime import datetime
4
  import random
5
 
6
  version_number = "0.0.1"
 
1
  import json
 
 
2
  import random
3
 
4
  version_number = "0.0.1"