sahadev10 committed on
Commit
4205306
·
verified ·
1 Parent(s): 0b4b741

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -57
app.py CHANGED
@@ -124,44 +124,24 @@
124
  # demo.launch()
125
 
126
 
127
-
128
- import torch
129
- import numpy as np
130
  import cv2
 
131
  import json
132
- import os
133
- import gradio as gr
134
- from detectron2.detectron2.engine import DefaultPredictor
135
- from detectron2.detectron2.config import get_cfg
136
- from detectron2 import model_zoo
137
- import torch_utils
138
- import dnnlib
139
- import requests
140
  import base64
 
141
  from io import BytesIO
142
 
143
- # Create output directory if it doesn't exist
144
- output_dir = "key/"
145
- os.makedirs(output_dir, exist_ok=True)
146
- output_file = os.path.join(output_dir, "keypoints.json")
147
-
148
- # Load pre-trained Keypoint R-CNN model
149
- cfg = get_cfg()
150
- cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
151
- cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
152
- cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
153
-
154
- # Load the predictor
155
- predictor = DefaultPredictor(cfg)
156
-
157
- # Replace with your actual Ngrok or backend URL
158
- NGROK_URL = "https://your-ngrok-url.ngrok.io/measurements" # Change this
159
-
160
  def process_image(image, user_height_cm):
 
 
 
 
161
  # Convert Gradio image input to OpenCV format
162
  image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
163
 
164
- # Run keypoint detection
165
  outputs = predictor(image)
166
 
167
  # Extract keypoints
@@ -172,6 +152,7 @@ def process_image(image, user_height_cm):
172
  return "No keypoints detected.", None
173
 
174
  # Save keypoints to JSON
 
175
  with open(output_file, "w") as f:
176
  json.dump({"keypoints": keypoints}, f, indent=4)
177
 
@@ -205,10 +186,18 @@ def process_image(image, user_height_cm):
205
  ankle_mid = ((keypoints[L_ANKLE] + keypoints[R_ANKLE]) / 2).tolist()
206
  pixel_height = get_distance(keypoints[NOSE], ankle_mid)
207
 
 
 
 
 
208
  # Estimated full body height (add approx head length)
209
  estimated_full_pixel_height = pixel_height / 0.87 # Since 87% = nose to ankle
210
  pixels_per_cm = estimated_full_pixel_height / user_height_cm
211
 
 
 
 
 
212
  # Waist and shoulder measurements
213
  shoulder_width_px = get_distance(keypoints[L_SHOULDER], keypoints[R_SHOULDER])
214
  waist_width_px = get_distance(keypoints[L_HIP], keypoints[R_HIP])
@@ -253,13 +242,13 @@ def save_to_database(measurements, image, user_height_cm):
253
 
254
  # Convert image to base64
255
  buffered = BytesIO()
256
- pil_image = Image.fromarray(image)
257
  pil_image.save(buffered, format="JPEG")
258
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
259
 
260
- # Send POST request
261
  response = requests.post(
262
- "https://9dbc-210-212-162-140.ngrok-free.app/upload", # Change this
263
  json={
264
  "imageBase64": img_str,
265
  "heightCm": user_height_cm,
@@ -273,28 +262,25 @@ def save_to_database(measurements, image, user_height_cm):
273
 
274
  return f"Status: {response.status_code}, Message: {response.text}"
275
 
276
- # Gradio Interface
277
- def create_interface():
278
- with gr.Blocks() as demo:
279
- with gr.Row():
280
- img_input = gr.Image(type="pil", label="Upload Image")
281
- height_input = gr.Number(label="User Height (cm)")
282
-
283
- with gr.Row():
284
- output_measurements = gr.JSON(label="Measurements")
285
- output_image = gr.Image(type="pil", label="Keypoint Overlay")
286
-
287
- save_btn = gr.Button("Save to Database")
288
- save_status = gr.Textbox(label="Save Status", interactive=False)
289
-
290
- img_input.change(fn=process_image, inputs=[img_input, height_input], outputs=[output_measurements, output_image])
291
- save_btn.click(
292
- fn=save_to_database,
293
- inputs=[output_measurements, output_image, height_input],
294
- outputs=save_status
295
- )
296
-
297
- return demo
298
-
299
- demo = create_interface()
300
- demo.launch()
 
124
  # demo.launch()
125
 
126
 
127
+ from PIL import Image # Importing Image from PIL (Pillow)
 
 
128
  import cv2
129
+ import numpy as np
130
  import json
 
 
 
 
 
 
 
 
131
  import base64
132
+ import requests
133
  from io import BytesIO
134
 
135
+ # Function to process the image and calculate measurements
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
136
  def process_image(image, user_height_cm):
137
+ # Check if the user height is valid
138
+ if user_height_cm == 0 or user_height_cm is None:
139
+ return "User height cannot be zero or None. Please provide a valid height.", None
140
+
141
  # Convert Gradio image input to OpenCV format
142
  image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
143
 
144
+ # Run keypoint detection (using your predictor)
145
  outputs = predictor(image)
146
 
147
  # Extract keypoints
 
152
  return "No keypoints detected.", None
153
 
154
  # Save keypoints to JSON
155
+ output_file = "keypoints.json"
156
  with open(output_file, "w") as f:
157
  json.dump({"keypoints": keypoints}, f, indent=4)
158
 
 
186
  ankle_mid = ((keypoints[L_ANKLE] + keypoints[R_ANKLE]) / 2).tolist()
187
  pixel_height = get_distance(keypoints[NOSE], ankle_mid)
188
 
189
+ # Check if pixel height is non-zero to avoid divide by zero error
190
+ if pixel_height == 0:
191
+ return "Error in calculating pixel height. Please check the input image.", None
192
+
193
  # Estimated full body height (add approx head length)
194
  estimated_full_pixel_height = pixel_height / 0.87 # Since 87% = nose to ankle
195
  pixels_per_cm = estimated_full_pixel_height / user_height_cm
196
 
197
+ # Ensure pixels_per_cm is valid (non-zero)
198
+ if pixels_per_cm == 0:
199
+ return "Error in calculating pixels per cm. Ensure the user height is correct.", None
200
+
201
  # Waist and shoulder measurements
202
  shoulder_width_px = get_distance(keypoints[L_SHOULDER], keypoints[R_SHOULDER])
203
  waist_width_px = get_distance(keypoints[L_HIP], keypoints[R_HIP])
 
242
 
243
  # Convert image to base64
244
  buffered = BytesIO()
245
+ pil_image = Image.fromarray(image) # Convert to PIL Image from NumPy array
246
  pil_image.save(buffered, format="JPEG")
247
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
248
 
249
+ # Send POST request to save measurements and image to the database
250
  response = requests.post(
251
+ "https://9dbc-210-212-162-140.ngrok-free.app/upload", # Replace with your actual URL
252
  json={
253
  "imageBase64": img_str,
254
  "heightCm": user_height_cm,
 
262
 
263
  return f"Status: {response.status_code}, Message: {response.text}"
264
 
265
+ # Example usage (integrating with your Gradio app)
266
+ import gradio as gr
267
+
268
+ # Gradio interface setup
269
+ def process_and_save(image, user_height_cm):
270
+ measurements, processed_image = process_image(image, user_height_cm)
271
+ if measurements:
272
+ save_message = save_to_database(measurements, processed_image, user_height_cm)
273
+ return processed_image, measurements, save_message
274
+ else:
275
+ return None, None, "Error in processing image."
276
+
277
+ # Gradio interface setup
278
+ iface = gr.Interface(
279
+ fn=process_and_save,
280
+ inputs=[gr.Image(type="numpy"), gr.Number(label="User Height (cm)")],
281
+ outputs=[gr.Image(type="numpy"), gr.JSON(), gr.Textbox()],
282
+ live=True
283
+ )
284
+
285
+ iface.launch(share=True)
286
+