Spaces:
Runtime error
Runtime error
Update app.py
#2
by
Rekham1110
- opened
app.py
CHANGED
|
@@ -1,9 +1,6 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
from PIL import Image
|
| 3 |
-
import torch
|
| 4 |
-
from transformers import ViTForImageClassification, ViTImageProcessor
|
| 5 |
import os
|
| 6 |
-
import re
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
from simple_salesforce import Salesforce
|
| 9 |
from datetime import datetime
|
|
@@ -11,18 +8,18 @@ from fastapi import FastAPI, HTTPException, Security, Depends
|
|
| 11 |
from fastapi.security import APIKeyHeader
|
| 12 |
import base64
|
| 13 |
import io
|
|
|
|
| 14 |
|
| 15 |
# Load environment variables
|
| 16 |
load_dotenv()
|
| 17 |
-
HF_API_KEY = os.getenv("HF_API_KEY")
|
| 18 |
SF_USERNAME = os.getenv("SF_USERNAME")
|
| 19 |
SF_PASSWORD = os.getenv("SF_PASSWORD")
|
| 20 |
SF_SECURITY_TOKEN = os.getenv("SF_SECURITY_TOKEN")
|
| 21 |
SF_CONSUMER_KEY = os.getenv("SF_CONSUMER_KEY")
|
| 22 |
SF_CONSUMER_SECRET = os.getenv("SF_CONSUMER_SECRET")
|
| 23 |
-
API_KEY = os.getenv("API_KEY", "your-api-key-here")
|
| 24 |
|
| 25 |
-
# Validate Salesforce credentials
|
| 26 |
if not all([SF_USERNAME, SF_PASSWORD, SF_SECURITY_TOKEN, SF_CONSUMER_KEY, SF_CONSUMER_SECRET]):
|
| 27 |
raise ValueError("Missing Salesforce credentials. Set SF_USERNAME, SF_PASSWORD, SF_SECURITY_TOKEN, SF_CONSUMER_KEY, and SF_CONSUMER_SECRET in environment variables.")
|
| 28 |
|
|
@@ -40,12 +37,6 @@ except Exception as e:
|
|
| 40 |
print(f"Salesforce connection failed: {str(e)}")
|
| 41 |
raise
|
| 42 |
|
| 43 |
-
# Load pre-trained model and processor
|
| 44 |
-
model_name = "google/vit-base-patch16-224"
|
| 45 |
-
processor = ViTImageProcessor.from_pretrained(model_name)
|
| 46 |
-
model = ViTForImageClassification.from_pretrained(model_name)
|
| 47 |
-
model.eval()
|
| 48 |
-
|
| 49 |
# FastAPI app for API endpoint
|
| 50 |
app = FastAPI()
|
| 51 |
|
|
@@ -56,6 +47,31 @@ async def verify_api_key(api_key: str = Security(api_key_header)):
|
|
| 56 |
raise HTTPException(status_code=401, detail="Invalid API Key")
|
| 57 |
return api_key
|
| 58 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
@app.post("/predict-milestone")
|
| 60 |
async def predict_milestone(payload: dict, api_key: str = Depends(verify_api_key)):
|
| 61 |
try:
|
|
@@ -68,9 +84,9 @@ async def predict_milestone(payload: dict, api_key: str = Depends(verify_api_key
|
|
| 68 |
if image_data.startswith("data:image"):
|
| 69 |
image_data = image_data.split(",")[1] # Remove data URI prefix
|
| 70 |
img_bytes = base64.b64decode(image_data)
|
| 71 |
-
img = Image.open(io.BytesIO(img_bytes))
|
| 72 |
|
| 73 |
-
# Validate image size (max 20MB
|
| 74 |
img_bytes_size = len(img_bytes) / (1024 * 1024)
|
| 75 |
if img_bytes_size > 20:
|
| 76 |
raise HTTPException(status_code=400, detail="Image size exceeds 20MB")
|
|
@@ -79,136 +95,91 @@ async def predict_milestone(payload: dict, api_key: str = Depends(verify_api_key
|
|
| 79 |
if not img.format.lower() in ["jpeg", "png"]:
|
| 80 |
raise HTTPException(status_code=400, detail="Only JPG/PNG images are supported")
|
| 81 |
|
| 82 |
-
#
|
| 83 |
-
|
| 84 |
-
img.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
|
| 85 |
-
inputs = processor(images=img, return_tensors="pt")
|
| 86 |
-
|
| 87 |
-
# Run inference (within 5 seconds per TSD)
|
| 88 |
-
with torch.no_grad():
|
| 89 |
-
outputs = model(**inputs)
|
| 90 |
-
logits = outputs.logits
|
| 91 |
-
probabilities = torch.softmax(logits, dim=1)
|
| 92 |
-
|
| 93 |
-
# Get top prediction
|
| 94 |
-
predicted_idx = torch.argmax(probabilities, dim=1).item()
|
| 95 |
-
milestone = model.config.id2label.get(predicted_idx, "Unknown Milestone")
|
| 96 |
-
|
| 97 |
-
# Mock percent_complete (since model doesn't provide it)
|
| 98 |
-
percent_complete = 75 # Adjust based on milestone or train a model to predict this
|
| 99 |
|
| 100 |
return {
|
| 101 |
"milestone": milestone,
|
| 102 |
-
"percent_complete": percent_complete
|
|
|
|
| 103 |
}
|
| 104 |
|
| 105 |
except Exception as e:
|
| 106 |
raise HTTPException(status_code=500, detail=f"Error processing image: {str(e)}")
|
| 107 |
|
|
|
|
| 108 |
def process_image(image, project_name):
|
| 109 |
try:
|
| 110 |
# Validate inputs
|
| 111 |
if image is None:
|
| 112 |
-
return "Error: Please upload an image to proceed.", "Pending", "", ""
|
| 113 |
if not project_name:
|
| 114 |
-
return "Error: Please enter a project name to proceed.", "Pending", "", ""
|
| 115 |
-
if not
|
| 116 |
-
return "Error: Project name must be alphanumeric (letters
|
|
|
|
|
|
|
|
|
|
| 117 |
|
| 118 |
# Validate image size and type
|
| 119 |
image_size_mb = os.path.getsize(image) / (1024 * 1024)
|
| 120 |
if image_size_mb > 20:
|
| 121 |
-
return "Error: Image size exceeds 20MB.", "Failure", "", ""
|
| 122 |
if not image.lower().endswith(('.jpg', '.jpeg', '.png')):
|
| 123 |
-
return "Error: Only JPG/PNG images are supported.", "Failure", "", ""
|
| 124 |
-
|
| 125 |
-
#
|
| 126 |
-
|
| 127 |
-
max_size = 1024
|
| 128 |
-
img.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
|
| 129 |
-
inputs = processor(images=img, return_tensors="pt")
|
| 130 |
-
|
| 131 |
-
# Run inference
|
| 132 |
-
with torch.no_grad():
|
| 133 |
-
outputs = model(**inputs)
|
| 134 |
-
logits = outputs.logits
|
| 135 |
-
probabilities = torch.softmax(logits, dim=1)
|
| 136 |
-
|
| 137 |
-
# Get top predictions
|
| 138 |
-
top_probs, top_indices = torch.topk(probabilities, 3, dim=1)
|
| 139 |
-
top_probs = top_probs[0].tolist()
|
| 140 |
-
top_indices = top_indices[0].tolist()
|
| 141 |
-
|
| 142 |
-
# Map indices to labels
|
| 143 |
-
predicted_idx = top_indices[0]
|
| 144 |
-
milestone = model.config.id2label.get(predicted_idx, "Unknown Milestone")
|
| 145 |
-
|
| 146 |
-
# Format top predictions
|
| 147 |
-
prediction_details = "\n".join([f"{model.config.id2label.get(idx, 'Unknown Milestone')}: {prob:.2f}" for idx, prob in zip(top_indices, top_probs)])
|
| 148 |
|
| 149 |
# Update Salesforce record
|
| 150 |
record = {
|
| 151 |
"Name": project_name,
|
| 152 |
"Current_Milestone__c": milestone,
|
|
|
|
| 153 |
"Last_Updated_On__c": datetime.now().isoformat(),
|
| 154 |
"Upload_Status__c": "Success",
|
| 155 |
-
"Comments__c": f"AI Prediction: {milestone}"
|
| 156 |
-
"Version__c": 1
|
| 157 |
}
|
| 158 |
|
| 159 |
try:
|
| 160 |
-
|
| 161 |
-
query = f"SELECT Id, Version__c FROM Construction_Project__c WHERE Name = '{project_name}'"
|
| 162 |
result = sf.query(query)
|
| 163 |
if result["totalSize"] > 0:
|
| 164 |
project_id = result["records"][0]["Id"]
|
| 165 |
-
current_version = result["records"][0].get("Version__c", 0)
|
| 166 |
-
record["Version__c"] = current_version + 1
|
| 167 |
sf.Construction_Project__c.update(project_id, record)
|
| 168 |
else:
|
| 169 |
sf.Construction_Project__c.create(record)
|
| 170 |
except Exception as e:
|
| 171 |
-
return f"Error: Failed to update Salesforce - {str(e)}", "Failure", "",
|
| 172 |
|
| 173 |
return (
|
| 174 |
-
f"Success: Milestone: {milestone}",
|
| 175 |
"Success",
|
| 176 |
milestone,
|
| 177 |
-
|
|
|
|
| 178 |
)
|
| 179 |
|
| 180 |
except Exception as e:
|
| 181 |
-
return f"Error: {str(e)}", "Failure", "", ""
|
| 182 |
|
| 183 |
-
# Gradio interface
|
| 184 |
with gr.Blocks(css=".gradio-container {background-color: #f0f4f8; font-family: Arial;} .title {color: #2c3e50; font-size: 24px; text-align: center;}") as demo:
|
| 185 |
gr.Markdown("<h1 class='title'>Construction Milestone Detector</h1>")
|
| 186 |
-
project_name = gr.Textbox(label="Project Name", placeholder="Enter project name")
|
| 187 |
image_input = gr.Image(type="filepath", label="Upload Construction Site Photo (JPG/PNG, ≤ 20MB)")
|
| 188 |
submit_button = gr.Button("Process Image")
|
| 189 |
output_text = gr.Textbox(label="Result")
|
| 190 |
upload_status = gr.Textbox(label="Upload Status")
|
| 191 |
milestone = gr.Textbox(label="Detected Milestone")
|
| 192 |
-
|
| 193 |
-
progress = gr.Slider(0, 100, label="
|
| 194 |
-
|
| 195 |
-
def update_progress():
|
| 196 |
-
return 50
|
| 197 |
-
|
| 198 |
-
def complete_progress():
|
| 199 |
-
return 100
|
| 200 |
|
| 201 |
submit_button.click(
|
| 202 |
-
fn=update_progress,
|
| 203 |
-
outputs=progress
|
| 204 |
-
).then(
|
| 205 |
fn=process_image,
|
| 206 |
inputs=[image_input, project_name],
|
| 207 |
-
outputs=[output_text, upload_status, milestone,
|
| 208 |
-
).then(
|
| 209 |
-
fn=complete_progress,
|
| 210 |
-
outputs=progress
|
| 211 |
)
|
| 212 |
|
| 213 |
-
#
|
| 214 |
-
demo.launch()
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from PIL import Image
|
|
|
|
|
|
|
| 3 |
import os
|
|
|
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
from simple_salesforce import Salesforce
|
| 6 |
from datetime import datetime
|
|
|
|
| 8 |
from fastapi.security import APIKeyHeader
|
| 9 |
import base64
|
| 10 |
import io
|
| 11 |
+
import random # For mock predictions
|
| 12 |
|
| 13 |
# Load environment variables
|
| 14 |
load_dotenv()
|
|
|
|
# Salesforce / API configuration, read from the environment (.env loaded above).
SF_USERNAME = os.getenv("SF_USERNAME")
SF_PASSWORD = os.getenv("SF_PASSWORD")
SF_SECURITY_TOKEN = os.getenv("SF_SECURITY_TOKEN")
SF_CONSUMER_KEY = os.getenv("SF_CONSUMER_KEY")
SF_CONSUMER_SECRET = os.getenv("SF_CONSUMER_SECRET")
# NOTE(review): a hard-coded fallback API key means auth "works" even when the
# real key is not configured — confirm this is acceptable outside local testing.
API_KEY = os.getenv("API_KEY", "your-api-key-here")

# Fail fast at startup when any Salesforce credential is missing.
_sf_credentials = (SF_USERNAME, SF_PASSWORD, SF_SECURITY_TOKEN, SF_CONSUMER_KEY, SF_CONSUMER_SECRET)
if not all(_sf_credentials):
    raise ValueError("Missing Salesforce credentials. Set SF_USERNAME, SF_PASSWORD, SF_SECURITY_TOKEN, SF_CONSUMER_KEY, and SF_CONSUMER_SECRET in environment variables.")
|
| 25 |
|
|
|
|
| 37 |
print(f"Salesforce connection failed: {str(e)}")
|
| 38 |
raise
|
| 39 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
# FastAPI app for API endpoint
|
| 41 |
app = FastAPI()
|
| 42 |
|
|
|
|
| 47 |
raise HTTPException(status_code=401, detail="Invalid API Key")
|
| 48 |
return api_key
|
| 49 |
|
| 50 |
# Mock AI model for milestone detection (since we can't train a real model here)
def mock_ai_model(image):
    """Return a (milestone, completion_percent, confidence_score) tuple for *image*.

    Stand-in for a trained vision classifier: the preprocessing mirrors a real
    inference pipeline, but the prediction is fixed and only the confidence
    score is randomised to look plausible.
    """
    # Preprocessing: normalise colour space and bound resolution, as a real
    # pipeline would. thumbnail() resizes in place and keeps the aspect ratio.
    img = image.convert("RGB")
    max_size = 1024
    img.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)

    # Closed label set a real model would be trained to distinguish.
    milestones = [
        "Foundation Completed",
        "Structural Framework Started",
        "Walls In Progress",
        "Roofing Started",
        "Interior Work Started",
        "Project Completed",
    ]

    # Fixed prediction, indexed into the label set so the list above is the
    # single source of truth instead of a duplicated string literal.
    milestone = milestones[1]  # "Structural Framework Started"
    completion_percent = 30  # rough estimate associated with this milestone
    confidence_score = round(random.uniform(0.85, 0.95), 2)  # simulated 85-95% confidence

    return milestone, completion_percent, confidence_score
|
| 74 |
+
|
| 75 |
@app.post("/predict-milestone")
|
| 76 |
async def predict_milestone(payload: dict, api_key: str = Depends(verify_api_key)):
|
| 77 |
try:
|
|
|
|
| 84 |
if image_data.startswith("data:image"):
|
| 85 |
image_data = image_data.split(",")[1] # Remove data URI prefix
|
| 86 |
img_bytes = base64.b64decode(image_data)
|
| 87 |
+
img = Image.open(io.BytesIO(img_bytes))
|
| 88 |
|
| 89 |
+
# Validate image size (max 20MB)
|
| 90 |
img_bytes_size = len(img_bytes) / (1024 * 1024)
|
| 91 |
if img_bytes_size > 20:
|
| 92 |
raise HTTPException(status_code=400, detail="Image size exceeds 20MB")
|
|
|
|
| 95 |
if not img.format.lower() in ["jpeg", "png"]:
|
| 96 |
raise HTTPException(status_code=400, detail="Only JPG/PNG images are supported")
|
| 97 |
|
| 98 |
+
# Run mock AI model
|
| 99 |
+
milestone, percent_complete, confidence_score = mock_ai_model(img)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 100 |
|
| 101 |
return {
|
| 102 |
"milestone": milestone,
|
| 103 |
+
"percent_complete": percent_complete,
|
| 104 |
+
"confidence_score": confidence_score
|
| 105 |
}
|
| 106 |
|
| 107 |
except Exception as e:
|
| 108 |
raise HTTPException(status_code=500, detail=f"Error processing image: {str(e)}")
|
| 109 |
|
| 110 |
# Function for Gradio UI to process the image
def process_image(image, project_name):
    """Validate the upload, run the mock model, and sync the result to Salesforce.

    Parameters:
        image: filepath string from the Gradio Image component, or None.
        project_name: alphanumeric project identifier, used as the record Name.

    Returns:
        (result_message, upload_status, milestone, confidence_text, percent_complete)
    """
    try:
        # Validate inputs before touching the filesystem.
        if image is None:
            return "Error: Please upload an image to proceed.", "Pending", "", "", 0
        if not project_name:
            return "Error: Please enter a project name to proceed.", "Pending", "", "", 0
        # isalnum() also keeps the SOQL query below safe from quote injection.
        if not project_name.isalnum():
            return "Error: Project name must be alphanumeric (letters and numbers only).", "Pending", "", "", 0

        # Validate image size and type *before* opening the file, so oversized
        # or unsupported uploads get these messages rather than falling through
        # to the generic PIL error in the outer handler.
        image_size_mb = os.path.getsize(image) / (1024 * 1024)
        if image_size_mb > 20:
            return "Error: Image size exceeds 20MB.", "Failure", "", "", 0
        if not image.lower().endswith(('.jpg', '.jpeg', '.png')):
            return "Error: Only JPG/PNG images are supported.", "Failure", "", "", 0

        # Open the pre-validated image and run the mock AI model.
        img = Image.open(image)
        milestone, percent_complete, confidence_score = mock_ai_model(img)

        # Build the Salesforce record (upserted by project Name below).
        record = {
            "Name": project_name,
            "Current_Milestone__c": milestone,
            "Completion_Percentage__c": percent_complete,
            "Last_Updated_On__c": datetime.now().isoformat(),
            "Upload_Status__c": "Success",
            # :.0f avoids float artifacts such as "87.00000000000001%".
            "Comments__c": f"AI Prediction: {milestone} with {confidence_score * 100:.0f}% confidence"
        }

        try:
            # Safe interpolation: project_name was verified alphanumeric above.
            query = f"SELECT Id FROM Construction_Project__c WHERE Name = '{project_name}'"
            result = sf.query(query)
            if result["totalSize"] > 0:
                # Existing project: update it in place.
                project_id = result["records"][0]["Id"]
                sf.Construction_Project__c.update(project_id, record)
            else:
                sf.Construction_Project__c.create(record)
        except Exception as e:
            return f"Error: Failed to update Salesforce - {str(e)}", "Failure", "", "", 0

        return (
            f"Success: Milestone: {milestone}, Completion: {percent_complete}%",
            "Success",
            milestone,
            f"Confidence Score: {confidence_score}",
            percent_complete
        )

    except Exception as e:
        return f"Error: {str(e)}", "Failure", "", "", 0
|
| 165 |
|
| 166 |
# Gradio interface for testing
with gr.Blocks(css=".gradio-container {background-color: #f0f4f8; font-family: Arial;} .title {color: #2c3e50; font-size: 24px; text-align: center;}") as demo:
    gr.Markdown("<h1 class='title'>Construction Milestone Detector</h1>")

    # Input widgets (creation order fixes the on-page layout).
    name_box = gr.Textbox(label="Project Name", placeholder="Enter project name (e.g., MyHouse)")
    photo_input = gr.Image(type="filepath", label="Upload Construction Site Photo (JPG/PNG, ≤ 20MB)")
    run_btn = gr.Button("Process Image")

    # Output widgets, in the same order process_image returns its values.
    result_box = gr.Textbox(label="Result")
    status_box = gr.Textbox(label="Upload Status")
    milestone_box = gr.Textbox(label="Detected Milestone")
    confidence_box = gr.Textbox(label="Confidence Score")
    progress_slider = gr.Slider(0, 100, label="Completion Percentage", interactive=False, value=0)

    # Wire the button to the processing function.
    run_btn.click(
        fn=process_image,
        inputs=[photo_input, name_box],
        outputs=[result_box, status_box, milestone_box, confidence_box, progress_slider],
    )

# Launch the Gradio app
demo.launch()