Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,153 +1,77 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
-
import requests
|
| 3 |
from PIL import Image
|
| 4 |
-
import
|
| 5 |
-
import
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
SF_CONSUMER_KEY = os.getenv("SF_CONSUMER_KEY")
|
| 18 |
-
SF_CONSUMER_SECRET = os.getenv("SF_CONSUMER_SECRET")
|
| 19 |
-
|
| 20 |
-
# Validate Salesforce credentials
|
| 21 |
-
if not all([SF_USERNAME, SF_PASSWORD, SF_SECURITY_TOKEN, SF_CONSUMER_KEY, SF_CONSUMER_SECRET]):
|
| 22 |
-
raise ValueError("Missing Salesforce credentials. Please set SF_USERNAME, SF_PASSWORD, SF_SECURITY_TOKEN, SF_CONSUMER_KEY, and SF_CONSUMER_SECRET in environment variables.")
|
| 23 |
-
|
| 24 |
-
# Initialize Salesforce connection
|
| 25 |
-
try:
|
| 26 |
-
sf = Salesforce(
|
| 27 |
-
username=SF_USERNAME,
|
| 28 |
-
password=SF_PASSWORD,
|
| 29 |
-
security_token=SF_SECURITY_TOKEN,
|
| 30 |
-
consumer_key=SF_CONSUMER_KEY,
|
| 31 |
-
consumer_secret=SF_CONSUMER_SECRET,
|
| 32 |
-
domain='login' # Use 'test' for sandbox
|
| 33 |
-
)
|
| 34 |
-
except Exception as e:
|
| 35 |
-
print(f"Salesforce connection failed: {str(e)}")
|
| 36 |
-
raise
|
| 37 |
-
|
| 38 |
-
# Hugging Face Inference API endpoint (replace with your model)
|
| 39 |
-
HF_MODEL_URL = "https://api-inference.huggingface.co/models/nasreshsuguru/construction-milestone-detector"
|
| 40 |
-
|
| 41 |
-
def process_image(image, project_name):
|
| 42 |
try:
|
| 43 |
-
# Validate
|
| 44 |
if image is None:
|
| 45 |
-
return "Error:
|
| 46 |
-
if not project_name:
|
| 47 |
-
return "Error: Please enter a project name to proceed.", "Pending", "", 0.0, 0.0, ""
|
| 48 |
-
if not re.match(r'^[a-zA-Z0-9\s-]+$', project_name):
|
| 49 |
-
return "Error: Project name must be alphanumeric (letters, numbers, spaces, or hyphens).", "Pending", "", 0.0, 0.0, ""
|
| 50 |
-
|
| 51 |
-
# Validate image size and type
|
| 52 |
-
image_size_mb = os.path.getsize(image) / (1024 * 1024)
|
| 53 |
-
if image_size_mb > 20:
|
| 54 |
-
return "Error: Image size exceeds 20MB.", "Failure", "", 0.0, 0.0, ""
|
| 55 |
-
if not image.lower().endswith(('.jpg', '.jpeg', '.png')):
|
| 56 |
-
return "Error: Only JPG/PNG images are supported.", "Failure", "", 0.0, 0.0, ""
|
| 57 |
|
| 58 |
# Preprocess image
|
| 59 |
-
img = Image.open(image)
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
top_predictions = sorted(result, key=lambda x: x["score"], reverse=True)[:3]
|
| 77 |
-
milestone = top_predictions[0]["label"]
|
| 78 |
-
confidence = top_predictions[0]["score"]
|
| 79 |
-
percent_complete = min(max(int(confidence * 100), 0), 100)
|
| 80 |
-
prediction_details = "\n".join([f"{pred['label']}: {pred['score']:.2f}" for pred in top_predictions])
|
| 81 |
-
|
| 82 |
-
# Update Salesforce record
|
| 83 |
-
record = {
|
| 84 |
-
"Name": project_name,
|
| 85 |
-
"Current_Milestone__c": milestone,
|
| 86 |
-
"Completion_Percentage__c": percent_complete,
|
| 87 |
-
"Last_Updated_On__c": datetime.now().isoformat(),
|
| 88 |
-
"Upload_Status__c": "Success",
|
| 89 |
-
"Comments__c": f"AI Confidence: {confidence:.2f}",
|
| 90 |
-
"Version__c": 1
|
| 91 |
-
}
|
| 92 |
-
|
| 93 |
-
try:
|
| 94 |
-
project_name = project_name.replace("'", "''") # Basic escaping
|
| 95 |
-
query = f"SELECT Id, Version__c FROM Construction_Project__c WHERE Name = '{project_name}'"
|
| 96 |
-
result = sf.query(query)
|
| 97 |
-
if result["totalSize"] > 0:
|
| 98 |
-
project_id = result["records"][0]["Id"]
|
| 99 |
-
current_version = result["records"][0].get("Version__c", 0)
|
| 100 |
-
record["Version__c"] = current_version + 1
|
| 101 |
-
sf.Construction_Project__c.update(project_id, record)
|
| 102 |
-
else:
|
| 103 |
-
sf.Construction_Project__c.create(record)
|
| 104 |
-
except Exception as e:
|
| 105 |
-
return f"Error: Failed to update Salesforce - {str(e)}", "Failure", "", 0.0, 0.0, prediction_details
|
| 106 |
|
| 107 |
return (
|
| 108 |
-
f"Success: Milestone: {milestone}, Completion: {
|
| 109 |
-
"Success",
|
| 110 |
milestone,
|
| 111 |
-
|
| 112 |
-
confidence
|
| 113 |
-
prediction_details
|
| 114 |
)
|
| 115 |
|
| 116 |
except Exception as e:
|
| 117 |
-
return f"Error: {str(e)}", "
|
| 118 |
|
| 119 |
# Gradio interface
|
| 120 |
-
with gr.Blocks(
|
| 121 |
-
gr.Markdown("
|
| 122 |
-
project_name = gr.Textbox(label="Project Name", placeholder="Enter project name")
|
| 123 |
image_input = gr.Image(type="filepath", label="Upload Construction Site Photo (JPG/PNG, ≤ 20MB)")
|
| 124 |
submit_button = gr.Button("Process Image")
|
| 125 |
output_text = gr.Textbox(label="Result")
|
| 126 |
-
upload_status = gr.Textbox(label="Upload Status")
|
| 127 |
milestone = gr.Textbox(label="Detected Milestone")
|
| 128 |
completion_percent = gr.Slider(0, 100, label="Completion Percentage (%)", interactive=False)
|
| 129 |
confidence_score = gr.Slider(0, 1, label="Confidence Score", interactive=False)
|
| 130 |
-
|
| 131 |
-
progress = gr.Slider(0, 100, label="Processing Progress", interactive=False, value=0)
|
| 132 |
|
| 133 |
def update_progress():
|
| 134 |
-
return
|
| 135 |
-
|
| 136 |
-
def complete_progress():
|
| 137 |
-
return 100
|
| 138 |
|
| 139 |
submit_button.click(
|
| 140 |
fn=update_progress,
|
| 141 |
outputs=progress
|
| 142 |
).then(
|
| 143 |
fn=process_image,
|
| 144 |
-
inputs=
|
| 145 |
-
outputs=[output_text,
|
| 146 |
).then(
|
| 147 |
-
fn=
|
| 148 |
outputs=progress
|
| 149 |
)
|
| 150 |
|
| 151 |
if __name__ == "__main__":
|
| 152 |
-
demo.launch()
|
| 153 |
-
|
|
|
|
import gradio as gr
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor
import torch

# Load pre-trained model and processor from Hugging Face.
# Both calls download/cache weights at import time, so the app blocks on
# first startup until the checkpoint is fetched.
# NOTE(review): "google/vit-base-patch16-224" is a generic ImageNet-1k
# classifier — its labels are NOT construction milestones (the code below
# calls this out as mocked). Confirm the intended fine-tuned checkpoint
# before relying on the predictions.
model_name = "google/vit-base-patch16-224"
processor = ViTImageProcessor.from_pretrained(model_name)
model = ViTForImageClassification.from_pretrained(model_name)
model.eval()  # inference mode: disables dropout etc.
| 12 |
+
def process_image(image):
    """
    Classify an uploaded construction-site photo and derive a milestone estimate.

    Parameters
    ----------
    image : str | None
        Filesystem path to the uploaded image (Gradio ``type="filepath"``),
        or ``None`` when nothing was uploaded.

    Returns
    -------
    tuple
        ``(status_message, milestone, completion_percent, confidence)``.
        On any error the last three fields are ``""``, ``0.0``, ``0.0`` so
        every wired Gradio output component still receives a value.
    """
    try:
        # Validate presence before touching the filesystem.
        if image is None:
            return "Error: No image uploaded.", "", 0.0, 0.0

        # Preprocess image. Use a context manager so the underlying file
        # handle is closed deterministically — PIL opens lazily and would
        # otherwise hold the file open until garbage collection (a leak
        # under a long-running server). Convert to RGB because uploads may
        # be RGBA or grayscale while the ViT processor expects 3 channels.
        with Image.open(image) as img:
            inputs = processor(images=img.convert("RGB"), return_tensors="pt")

        # Run inference without gradient tracking (inference only).
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits
            probabilities = torch.softmax(logits, dim=1)

        # Top-1 class index and its probability.
        # NOTE(review): with the generic checkpoint, id2label yields its
        # native labels rather than construction milestones — replace with
        # the fine-tuned model's label mapping.
        predicted_idx = torch.argmax(probabilities, dim=1).item()
        confidence = probabilities[0][predicted_idx].item()

        milestone = model.config.id2label.get(predicted_idx, "Unknown Milestone")
        # Mocked completion heuristic: map confidence onto 0-100 and clamp.
        completion_percent = min(max(int(confidence * 100), 0), 100)

        return (
            f"Success: Milestone: {milestone}, Completion: {completion_percent}%",
            milestone,
            completion_percent,
            confidence
        )
    except Exception as e:
        # Surface any failure as a user-visible message while keeping the
        # output arity stable for the Gradio components.
        return f"Error: {str(e)}", "", 0.0, 0.0
| 49 |
|
# Gradio interface: one image input feeding four output components.
with gr.Blocks() as demo:
    gr.Markdown("# Construction Milestone Detector")
    # NOTE(review): the label promises JPG/PNG ≤ 20MB, but no size or file-type
    # validation is enforced here or in process_image — confirm whether the
    # limit should be checked before inference.
    image_input = gr.Image(type="filepath", label="Upload Construction Site Photo (JPG/PNG, ≤ 20MB)")
    submit_button = gr.Button("Process Image")
    output_text = gr.Textbox(label="Result")
    milestone = gr.Textbox(label="Detected Milestone")
    completion_percent = gr.Slider(0, 100, label="Completion Percentage (%)", interactive=False)
    confidence_score = gr.Slider(0, 1, label="Confidence Score", interactive=False)
    # Simple textual status indicator driven by the event chain below.
    progress = gr.Textbox(label="Processing Status", value="Ready")

    def update_progress():
        # Shown while the chained process_image step runs.
        return "Processing..."

    # Event chain: set status to "Processing...", run inference and fill the
    # four result components, then reset the status to "Ready".
    submit_button.click(
        fn=update_progress,
        outputs=progress
    ).then(
        fn=process_image,
        inputs=image_input,
        outputs=[output_text, milestone, completion_percent, confidence_score]
    ).then(
        fn=lambda: "Ready",
        outputs=progress
    )

if __name__ == "__main__":
    demo.launch()
|
|
|