# civil_project / app.py — Hugging Face Space (author: IqraFatima, commit f7b150e)
import os

# 🔧 Suppress the Ultralytics config warning in Spaces.
# NOTE: this must run BEFORE `ultralytics` is imported — the package resolves
# its settings/config directory at import time, so setting the variable after
# the import (as the code previously did) has no effect.
os.environ["YOLO_CONFIG_DIR"] = "/tmp"

import gradio as gr
import numpy as np
from openai import OpenAI
from PIL import Image
from ultralytics import YOLO

# ✅ Groq client (OpenAI-compatible endpoint); the API key is stored in the
# Hugging Face secret named "civil_project".
client = OpenAI(
    api_key=os.getenv("civil_project"),
    base_url="https://api.groq.com/openai/v1",
)

# ✅ Crack-segmentation model: 'yolov8n-seg.pt' (nano) for fast edge inference.
# Weights originally from 'OpenSistemas/YOLOv8-crack-seg'.
model = YOLO("yolov8n-seg.pt")
def ask_groq(prompt):
    """Send *prompt* to the Groq chat API and return the reply text, stripped.

    A fixed system message frames the assistant as a structural engineer
    specialized in crack diagnosis; the caller supplies the user prompt.
    """
    conversation = [
        {"role": "system", "content": "You are an expert structural engineer specialized in crack diagnosis."},
        {"role": "user", "content": prompt},
    ]
    response = client.chat.completions.create(
        model="llama3-8b-8192",
        messages=conversation,
        temperature=0.5,
    )
    reply = response.choices[0].message.content
    return reply.strip()
def process_image(image):
    """Segment cracks in *image* and return an LLM-written diagnosis.

    Accepts a numpy array (Gradio's default), a file path string, or a PIL
    image. Returns the Groq response text, or an "❌ Error: ..." string if
    anything fails along the way.
    """
    try:
        # Normalize whatever Gradio handed us into a PIL image.
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        elif isinstance(image, str):
            image = Image.open(image)

        # 🔍 Crack segmentation inference (single image → first result).
        result = model.predict(source=image, imgsz=640, verbose=False)[0]
        crack_masks = [] if result.masks is None else result.masks.data
        n_cracks = len(crack_masks)

        if n_cracks:
            prompt_prefix = f"{n_cracks} crack(s) detected"
        else:
            prompt_prefix = (
                "No cracks were detected by the vision model, "
                "but the image may contain hidden surface damage. "
                "Please analyze contextually."
            )

        user_prompt = f"""
{prompt_prefix}.
Please:
- Diagnose the issue
- Suggest repair methods
- List tools/materials required
- Estimate repair time
"""
        return ask_groq(user_prompt)
    except Exception as e:
        # Surface the failure in the UI textbox rather than crashing the app.
        return f"❌ Error: {e}"
# --- Gradio UI: one image input, one analysis button, one text output. ---
with gr.Blocks() as demo:
    gr.Markdown("## 🚧 Construction Crack Analyzer")
    gr.Markdown("Upload an image of a wall or surface to detect and analyze cracks.")

    image_input = gr.Image(type="numpy", label="Upload Damage Image")
    diagnosis_output = gr.Textbox(label="Diagnosis & Recommendations", lines=8)

    analyze_button = gr.Button("Analyze")
    analyze_button.click(fn=process_image, inputs=image_input, outputs=diagnosis_output)

demo.launch()