Keshav-rejoice committed on
Commit
a2b900d
·
verified ·
1 Parent(s): dca6f31

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +114 -0
app.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import base64
3
+ import openai
4
+ import numpy as np
5
+ import cv2
6
+ from tensorflow.keras.models import load_model
7
+ from keras.preprocessing.image import img_to_array
8
+ from keras.applications.inception_v3 import preprocess_input
9
+ import os
10
+
11
# Read the OpenAI key from the environment so it never lives in source control.
openai.api_key = os.getenv('OPENAI_API_KEY')

# Output classes of the multi-label wall-defect classifier; the order is
# index-aligned with the model's prediction vector, so do not reorder.
class_labels = [
    "Algae",
    "Bubbles and blisters",
    "Cracks",
    "Efflorescence",
    "Fungus",
    "Patchiness",
    "Peeling",
    "Poor Hiding",
    "Shade Variation",
]
24
+
25
def encode_image(image_path):
    """Return the contents of the file at *image_path* as a Base64 text string."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")
28
+
29
@st.cache_resource
def load_trained_model():
    """Load the Keras classifier from disk, cached across Streamlit reruns."""
    # st.cache_resource keeps the loaded weights in memory so each user
    # interaction does not trigger a fresh (slow) model load.
    model = load_model('my_model12.h5')
    return model
32
+
33
# Streamlit app flow: upload an image, classify wall defects with the cached
# Keras model, then ask GPT-4o (vision) to describe the detected defects.
loaded_model = load_trained_model()

st.title("Wall Defect Classification and AI Analysis")
st.write("Upload an image to classify wall defects and generate AI-based descriptions.")

uploaded_file = st.file_uploader("Choose an image file", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Display the uploaded image
    st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)

    # Read the raw upload once; it is reused below for the Base64 data URL.
    file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
    input_img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
    if input_img is None:
        # cv2.imdecode signals corrupt/unsupported data by returning None
        # instead of raising; fail with a clear message rather than crashing
        # inside cv2.resize.
        st.error("Could not decode the uploaded file as an image.")
        st.stop()

    # NOTE(review): cv2 decodes to BGR while InceptionV3 preprocessing is
    # normally applied to RGB input — confirm this matches how the model
    # was trained before changing anything.
    input_img_resized = cv2.resize(input_img, dsize=(256, 256), interpolation=cv2.INTER_CUBIC)

    x = img_to_array(input_img_resized)
    x = np.expand_dims(x, axis=0)   # add the batch dimension the model expects
    x = preprocess_input(x)         # InceptionV3 scaling

    preds = loaded_model.predict(x)

    # Multi-label decision threshold: every class scoring above it is reported.
    threshold = 0.3

    class_indices = np.where(preds[0] > threshold)[0]
    class_probabilities = preds[0][class_indices]

    results_text = ""
    predicted_defects = []
    if len(class_indices) > 0:
        for class_idx, prob in zip(class_indices, class_probabilities):
            class_name = class_labels[class_idx]
            results_text += f"{class_name} (Class {class_idx}): Probability {prob:.2f}\n"
            predicted_defects.append(class_name)
    else:
        results_text = "No classes detected with a probability greater than the threshold."

    # Display classification results in a text box
    st.text_area("Classification Results:", value=results_text, height=200)

    # Encode the original upload as a Base64 data URL for the vision prompt.
    # (The MIME type is hard-coded as JPEG even for PNG uploads; the API
    # tolerates this, but it is worth confirming.)
    base64_image = base64.b64encode(file_bytes).decode("utf-8")
    image_data = f"data:image/jpeg;base64,{base64_image}"

    # Generate AI-based descriptions using OpenAI API
    if predicted_defects:
        defects_string = ", ".join(predicted_defects)
        ai_prompt = (
            f"Our trained model predicts the following defects: {defects_string}. "
            f"Can you analyze the following image and generate AI-based descriptions for these defects?"
        )

        st.write("Analyzing image")
        try:
            response = openai.chat.completions.create(
                model="gpt-4o",
                messages=[
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": ai_prompt},
                            {
                                "type": "image_url",
                                # Reuse the data URL built above (it was
                                # previously constructed but never used).
                                "image_url": {"url": image_data},
                            },
                        ],
                    }
                ],
                max_tokens=300,
            )
            # Extract AI-generated descriptions
            ai_description = response.choices[0].message.content
            st.text_area("AI-Generated Description:", value=ai_description, height=200)
        except Exception as e:
            st.error(f"An error occurred while generating AI-based descriptions: {str(e)}")
    else:
        st.warning("No defects detected. AI analysis skipped.")