Krish-Upgrix commited on
Commit
4864fc0
·
verified ·
1 Parent(s): 9297303

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +310 -0
  2. binsight_visionapi.json +13 -0
  3. requirements.txt +10 -0
app.py ADDED
@@ -0,0 +1,310 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import os
3
+ from PIL import Image
4
+ import numpy as np
5
+ from io import BytesIO
6
+ from dotenv import load_dotenv
7
+ from geopy.geocoders import Nominatim
8
+ from tensorflow.keras.applications import MobileNetV2
9
+ from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input
10
+ import requests
11
+ import google.generativeai as genai
12
+
13
# Load environment variables from a local .env file (e.g. GOOGLE_API_KEY).
load_dotenv()

# Configure Generative AI.
# SECURITY FIX: the Gemini API key was hard-coded in source (a leaked
# secret); read it from the environment instead so the credential never
# lives in the repository. Revoke the previously committed key.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Load MobileNetV2 pre-trained on ImageNet (weights downloaded/cached by Keras).
mobilenet_model = MobileNetV2(weights="imagenet")
21
+
22
# Function to classify the uploaded image using MobileNetV2
def classify_image_with_mobilenet(image):
    """Classify a PIL image with MobileNetV2.

    Returns a dict mapping the top-5 ImageNet label names to float
    confidence scores, or an empty dict on failure (the error is shown
    to the user via st.error).
    """
    try:
        # BUG FIX: uploaded PNGs can be RGBA (4 channels) or grayscale
        # (1 channel); MobileNetV2 expects exactly 3 channels, so
        # normalize to RGB before resizing to the model's 224x224 input.
        img = image.convert("RGB").resize((224, 224))
        img_array = np.array(img)
        img_array = np.expand_dims(img_array, axis=0)  # add batch dimension
        img_array = preprocess_input(img_array)
        predictions = mobilenet_model.predict(img_array)
        labels = decode_predictions(predictions, top=5)[0]
        # decode_predictions yields (class_id, name, score) triples.
        return {name: float(score) for _, name, score in labels}
    except Exception as e:
        st.error(f"Error during image classification: {e}")
        return {}
35
+
36
# Function to get user's location
def get_user_location():
    """Resolve the user's (latitude, longitude, address) from their IP.

    Uses ipinfo.io for coarse coordinates and Nominatim for reverse
    geocoding. Returns (None, None, None) when anything fails; errors
    are surfaced via st.error.
    """
    try:
        # FIX: requests.get without a timeout can hang the Streamlit
        # script forever if the service is unreachable.
        ip_info = requests.get("https://ipinfo.io/json", timeout=10).json()
        location = ip_info.get("loc", "").split(",")
        latitude = location[0] if len(location) > 0 else None
        longitude = location[1] if len(location) > 1 else None

        if latitude and longitude:
            geolocator = Nominatim(user_agent="binsight")
            address = geolocator.reverse(f"{latitude}, {longitude}").address
            return latitude, longitude, address
        return None, None, None
    except Exception as e:
        st.error(f"Unable to get location: {e}")
        return None, None, None
52
+
53
# Function to get nearest municipal details with contact info
def get_nearest_municipal_details(latitude, longitude):
    """Return a markdown snippet describing the nearest (mock) municipal office.

    Matching is simulated: the first five characters (degree plus two
    decimals) of each coordinate are compared against a small hard-coded
    directory. Returns a user-facing message string, or None if an
    unexpected error occurs (reported via st.error).
    """
    try:
        if latitude and longitude:
            # Simulated municipal service directory.
            municipal_services = [
                {"latitude": "12.9716", "longitude": "77.5946", "office": "Bangalore Municipal Office", "phone": "+91-80-12345678"},
                {"latitude": "28.7041", "longitude": "77.1025", "office": "Delhi Municipal Office", "phone": "+91-11-98765432"},
                {"latitude": "19.0760", "longitude": "72.8777", "office": "Mumbai Municipal Office", "phone": "+91-22-22334455"},
            ]

            # Mock nearest-office lookup by coordinate prefix.
            for service in municipal_services:
                if str(latitude).startswith(service["latitude"][:5]) and str(longitude).startswith(service["longitude"][:5]):
                    # FIX: build the markdown without leading indentation —
                    # indented lines inside a triple-quoted string are
                    # rendered by Streamlit as a code block, not markdown.
                    return (
                        f"**Office**: {service['office']}\n\n"
                        f"**Phone**: {service['phone']}"
                    )
            return "No nearby municipal office found. Please check manually."
        else:
            return "Location not available. Unable to fetch municipal details."
    except Exception as e:
        st.error(f"Unable to fetch municipal details: {e}")
        return None
77
+
78
# Function to interact with Generative AI
def get_genai_response(classification_results, location):
    """Ask Gemini for waste insights from classifier output and location.

    classification_results: dict of {label: confidence} as produced by
    classify_image_with_mobilenet.
    location: 3-tuple (latitude, longitude, address) as produced by
    get_user_location; any element may be None.
    Returns the raw generate_content response object, or None on error
    (the error is shown via st.error).
    """
    try:
        # One "label: score" line per classifier result, embedded in the prompt.
        classification_summary = "\n".join([f"{label}: {score:.2f}" for label, score in classification_results.items()])
        location_summary = f"""
        Latitude: {location[0] if location[0] else 'N/A'}
        Longitude: {location[1] if location[1] else 'N/A'}
        Address: {location[2] if location[2] else 'N/A'}
        """
        prompt = f"""
        ### You are an environmental expert. Analyze the following:
        1. **Image Classification**:
        - {classification_summary}
        2. **Location**:
        - {location_summary}

        ### Output Required:
        1. Detailed insights about the waste detected in the image.
        2. Specific health risks associated with the detected waste type.
        3. Precautions to mitigate these health risks.
        4. Recommendations for proper disposal.
        """
        # NOTE(review): 'gemini-pro' is a legacy model name — confirm it is
        # still served by the google-generativeai SDK version in use.
        model = genai.GenerativeModel('gemini-pro')
        response = model.generate_content(prompt)
        return response
    except Exception as e:
        st.error(f"Error using Generative AI: {e}")
        return None
106
+
107
# Function to display Generative AI response
def display_genai_response(response):
    """Render the Generative AI answer in Streamlit, or a fallback notice."""
    st.subheader("Detailed Analysis and Recommendations")
    candidates = response.candidates if response else None
    if not candidates:
        st.write("No response received from Generative AI or quota exceeded.")
        return
    parts = candidates[0].content.parts
    st.write(parts[0].text if parts else "")
115
+
116
# Streamlit App: page chrome, upload widget, and the analysis pipeline.
st.title("BinSight: AI-Powered Dustbin and Waste Analysis System")
st.text("Upload a dustbin image and get AI-powered analysis of the waste and associated health recommendations.")

uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"], help="Upload a clear image of a dustbin for analysis.")
submit_button = st.button("Analyze Dustbin")

if submit_button:
    if uploaded_file is None:
        # Guard clause: nothing to analyze without an upload.
        st.write("Please upload an image for analysis.")
    else:
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Image", use_container_width=True)

        # Step 1: classify the image with MobileNetV2 and list the labels.
        st.subheader("Image Classification")
        classification_results = classify_image_with_mobilenet(image)
        for label, score in classification_results.items():
            st.write(f"- **{label}**: {score:.2f}")

        # Step 2: resolve and display the user's approximate location.
        location = get_user_location()
        latitude, longitude, address = location

        st.subheader("User Location")
        st.write(f"Latitude: {latitude if latitude else 'N/A'}")
        st.write(f"Longitude: {longitude if longitude else 'N/A'}")
        st.write(f"Address: {address if address else 'N/A'}")

        # Step 3: look up the nearest municipal office contact details.
        st.subheader("Nearest Municipal Details")
        municipal_details = get_nearest_municipal_details(latitude, longitude)
        st.write(municipal_details)

        # Step 4: only query Generative AI when classification succeeded.
        if classification_results:
            response = get_genai_response(classification_results, location)
            display_genai_response(response)
154
+
155
+
156
+
157
+
158
+
159
+
160
+
161
+
162
+
163
+
164
+
165
+ # import streamlit as st
166
+ # import os
167
+ # from PIL import Image
168
+ # import numpy as np
169
+ # from io import BytesIO
170
+ # from dotenv import load_dotenv
171
+ # from geopy.geocoders import Nominatim
172
+ # from tensorflow.keras.applications import MobileNetV2
173
+ # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input
174
+ # import requests
175
+ # import google.generativeai as genai
176
+
177
+ # # Load environment variables
178
+ # load_dotenv()
179
+
180
+ # # Configure Generative AI
181
+ # genai.configure(api_key='AIzaSyBREh8Uei7uDCbzPaYW2WdalOdjVWcQLAM')
182
+
183
+ # # Load MobileNetV2 pre-trained model
184
+ # mobilenet_model = MobileNetV2(weights="imagenet")
185
+
186
+ # # Function to classify the uploaded image using MobileNetV2
187
+ # def classify_image_with_mobilenet(image):
188
+ # try:
189
+ # # Resize the image to the input size of MobileNetV2
190
+ # img = image.resize((224, 224))
191
+ # img_array = np.array(img)
192
+ # img_array = np.expand_dims(img_array, axis=0)
193
+ # img_array = preprocess_input(img_array)
194
+
195
+ # # Predict using the MobileNetV2 model
196
+ # predictions = mobilenet_model.predict(img_array)
197
+ # labels = decode_predictions(predictions, top=5)[0]
198
+ # return {label[1]: float(label[2]) for label in labels}
199
+ # except Exception as e:
200
+ # st.error(f"Error during image classification: {e}")
201
+ # return {}
202
+
203
+ # # Function to get user's location
204
+ # def get_user_location():
205
+ # try:
206
+ # # Fetch location using the IPInfo API
207
+ # ip_info = requests.get("https://ipinfo.io/json").json()
208
+ # location = ip_info.get("loc", "").split(",")
209
+ # latitude = location[0] if len(location) > 0 else None
210
+ # longitude = location[1] if len(location) > 1 else None
211
+
212
+ # if latitude and longitude:
213
+ # geolocator = Nominatim(user_agent="binsight")
214
+ # address = geolocator.reverse(f"{latitude}, {longitude}").address
215
+ # return latitude, longitude, address
216
+ # return None, None, None
217
+ # except Exception as e:
218
+ # st.error(f"Unable to get location: {e}")
219
+ # return None, None, None
220
+
221
+ # # Function to get nearest municipal details
222
+ # def get_nearest_municipal_details(latitude, longitude):
223
+ # try:
224
+ # if latitude and longitude:
225
+ # # Simulating municipal service retrieval
226
+ # return f"The nearest municipal office is at ({latitude}, {longitude}). Please contact your local authority for waste management services."
227
+ # else:
228
+ # return "Location not available. Unable to fetch municipal details."
229
+ # except Exception as e:
230
+ # st.error(f"Unable to fetch municipal details: {e}")
231
+ # return None
232
+
233
+ # # Function to interact with Generative AI
234
+ # def get_genai_response(classification_results, location):
235
+ # try:
236
+ # # Construct prompt for Generative AI
237
+ # classification_summary = "\n".join([f"{label}: {score:.2f}" for label, score in classification_results.items()])
238
+ # location_summary = f"""
239
+ # Latitude: {location[0] if location[0] else 'N/A'}
240
+ # Longitude: {location[1] if location[1] else 'N/A'}
241
+ # Address: {location[2] if location[2] else 'N/A'}
242
+ # """
243
+ # prompt = f"""
244
+ # ### You are an environmental expert. Analyze the following:
245
+ # 1. **Image Classification**:
246
+ # - {classification_summary}
247
+ # 2. **Location**:
248
+ # - {location_summary}
249
+
250
+ # ### Output Required:
251
+ # 1. Detailed insights about the waste detected in the image.
252
+ # 2. Specific health risks associated with the detected waste type.
253
+ # 3. Precautions to mitigate these health risks.
254
+ # 4. Recommendations for proper disposal.
255
+ # """
256
+
257
+ # model = genai.GenerativeModel('gemini-pro')
258
+ # response = model.generate_content(prompt)
259
+ # return response
260
+ # except Exception as e:
261
+ # st.error(f"Error using Generative AI: {e}")
262
+ # return None
263
+
264
+ # # Function to display Generative AI response
265
+ # def display_genai_response(response):
266
+ # st.subheader("Detailed Analysis and Recommendations")
267
+ # if response and response.candidates:
268
+ # response_content = response.candidates[0].content.parts[0].text if response.candidates[0].content.parts else ""
269
+ # st.write(response_content)
270
+ # else:
271
+ # st.write("No response received from Generative AI or quota exceeded.")
272
+
273
+ # # Streamlit App
274
+ # st.title("BinSight: AI-Powered Dustbin and Waste Analysis System")
275
+ # st.text("Upload a dustbin image and get AI-powered analysis of the waste and associated health recommendations.")
276
+
277
+ # uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"], help="Upload a clear image of a dustbin for analysis.")
278
+ # submit_button = st.button("Analyze Dustbin")
279
+
280
+ # if submit_button:
281
+ # if uploaded_file is not None:
282
+ # image = Image.open(uploaded_file)
283
+ # st.image(image, caption="Uploaded Image", use_column_width=True)
284
+
285
+ # # Classify the image using MobileNetV2
286
+ # st.subheader("Image Classification")
287
+ # classification_results = classify_image_with_mobilenet(image)
288
+ # for label, score in classification_results.items():
289
+ # st.write(f"- **{label}**: {score:.2f}")
290
+
291
+ # # Get user location
292
+ # location = get_user_location()
293
+ # latitude, longitude, address = location
294
+
295
+ # st.subheader("User Location")
296
+ # st.write(f"Latitude: {latitude if latitude else 'N/A'}")
297
+ # st.write(f"Longitude: {longitude if longitude else 'N/A'}")
298
+ # st.write(f"Address: {address if address else 'N/A'}")
299
+
300
+ # # Get nearest municipal details
301
+ # st.subheader("Nearest Municipal Details")
302
+ # municipal_details = get_nearest_municipal_details(latitude, longitude)
303
+ # st.write(municipal_details)
304
+
305
+ # # Generate detailed analysis with Generative AI
306
+ # if classification_results:
307
+ # response = get_genai_response(classification_results, location)
308
+ # display_genai_response(response)
309
+ # else:
310
+ # st.write("Please upload an image for analysis.")
binsight_visionapi.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "type": "service_account",
3
+ "project_id": "binsight-448310",
4
+ "private_key_id": "86450c821de32c31839542e219a2899ea963db0b",
5
+ "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDRwC8hkk6PZOTp\nf7SY8UW1bMe5WSK/jZjJMD/HS83C8QujRqGvDzcC3C4DnqdGEs4X6eHnt1RrxoxA\nRrRXGnXWgvXL1dbQ+vegZKX/JRMxrCcMpw/K3HGUkDj8tbBH3VMIjRa6R3vpk5JK\nste8FARijK7SsdHi7tGI50mioYJebWwfc9zvkLZEE02xwQN+hg+PLHdkxNsBgECt\nCkhnjdnEWgWgAKIa1xRgX4WHnkPm+Ey3wLbhyrPWKYX2RmBCcnXGLkrJNGSNGtF0\npiYnfFaRWeilg1DHo+LjpIUHyeP0dbZyVKe6EOj0DK6Fx8kSjterNB3ZL6No+RQ0\nOjOg6gV9AgMBAAECggEAC5RShcXv9FablJBqRe876/IfbIoRMwX8I084liYHK/Xm\nsJLbpjBu1weUurptfZ7YJdXRlNs4G+MQJxRALHbXfoooS6J4g3z3YrFrwJQpZRqd\n5ULrykU1OABmRP06yBzd0qEHWi1MF+7/qoQJCOcJ/u7JT/RlI+QPSUGLfSDxc4j5\nUyUHrBlfvWfXAP8JxP+hQDkX7xWgcRhVKlXxeQteh6wheymP0AXPNjuG7YGzoXIK\nKsABm7ROC2IHLTm2h4vc8fA8Pwj4OrOucCjP0IJF7SetkfbucFfdT6n4D/5dVIky\n+D6910zN6p8EwQl9cEmu75OD/UvRHX60sDYUK6LQ+QKBgQDpLJEr5Q8SP53jggny\n5gSBCLTioFCvISQuRUd3UAns555a5HUPS6nD++e+E7YpAQhVEfMjvYdMX7tqQqq5\nNgPGp2ao6DxC9fT7fo7On2Q0sHrdz8uNo13ZrS6dI0caX+myjbMC73fCxp3cp68u\nMjWP50/pQJxOlrv6i/lKyujPzwKBgQDmSJ5IPYToLBzMMmahXNRTPbRHoRsxM/Ck\n02ANlxoaNvkArpf1MP2JR0N6ZUk+wwGvIuPEaMM1q+ricOREUuD+PbAguj/uYqb1\nP06tDfX/GzQKk7yLk+g8eXKrnCqr8KKxSbF8vWRHMOeTdKaCsW2SZVPkKIJ2RHQi\n7qxvAxP88wKBgQDQFlu/pn1atbc7r3Mdd7SRSqnSjWszvwnA2Ua77YvOBa3GQ5dL\n/SQVqJrZgFHSKf+7m3c2cA9sUwq3+6LMAq4//GibWBVfVIw6XGkpcAlHFC+x/50S\nW7aHagvtY+wyV2IBXH9ioT5pbkK3BlZJjblLIQyphmV3pQFAyOXCn25A9QKBgQCj\nEl+L1oysgLhv3W0R7ZOp0rM8WhjQcfCCN/D4Dr18PNt9oSWYivWvZdih7uG8YQlr\nRTC3oFxEQJbXfYwX2fzb7UExG9Mz84Y5e3gyUgWWfmQO7WmCCd5WHMaYQcFx+rir\nBP170P4W78m9gMh9Gjn2hmyu0AT6zSTUq+FNx4c7AwKBgF4f6Ilre1dH3hmm+CbW\n7q6wy2BpzB/ga5iFiZg1vi1X3tgRyaE0K0jNbDxp7NOJL5OZ3Pz6AJMQyZYvsNqW\nltwgvWYF5agH2O1x/mYgru4lHwbcvJqu8YREy1uDdL2Kav+PApnxwMLR9bGWgQ7y\npvslRldo1F5OR8kZUZbg/wtk\n-----END PRIVATE KEY-----\n",
6
+ "client_email": "binsight@binsight-448310.iam.gserviceaccount.com",
7
+ "client_id": "117574259544737739176",
8
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
9
+ "token_uri": "https://oauth2.googleapis.com/token",
10
+ "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
11
+ "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/binsight%40binsight-448310.iam.gserviceaccount.com",
12
+ "universe_domain": "googleapis.com"
13
+ }
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ streamlit
2
+ google-generativeai
3
+ python-dotenv
4
+ pillow
5
+ opencv-python-headless
6
+ numpy
7
+ requests
8
+ geopy
9
+ google-cloud-vision
10
+ tensorflow