| # Final with email functionality also there | |
| import streamlit as st | |
| import requests | |
| import google.generativeai as genai | |
| import firebase_admin | |
| from firebase_admin import credentials, db | |
| from PIL import Image | |
| import numpy as np | |
| import base64 | |
| from io import BytesIO | |
| from tensorflow.keras.applications import MobileNetV2 | |
| from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
# ---------------------------------------------------------------------------
# One-time app initialization
# ---------------------------------------------------------------------------
import os

# Initialize Firebase exactly once per process. firebase_admin raises if
# initialize_app() is called a second time, so guard on the _apps registry
# (the conventional idiom for Streamlit's rerun-on-interaction model).
if not firebase_admin._apps:
    cred = credentials.Certificate("firebase_credentials.json")
    firebase_admin.initialize_app(cred, {
        'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/'
    })

# Configure the Gemini client.
# SECURITY: the API key used to be hard-coded here (and is still the
# fallback for backward compatibility). Set GEMINI_API_KEY in the
# environment and rotate/remove the literal below.
genai.configure(api_key=os.environ.get("GEMINI_API_KEY", "AIzaSyBREh8Uei7uDCbzPaYW2WdalOdjVWcQLAM"))

# Load the ImageNet-pretrained MobileNetV2 classifier once at import time;
# weight loading is expensive and the model is reused across reruns.
mobilenet_model = MobileNetV2(weights="imagenet")
def classify_image(image):
    """Classify a PIL image with ImageNet-pretrained MobileNetV2.

    Args:
        image: PIL.Image.Image uploaded by the user.

    Returns:
        dict mapping each of the top-5 ImageNet label names to its
        confidence expressed as a percentage rounded to 2 decimals.
    """
    # Force 3-channel RGB before resizing: PNG uploads are frequently RGBA
    # (and some images are grayscale), which would yield a (224, 224, 4) or
    # (224, 224) array and crash MobileNetV2, whose input must be
    # (1, 224, 224, 3).
    img = image.convert("RGB").resize((224, 224))
    img_array = np.expand_dims(np.array(img), axis=0)
    img_array = preprocess_input(img_array)
    predictions = mobilenet_model.predict(img_array)
    labels = decode_predictions(predictions, top=5)[0]
    # Each tuple is (wordnet_id, human_name, score); score is a float32
    # probability in [0, 1], converted here to a plain-float percentage.
    return {name: round(float(score) * 100, 2) for _, name, score in labels}
def is_dustbin_image(classification_results):
    """Return True when any predicted label resembles a trash receptacle.

    Args:
        classification_results: dict of ImageNet label name -> confidence.

    Returns:
        True if at least one label contains a dustbin-related keyword
        (case-insensitive substring match), else False.
    """
    keywords = ("trash", "bin", "garbage", "waste", "dustbin", "ashcan", "recycle", "rubbish")
    for label in classification_results:
        lowered = label.lower()
        if any(keyword in lowered for keyword in keywords):
            return True
    return False
def convert_image_to_base64(image):
    """Serialize a PIL image to a base64-encoded PNG string.

    Args:
        image: PIL.Image.Image (anything exposing ``save(buffer, format=...)``).

    Returns:
        ASCII str containing the base64 encoding of the PNG bytes.
    """
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    raw_png = buffer.getvalue()
    return base64.b64encode(raw_png).decode()
def get_genai_response(classification_results, location):
    """Ask Gemini for an environmental analysis of the classified waste.

    Args:
        classification_results: dict of label -> confidence percentage,
            as produced by ``classify_image``.
        location: (latitude, longitude, address) tuple; falsy elements
            (e.g. empty strings from missing query params) render as 'N/A'.

    Returns:
        The model's response text, a warning string if the API returned
        nothing, or None when the call raised (the error is surfaced in
        the Streamlit UI via st.error).
    """
    try:
        classification_summary = "\n".join([f"- **{label}:** {score}%" for label, score in classification_results.items()])
        location_summary = f"""
        - **Latitude:** {location[0] if location[0] else 'N/A'}
        - **Longitude:** {location[1] if location[1] else 'N/A'}
        - **Address:** {location[2] if location[2] else 'N/A'}
        """
        prompt = f"""
        ### You are an environmental expert. Analyze the following waste classification:
        **1. Image Classification Results:**
        {classification_summary}
        **2. Location Details:**
        {location_summary}
        ### Required Analysis:
        - Describe the waste detected in the image.
        - Potential health & environmental risks.
        - Recommended disposal methods & precautions.
        - Eco-friendly alternatives.
        """
        # NOTE(review): despite the "Gemini 2.0 Flash" comments elsewhere in
        # this file, the model actually requested is gemini-1.5-flash.
        model = genai.GenerativeModel("gemini-1.5-flash")
        response = model.generate_content(prompt)
        return response.text if response else "⚠️ No response received."
    except Exception as e:
        # Broad catch is deliberate: any network/API failure is reported in
        # the UI and signalled to the caller with None instead of crashing.
        st.error(f"⚠️ Error using Generative AI: {e}")
        return None
# ---------------------------------------------------------------------------
# Page flow (top-level Streamlit script)
# ---------------------------------------------------------------------------

# Location is passed in via URL query parameters (?lat=..&lon=..&addr=..)
# by the page that links to / embeds this app.
# st.experimental_get_query_params() is deprecated and removed in current
# Streamlit releases; st.query_params returns plain strings directly, so the
# old [""])[0] unwrapping is no longer needed.
query_params = st.query_params
latitude = query_params.get("lat", "")
longitude = query_params.get("lon", "")
address = query_params.get("addr", "")

st.header("📍 Detected Location")
st.success(f"**Latitude:** {latitude}")
st.success(f"**Longitude:** {longitude}")
st.success(f"**Address:** {address}")

st.title("🗑️ BinSight: Upload Dustbin Image")
user_email = st.text_input("📧 Enter your email to receive updates")
uploaded_file = st.file_uploader("📷 Upload a dustbin image", type=["jpg", "jpeg", "png"])

# Proceed only when both an email and an image have been supplied.
if uploaded_file and user_email:
    image = Image.open(uploaded_file)
    # use_column_width is deprecated; use_container_width is the replacement.
    st.image(image, caption="📸 Uploaded Image", use_container_width=True)

    image_base64 = convert_image_to_base64(image)
    classification_results = classify_image(image)

    st.header("🧪 Classification Results")
    for label, confidence in classification_results.items():
        st.write(f"✅ **{label}:** {confidence}%")

    # Reject uploads that do not look like a dustbin before doing any
    # expensive AI analysis or writing to the database.
    if not is_dustbin_image(classification_results):
        st.error("⚠️ Please upload a valid dustbin image. No dustbin detected in the image.")
        st.stop()

    st.header("🧠 AI Analysis & Recommendations")
    gemini_response = get_genai_response(classification_results, (latitude, longitude, address))
    st.write(gemini_response)

    # Persist the report. Truck allocation happens later in a separate
    # workflow, hence allocated_truck=None and status="Pending".
    dustbin_data = {
        "email": user_email,
        "latitude": latitude,
        "longitude": longitude,
        "address": address,
        "classification": classification_results,
        "allocated_truck": None,
        "status": "Pending",
        # base64 PNG; NOTE(review): very large uploads may approach Realtime
        # Database node size limits — consider Cloud Storage for the image.
        "image": image_base64,
    }
    db.reference("dustbins").push(dustbin_data)
    st.success("✅ Dustbin data uploaded successfully!")

    # Styled thank-you banner (HTML rendering requires unsafe_allow_html).
    st.markdown(
        """
        <div style="
            background-color: #DFF2BF;
            color: #4F8A10;
            padding: 20px;
            border-radius: 10px;
            text-align: center;
            font-size: 18px;
            margin-top: 20px;">
            🎉 <b>Thank You for Your Contribution!</b> 🎉<br><br>
            Your effort in uploading this image helps keep our environment clean and green. 🌱♻️<br>
            Together, we make a difference! 🙌
        </div>
        """, unsafe_allow_html=True
    )
| # import streamlit as st | |
| # import requests | |
| # import google.generativeai as genai | |
| # import firebase_admin | |
| # from firebase_admin import credentials, db | |
| # from PIL import Image | |
| # import numpy as np | |
| # import base64 | |
| # from io import BytesIO | |
| # from tensorflow.keras.applications import MobileNetV2 | |
| # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
| # # Initialize Firebase | |
| # if not firebase_admin._apps: | |
| # cred = credentials.Certificate("firebase_credentials.json") | |
| # firebase_admin.initialize_app(cred, { | |
| # 'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/' | |
| # }) | |
| # # Configure Gemini 2.0 Flash (gemini-1.5-flash) | |
| # genai.configure(api_key="AIzaSyBREh8Uei7uDCbzPaYW2WdalOdjVWcQLAM") # Replace with actual API key | |
| # # Load MobileNetV2 pre-trained model | |
| # mobilenet_model = MobileNetV2(weights="imagenet") | |
| # # Function to classify image | |
| # def classify_image(image): | |
| # img = image.resize((224, 224)) | |
| # img_array = np.array(img) | |
| # img_array = np.expand_dims(img_array, axis=0) | |
| # img_array = preprocess_input(img_array) | |
| # predictions = mobilenet_model.predict(img_array) | |
| # labels = decode_predictions(predictions, top=5)[0] | |
| # return {label[1]: round(float(label[2]) * 100, 2) for label in labels} | |
| # # Function to check if image is a dustbin | |
| # def is_dustbin_image(classification_results): | |
| # dustbin_keywords = ["trash", "bin", "garbage", "waste", "dustbin", "ashcan", "recycle", "rubbish"] | |
| # return any(any(keyword in label.lower() for keyword in dustbin_keywords) for label in classification_results.keys()) | |
| # # Function to convert image to Base64 | |
| # def convert_image_to_base64(image): | |
| # buffered = BytesIO() | |
| # image.save(buffered, format="PNG") | |
| # return base64.b64encode(buffered.getvalue()).decode() | |
| # # Function to generate AI recommendations using Gemini 2.0 Flash | |
| # def get_genai_response(classification_results, location): | |
| # try: | |
| # classification_summary = "\n".join([f"- **{label}:** {score}%" for label, score in classification_results.items()]) | |
| # location_summary = f""" | |
| # - **Latitude:** {location[0] if location[0] else 'N/A'} | |
| # - **Longitude:** {location[1] if location[1] else 'N/A'} | |
| # - **Address:** {location[2] if location[2] else 'N/A'} | |
| # """ | |
| # prompt = f""" | |
| # ### You are an environmental expert. Analyze the following waste classification: | |
| # **1. Image Classification Results:** | |
| # {classification_summary} | |
| # **2. Location Details:** | |
| # {location_summary} | |
| # ### Required Analysis: | |
| # - Describe the waste detected in the image. | |
| # - Potential health & environmental risks. | |
| # - Recommended disposal methods & precautions. | |
| # - Eco-friendly alternatives. | |
| # """ | |
| # model = genai.GenerativeModel("gemini-1.5-flash") | |
| # response = model.generate_content(prompt) | |
| # return response.text if response else "⚠️ No response received." | |
| # except Exception as e: | |
| # st.error(f"⚠️ Error using Generative AI: {e}") | |
| # return None | |
| # # Extract query parameters | |
| # query_params = st.experimental_get_query_params() | |
| # latitude = query_params.get("lat", [""])[0] | |
| # longitude = query_params.get("lon", [""])[0] | |
| # address = query_params.get("addr", [""])[0] | |
| # # Display detected location | |
| # st.header("📍 Detected Location") | |
| # st.success(f"**Latitude:** {latitude}") | |
| # st.success(f"**Longitude:** {longitude}") | |
| # st.success(f"**Address:** {address}") | |
| # # Streamlit UI | |
| # st.title("🗑️ BinSight: Upload Dustbin Image") | |
| # uploaded_file = st.file_uploader("📷 Upload a dustbin image", type=["jpg", "jpeg", "png"]) | |
| # if uploaded_file: | |
| # image = Image.open(uploaded_file) | |
| # st.image(image, caption="📸 Uploaded Image", use_column_width=True) | |
| # image_base64 = convert_image_to_base64(image) | |
| # classification_results = classify_image(image) | |
| # st.header("🧪 Classification Results") | |
| # for label, confidence in classification_results.items(): | |
| # st.write(f"✅ **{label}:** {confidence}%") | |
| # if not is_dustbin_image(classification_results): | |
| # st.error("⚠️ Please upload a valid dustbin image. No dustbin detected in the image.") | |
| # st.stop() | |
| # st.header("🧠 AI Analysis & Recommendations") | |
| # gemini_response = get_genai_response(classification_results, (latitude, longitude, address)) | |
| # st.write(gemini_response) | |
| # dustbin_data = { | |
| # "latitude": latitude, | |
| # "longitude": longitude, | |
| # "address": address, | |
| # "classification": classification_results, | |
| # "allocated_truck": None, | |
| # "status": "Pending", | |
| # "image": image_base64 | |
| # } | |
| # db.reference("dustbins").push(dustbin_data) | |
| # st.success("✅ Dustbin data uploaded successfully!") | |
| # # Thank You Message with Styling | |
| # st.markdown( | |
| # """ | |
| # <div style=" | |
| # background-color: #DFF2BF; | |
| # color: #4F8A10; | |
| # padding: 20px; | |
| # border-radius: 10px; | |
| # text-align: center; | |
| # font-size: 18px; | |
| # margin-top: 20px;"> | |
| # 🎉 <b>Thank You for Your Contribution!</b> 🎉<br><br> | |
| # Your effort in uploading this image helps keep our environment clean and green. 🌱♻️<br> | |
| # Together, we make a difference! 🙌 | |
| # </div> | |
| # """, unsafe_allow_html=True | |
| # ) | |
| # Back button to redirect to dashboard | |
| # st.markdown("<br>", unsafe_allow_html=True) | |
| # st.markdown("<a href='https://binsight.onrender.com/dashboard.html' target='_self' style='text-decoration:none;'><button style='padding: 10px 20px; font-size: 16px;'>⬅ Back to Dashboard</button></a>", unsafe_allow_html=True) | |
| # Best version without back button | |
| # import streamlit as st | |
| # import requests | |
| # import google.generativeai as genai | |
| # import firebase_admin | |
| # from firebase_admin import credentials, db | |
| # from PIL import Image | |
| # import numpy as np | |
| # import base64 | |
| # from io import BytesIO | |
| # from tensorflow.keras.applications import MobileNetV2 | |
| # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
| # # Initialize Firebase | |
| # if not firebase_admin._apps: | |
| # cred = credentials.Certificate("firebase_credentials.json") | |
| # firebase_admin.initialize_app(cred, { | |
| # 'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/' | |
| # }) | |
| # # Configure Gemini 2.0 Flash (gemini-1.5-flash) | |
| # genai.configure(api_key="AIzaSyBREh8Uei7uDCbzPaYW2WdalOdjVWcQLAM") # Replace with actual API key | |
| # # Load MobileNetV2 pre-trained model | |
| # mobilenet_model = MobileNetV2(weights="imagenet") | |
| # # Function to classify image | |
| # def classify_image(image): | |
| # img = image.resize((224, 224)) | |
| # img_array = np.array(img) | |
| # img_array = np.expand_dims(img_array, axis=0) | |
| # img_array = preprocess_input(img_array) | |
| # predictions = mobilenet_model.predict(img_array) | |
| # labels = decode_predictions(predictions, top=5)[0] | |
| # return {label[1]: round(float(label[2]) * 100, 2) for label in labels} | |
| # # Function to check if image is a dustbin | |
| # def is_dustbin_image(classification_results): | |
| # dustbin_keywords = ["trash", "bin", "garbage", "waste", "dustbin", "ashcan", "recycle", "rubbish"] | |
| # return any(any(keyword in label.lower() for keyword in dustbin_keywords) for label in classification_results.keys()) | |
| # # Function to convert image to Base64 | |
| # def convert_image_to_base64(image): | |
| # buffered = BytesIO() | |
| # image.save(buffered, format="PNG") | |
| # return base64.b64encode(buffered.getvalue()).decode() | |
| # # Function to generate AI recommendations using Gemini 2.0 Flash | |
| # def get_genai_response(classification_results, location): | |
| # try: | |
| # classification_summary = "\n".join([f"- **{label}:** {score}%" for label, score in classification_results.items()]) | |
| # location_summary = f""" | |
| # - **Latitude:** {location[0] if location[0] else 'N/A'} | |
| # - **Longitude:** {location[1] if location[1] else 'N/A'} | |
| # - **Address:** {location[2] if location[2] else 'N/A'} | |
| # """ | |
| # prompt = f""" | |
| # ### You are an environmental expert. Analyze the following waste classification: | |
| # **1. Image Classification Results:** | |
| # {classification_summary} | |
| # **2. Location Details:** | |
| # {location_summary} | |
| # ### Required Analysis: | |
| # - Describe the waste detected in the image. | |
| # - Potential health & environmental risks. | |
| # - Recommended disposal methods & precautions. | |
| # - Eco-friendly alternatives. | |
| # """ | |
| # model = genai.GenerativeModel("gemini-1.5-flash") | |
| # response = model.generate_content(prompt) | |
| # return response.text if response else "⚠️ No response received." | |
| # except Exception as e: | |
| # st.error(f"⚠️ Error using Generative AI: {e}") | |
| # return None | |
| # # Extract query parameters | |
| # query_params = st.experimental_get_query_params() | |
| # latitude = query_params.get("lat", [""])[0] | |
| # longitude = query_params.get("lon", [""])[0] | |
| # address = query_params.get("addr", [""])[0] | |
| # # Display detected location | |
| # st.header("📍 Detected Location") | |
| # st.success(f"**Latitude:** {latitude}") | |
| # st.success(f"**Longitude:** {longitude}") | |
| # st.success(f"**Address:** {address}") | |
| # # Streamlit UI | |
| # st.title("🗑️ BinSight: Upload Dustbin Image") | |
| # uploaded_file = st.file_uploader("📷 Upload a dustbin image", type=["jpg", "jpeg", "png"]) | |
| # if uploaded_file: | |
| # image = Image.open(uploaded_file) | |
| # st.image(image, caption="📸 Uploaded Image", use_column_width=True) | |
| # image_base64 = convert_image_to_base64(image) | |
| # classification_results = classify_image(image) | |
| # st.header("🧪 Classification Results") | |
| # for label, confidence in classification_results.items(): | |
| # st.write(f"✅ **{label}:** {confidence}%") | |
| # if not is_dustbin_image(classification_results): | |
| # st.error("⚠️ Please upload a valid dustbin image. No dustbin detected in the image.") | |
| # st.stop() | |
| # st.header("🧠 AI Analysis & Recommendations") | |
| # gemini_response = get_genai_response(classification_results, (latitude, longitude, address)) | |
| # st.write(gemini_response) | |
| # dustbin_data = { | |
| # "latitude": latitude, | |
| # "longitude": longitude, | |
| # "address": address, | |
| # "classification": classification_results, | |
| # "allocated_truck": None, | |
| # "status": "Pending", | |
| # "image": image_base64 | |
| # } | |
| # db.reference("dustbins").push(dustbin_data) | |
| # st.success("✅ Dustbin data uploaded successfully!") | |
| # # Thank You Message with Styling | |
| # st.markdown( | |
| # """ | |
| # <div style=" | |
| # background-color: #DFF2BF; | |
| # color: #4F8A10; | |
| # padding: 20px; | |
| # border-radius: 10px; | |
| # text-align: center; | |
| # font-size: 18px; | |
| # margin-top: 20px;"> | |
| # 🎉 <b>Thank You for Your Contribution!</b> 🎉<br><br> | |
| # Your effort in uploading this image helps keep our environment clean and green. 🌱♻️<br> | |
| # Together, we make a difference! 🙌 | |
| # </div> | |
| # """, unsafe_allow_html=True | |
| # ) | |
| # BEST version till now | |
| # import streamlit as st | |
| # import requests | |
| # import google.generativeai as genai | |
| # import firebase_admin | |
| # from firebase_admin import credentials, db | |
| # from PIL import Image | |
| # import numpy as np | |
| # import base64 | |
| # from io import BytesIO | |
| # from tensorflow.keras.applications import MobileNetV2 | |
| # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
| # # Initialize Firebase | |
| # if not firebase_admin._apps: | |
| # cred = credentials.Certificate("firebase_credentials.json") | |
| # firebase_admin.initialize_app(cred, { | |
| # 'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/' | |
| # }) | |
| # # Configure Gemini 2.0 Flash (gemini-1.5-flash) | |
| # genai.configure(api_key="AIzaSyBREh8Uei7uDCbzPaYW2WdalOdjVWcQLAM") # Replace with actual API key | |
| # # Load MobileNetV2 pre-trained model | |
| # mobilenet_model = MobileNetV2(weights="imagenet") | |
| # # Function to classify image | |
| # def classify_image(image): | |
| # img = image.resize((224, 224)) | |
| # img_array = np.array(img) | |
| # img_array = np.expand_dims(img_array, axis=0) | |
| # img_array = preprocess_input(img_array) | |
| # predictions = mobilenet_model.predict(img_array) | |
| # labels = decode_predictions(predictions, top=5)[0] | |
| # return {label[1]: round(float(label[2]) * 100, 2) for label in labels} # Convert confidence to percentage | |
| # # Function to check if image is a dustbin | |
| # def is_dustbin_image(classification_results): | |
| # dustbin_keywords = ["trash", "bin", "garbage", "waste", "dustbin", "ashcan", "recycle", "rubbish"] | |
| # for label, confidence in classification_results.items(): | |
| # if any(keyword in label.lower() for keyword in dustbin_keywords): | |
| # return True | |
| # return False | |
| # # Function to convert image to Base64 | |
| # def convert_image_to_base64(image): | |
| # buffered = BytesIO() | |
| # image.save(buffered, format="PNG") | |
| # return base64.b64encode(buffered.getvalue()).decode() | |
| # # Function to generate AI recommendations using Gemini 2.0 Flash | |
| # def get_genai_response(classification_results, location): | |
| # try: | |
| # classification_summary = "\n".join([f"- **{label}:** {score}%" for label, score in classification_results.items()]) | |
| # location_summary = f""" | |
| # - **Latitude:** {location[0] if location[0] else 'N/A'} | |
| # - **Longitude:** {location[1] if location[1] else 'N/A'} | |
| # - **Address:** {location[2] if location[2] else 'N/A'} | |
| # """ | |
| # prompt = f""" | |
| # ### You are an environmental expert. Analyze the following waste classification: | |
| # **1. Image Classification Results:** | |
| # {classification_summary} | |
| # **2. Location Details:** | |
| # {location_summary} | |
| # ### Required Analysis: | |
| # - Describe the waste detected in the image. | |
| # - Potential health & environmental risks. | |
| # - Recommended disposal methods & precautions. | |
| # - Eco-friendly alternatives. | |
| # """ | |
| # model = genai.GenerativeModel("gemini-1.5-flash") # Using Gemini 2.0 Flash | |
| # response = model.generate_content(prompt) | |
| # return response.text if response else "⚠️ No response received." | |
| # except Exception as e: | |
| # st.error(f"⚠️ Error using Generative AI: {e}") | |
| # return None | |
| # # **Fix: Revert to `st.experimental_get_query_params()` for full location display** | |
| # query_params = st.experimental_get_query_params() | |
| # latitude = query_params.get("lat", [""])[0] # Extract full latitude | |
| # longitude = query_params.get("lon", [""])[0] # Extract full longitude | |
| # address = query_params.get("addr", [""])[0] # Extract full address | |
| # # **Ensure full location values are displayed correctly** | |
| # st.header("📍 Detected Location") | |
| # st.success(f"**Latitude:** {latitude}") | |
| # st.success(f"**Longitude:** {longitude}") | |
| # st.success(f"**Address:** {address}") | |
| # # Streamlit App UI | |
| # st.title("🗑️ BinSight: Upload Dustbin Image") | |
| # uploaded_file = st.file_uploader("📷 Upload a dustbin image", type=["jpg", "jpeg", "png"]) | |
| # if uploaded_file: | |
| # image = Image.open(uploaded_file) | |
| # st.image(image, caption="📸 Uploaded Image", use_container_width=True) | |
| # # Convert image to Base64 | |
| # image_base64 = convert_image_to_base64(image) | |
| # # Classify Image | |
| # classification_results = classify_image(image) | |
| # # Display classification results | |
| # st.header("🧪 Classification Results") | |
| # if classification_results: | |
| # for label, confidence in classification_results.items(): | |
| # st.write(f"✅ **{label}:** {confidence}%") | |
| # else: | |
| # st.error("⚠️ No classification results found.") | |
| # st.stop() | |
| # # **NEW CONDITION**: Ensure image is a dustbin before proceeding | |
| # if not is_dustbin_image(classification_results): | |
| # st.error("⚠️ Please upload a valid dustbin image. No dustbin detected in the image.") | |
| # st.stop() | |
| # # **Generate AI insights (Only Display, Not Store)** | |
| # st.header("🧠 AI Analysis & Recommendations") | |
| # gemini_response = get_genai_response(classification_results, (latitude, longitude, address)) | |
| # st.write(gemini_response) | |
| # # Save only location, classification, and image in Firebase | |
| # dustbin_data = { | |
| # "latitude": latitude, | |
| # "longitude": longitude, | |
| # "address": address, | |
| # "classification": classification_results, | |
| # "allocated_truck": None, | |
| # "status": "Pending", | |
| # "image": image_base64 | |
| # } | |
| # db.reference("dustbins").push(dustbin_data) # Save data to Firebase | |
| # st.success("✅ Dustbin data uploaded successfully!") | |
| ## working well for embedded in html page but it is not taking full location | |
| #import streamlit as st | |
| # import requests | |
| # import google.generativeai as genai | |
| # import firebase_admin | |
| # from firebase_admin import credentials, db | |
| # from PIL import Image | |
| # import numpy as np | |
| # import base64 | |
| # from io import BytesIO | |
| # from tensorflow.keras.applications import MobileNetV2 | |
| # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
| # # Initialize Firebase | |
| # if not firebase_admin._apps: | |
| # cred = credentials.Certificate("firebase_credentials.json") | |
| # firebase_admin.initialize_app(cred, { | |
| # 'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/' | |
| # }) | |
| # # Configure Gemini AI | |
| # genai.configure(api_key="AIzaSyBREh8Uei7uDCbzPaYW2WdalOdjVWcQLAM") # Replace with your actual API key | |
| # # Load MobileNetV2 pre-trained model | |
| # mobilenet_model = MobileNetV2(weights="imagenet") | |
| # # Function to classify image | |
| # def classify_image(image): | |
| # img = image.resize((224, 224)) | |
| # img_array = np.array(img) | |
| # img_array = np.expand_dims(img_array, axis=0) | |
| # img_array = preprocess_input(img_array) | |
| # predictions = mobilenet_model.predict(img_array) | |
| # labels = decode_predictions(predictions, top=5)[0] | |
| # return {label[1]: round(float(label[2]) * 100, 2) for label in labels} # Convert confidence to percentage | |
| # # Function to convert image to Base64 | |
| # def convert_image_to_base64(image): | |
| # buffered = BytesIO() | |
| # image.save(buffered, format="PNG") | |
| # return base64.b64encode(buffered.getvalue()).decode() | |
| # # Function to interact with Gemini AI (Only Show Results, Do NOT Save to Firebase) | |
| # def get_genai_response(classification_results, location): | |
| # try: | |
| # classification_summary = "\n".join([f"- **{label}:** {score}%" for label, score in classification_results.items()]) | |
| # location_summary = f""" | |
| # - **Latitude:** {location[0] if location[0] else 'N/A'} | |
| # - **Longitude:** {location[1] if location[1] else 'N/A'} | |
| # - **Address:** {location[2] if location[2] else 'N/A'} | |
| # """ | |
| # prompt = f""" | |
| # ### You are an environmental expert. Analyze the following waste classification: | |
| # **1. Image Classification Results:** | |
| # {classification_summary} | |
| # **2. Location Details:** | |
| # {location_summary} | |
| # ### Required Analysis: | |
| # - Describe the waste detected in the image. | |
| # - Potential health & environmental risks. | |
| # - Recommended disposal methods & precautions. | |
| # - Eco-friendly alternatives. | |
| # """ | |
| # model = genai.GenerativeModel("gemini-1.5-flash") | |
| # response = model.generate_content(prompt) | |
| # return response.text if response else "⚠️ No response received." | |
| # except Exception as e: | |
| # st.error(f"⚠️ Error using Generative AI: {e}") | |
| # return None | |
| # # Extract location data from URL parameters | |
| # query_params = st.query_params | |
| # latitude = str(query_params.get("lat", [None])[0]) # Convert to string | |
| # longitude = str(query_params.get("lon", [None])[0]) # Convert to string | |
| # address = str(query_params.get("addr", [""])[0]) # Convert to full address | |
| # # Display user location | |
| # st.header("📍 Detected Location") | |
| # if latitude and longitude: | |
| # st.success(f"**Latitude:** {latitude}, **Longitude:** {longitude}") | |
| # st.success(f"**Address:** {address}") | |
| # else: | |
| # st.error("⚠️ Location data not received. Enable location detection in the main page.") | |
| # # Streamlit App UI | |
| # st.title("🗑️ BinSight: Upload Dustbin Image") | |
| # uploaded_file = st.file_uploader("📷 Upload a dustbin image", type=["jpg", "jpeg", "png"]) | |
| # if uploaded_file: | |
| # image = Image.open(uploaded_file) | |
| # st.image(image, caption="📸 Uploaded Image", use_container_width=True) | |
| # # Convert image to Base64 | |
| # image_base64 = convert_image_to_base64(image) | |
| # # Classify Image | |
| # classification_results = classify_image(image) | |
| # # Display classification results | |
| # st.header("🧪 Classification Results") | |
| # if classification_results: | |
| # for label, confidence in classification_results.items(): | |
| # st.write(f"✅ **{label}:** {confidence}%") | |
| # else: | |
| # st.error("⚠️ No classification results found.") | |
| # # Get AI insights but DO NOT save to Firebase | |
| # st.header("🧠 AI Analysis & Recommendations") | |
| # gemini_response = get_genai_response(classification_results, (latitude, longitude, address)) | |
| # st.write(gemini_response) | |
| # # Save only location, classification, and image in Firebase | |
| # dustbin_data = { | |
| # "latitude": latitude, | |
| # "longitude": longitude, | |
| # "address": address, | |
| # "classification": classification_results, | |
| # "allocated_truck": None, | |
| # "status": "Pending", | |
| # "image": image_base64 | |
| # } | |
| # db.reference("dustbins").push(dustbin_data) # Save data to Firebase | |
| # st.success("✅ Dustbin data uploaded successfully!") | |
| ## Best but not working location proper | |
| # import streamlit as st | |
| # import requests | |
| # import firebase_admin | |
| # from firebase_admin import credentials, db, auth | |
| # from PIL import Image | |
| # import numpy as np | |
| # from geopy.geocoders import Nominatim | |
| # from tensorflow.keras.applications import MobileNetV2 | |
| # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
| # import json | |
| # import base64 | |
| # from io import BytesIO | |
| # # Initialize Firebase (Check if already initialized) | |
| # if not firebase_admin._apps: | |
| # cred = credentials.Certificate("firebase_credentials.json") | |
| # firebase_admin.initialize_app(cred, { | |
| # 'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/' | |
| # }) | |
| # # Load MobileNetV2 pre-trained model | |
| # mobilenet_model = MobileNetV2(weights="imagenet") | |
| # # Function to classify the uploaded image using MobileNetV2 | |
| # def classify_image_with_mobilenet(image): | |
| # try: | |
| # img = image.resize((224, 224)) | |
| # img_array = np.array(img) | |
| # img_array = np.expand_dims(img_array, axis=0) | |
| # img_array = preprocess_input(img_array) | |
| # predictions = mobilenet_model.predict(img_array) | |
| # labels = decode_predictions(predictions, top=5)[0] | |
| # return {label[1]: float(label[2]) for label in labels} | |
| # except Exception as e: | |
| # st.error(f"Error during image classification: {e}") | |
| # return {} | |
| # # Function to get user's location | |
| # def get_user_location(): | |
| # st.write("Fetching location, please allow location access in your browser.") | |
| # geolocator = Nominatim(user_agent="binsight") | |
| # try: | |
| # ip_info = requests.get("https://ipinfo.io/json").json() | |
| # loc = ip_info.get("loc", "").split(",") | |
| # latitude, longitude = loc[0], loc[1] if len(loc) == 2 else (None, None) | |
| # if latitude and longitude: | |
| # address = geolocator.reverse(f"{latitude}, {longitude}").address | |
| # return latitude, longitude, address | |
| # except Exception as e: | |
| # st.error(f"Error retrieving location: {e}") | |
| # return None, None, None | |
| # # Function to convert image to Base64 | |
| # def convert_image_to_base64(image): | |
| # buffered = BytesIO() | |
| # image.save(buffered, format="PNG") # Convert to PNG format | |
| # img_str = base64.b64encode(buffered.getvalue()).decode() # Encode as Base64 | |
| # return img_str | |
| # # User Login | |
| # st.sidebar.header("User Login") | |
| # user_email = st.sidebar.text_input("Enter your email") | |
| # login_button = st.sidebar.button("Login") | |
| # if login_button: | |
| # if user_email: | |
| # st.session_state["user_email"] = user_email | |
| # st.sidebar.success(f"Logged in as {user_email}") | |
| # if "user_email" not in st.session_state: | |
| # st.warning("Please log in first.") | |
| # st.stop() | |
| # # Get user location | |
| # latitude, longitude, address = get_user_location() | |
| # if latitude and longitude: | |
| # st.success(f"Location detected: {address}") | |
| # else: | |
| # st.warning("Unable to fetch location, please enable location access.") | |
| # st.stop() | |
| # # Streamlit App | |
| # st.title("BinSight: Upload Dustbin Image") | |
| # uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"]) | |
| # submit_button = st.button("Analyze and Upload") | |
| # if submit_button and uploaded_file: | |
| # image = Image.open(uploaded_file) | |
| # st.image(image, caption="Uploaded Image", use_container_width=True) | |
| # # Convert image to Base64 | |
| # image_base64 = convert_image_to_base64(image) | |
| # # Classify Image | |
| # classification_results = classify_image_with_mobilenet(image) | |
| # if classification_results: | |
| # db_ref = db.reference("dustbins") | |
| # dustbin_data = { | |
| # "user_email": st.session_state["user_email"], | |
| # "latitude": latitude, | |
| # "longitude": longitude, | |
| # "address": address, | |
| # "classification": classification_results, | |
| # "allocated_truck": None, | |
| # "status": "Pending", | |
| # "image": image_base64 # Store image as Base64 string | |
| # } | |
| # db_ref.push(dustbin_data) | |
| # st.success("Dustbin data uploaded successfully!") | |
| # st.write(f"**Location:** {address}") | |
| # st.write(f"**Latitude:** {latitude}, **Longitude:** {longitude}") | |
| # else: | |
| # st.error("Missing classification details. Cannot upload.") | |
# --- Legacy version 2 (commented out): same upload flow, but without storing the image in Firebase (superseded by the active code above) ---
| # import streamlit as st | |
| # import requests | |
| # import firebase_admin | |
| # from firebase_admin import credentials, db, auth | |
| # from PIL import Image | |
| # import numpy as np | |
| # from geopy.geocoders import Nominatim | |
| # from tensorflow.keras.applications import MobileNetV2 | |
| # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
| # import json | |
| # # Initialize Firebase | |
| # if not firebase_admin._apps: | |
| # cred = credentials.Certificate("firebase_credentials.json") | |
| # firebase_admin.initialize_app(cred, { | |
| # 'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/' | |
| # }) | |
| # # Load MobileNetV2 pre-trained model | |
| # mobilenet_model = MobileNetV2(weights="imagenet") | |
| # # Function to classify the uploaded image using MobileNetV2 | |
| # def classify_image_with_mobilenet(image): | |
| # try: | |
| # img = image.resize((224, 224)) | |
| # img_array = np.array(img) | |
| # img_array = np.expand_dims(img_array, axis=0) | |
| # img_array = preprocess_input(img_array) | |
| # predictions = mobilenet_model.predict(img_array) | |
| # labels = decode_predictions(predictions, top=5)[0] | |
| # return {label[1]: float(label[2]) for label in labels} | |
| # except Exception as e: | |
| # st.error(f"Error during image classification: {e}") | |
| # return {} | |
| # # Function to get user's location using geolocation API | |
| # def get_user_location(): | |
| # st.write("Fetching location, please allow location access in your browser.") | |
| # geolocator = Nominatim(user_agent="binsight") | |
| # try: | |
| # ip_info = requests.get("https://ipinfo.io/json").json() | |
| # loc = ip_info.get("loc", "").split(",") | |
# latitude, longitude = (loc[0], loc[1]) if len(loc) == 2 else (None, None)  # NOTE(review): parenthesized — the original ternary bound only to loc[1], so loc[0] was evaluated unconditionally and raised IndexError when "loc" was empty
| # if latitude and longitude: | |
| # address = geolocator.reverse(f"{latitude}, {longitude}").address | |
| # return latitude, longitude, address | |
| # except Exception as e: | |
| # st.error(f"Error retrieving location: {e}") | |
| # return None, None, None | |
| # # User Login | |
| # st.sidebar.header("User Login") | |
| # user_email = st.sidebar.text_input("Enter your email") | |
| # login_button = st.sidebar.button("Login") | |
| # if login_button: | |
| # if user_email: | |
| # st.session_state["user_email"] = user_email | |
| # st.sidebar.success(f"Logged in as {user_email}") | |
| # if "user_email" not in st.session_state: | |
| # st.warning("Please log in first.") | |
| # st.stop() | |
| # # Get user location and display details | |
| # latitude, longitude, address = get_user_location() | |
| # if latitude and longitude: | |
| # st.success(f"Location detected: {address}") | |
| # else: | |
| # st.warning("Unable to fetch location, please ensure location access is enabled.") | |
| # st.stop() | |
| # # Streamlit App | |
| # st.title("BinSight: Upload Dustbin Image") | |
| # uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"]) | |
| # submit_button = st.button("Analyze and Upload") | |
| # if submit_button and uploaded_file: | |
| # image = Image.open(uploaded_file) | |
| # st.image(image, caption="Uploaded Image", use_container_width=True) | |
| # classification_results = classify_image_with_mobilenet(image) | |
| # if classification_results: | |
| # db_ref = db.reference("dustbins") | |
| # dustbin_data = { | |
| # "user_email": st.session_state["user_email"], | |
| # "latitude": latitude, | |
| # "longitude": longitude, | |
| # "address": address, | |
| # "classification": classification_results, | |
| # "allocated_truck": None, | |
| # "status": "Pending" | |
| # } | |
| # db_ref.push(dustbin_data) | |
| # st.success("Dustbin data uploaded successfully!") | |
| # st.write(f"**Location:** {address}") | |
| # st.write(f"**Latitude:** {latitude}, **Longitude:** {longitude}") | |
| # else: | |
| # st.error("Missing classification details. Cannot upload.") | |
# --- Legacy version 3 (commented out): Firebase upload works, but the code below does not report the user's correct location (IP-based geolocation via ipinfo.io is imprecise) ---
| # import streamlit as st | |
| # import requests | |
| # import firebase_admin | |
| # from firebase_admin import credentials, db, auth | |
| # from PIL import Image | |
| # import numpy as np | |
| # from geopy.geocoders import Nominatim | |
| # from tensorflow.keras.applications import MobileNetV2 | |
| # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
| # # Initialize Firebase | |
| # if not firebase_admin._apps: | |
| # cred = credentials.Certificate("firebase_credentials.json") | |
| # firebase_admin.initialize_app(cred, { | |
| # 'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/' | |
| # }) | |
| # # Load MobileNetV2 pre-trained model | |
| # mobilenet_model = MobileNetV2(weights="imagenet") | |
| # # Function to classify the uploaded image using MobileNetV2 | |
| # def classify_image_with_mobilenet(image): | |
| # try: | |
| # img = image.resize((224, 224)) | |
| # img_array = np.array(img) | |
| # img_array = np.expand_dims(img_array, axis=0) | |
| # img_array = preprocess_input(img_array) | |
| # predictions = mobilenet_model.predict(img_array) | |
| # labels = decode_predictions(predictions, top=5)[0] | |
| # return {label[1]: float(label[2]) for label in labels} | |
| # except Exception as e: | |
| # st.error(f"Error during image classification: {e}") | |
| # return {} | |
| # # Function to get user's location | |
| # def get_user_location(): | |
| # try: | |
| # ip_info = requests.get("https://ipinfo.io/json").json() | |
| # location = ip_info.get("loc", "").split(",") | |
| # latitude = location[0] if len(location) > 0 else None | |
| # longitude = location[1] if len(location) > 1 else None | |
| # if latitude and longitude: | |
| # geolocator = Nominatim(user_agent="binsight") | |
| # address = geolocator.reverse(f"{latitude}, {longitude}").address | |
| # return latitude, longitude, address | |
| # return None, None, None | |
| # except Exception as e: | |
| # st.error(f"Unable to get location: {e}") | |
| # return None, None, None | |
| # # User Login | |
| # st.sidebar.header("User Login") | |
| # user_email = st.sidebar.text_input("Enter your email") | |
| # login_button = st.sidebar.button("Login") | |
| # if login_button: | |
| # if user_email: | |
| # st.session_state["user_email"] = user_email | |
| # st.sidebar.success(f"Logged in as {user_email}") | |
| # if "user_email" not in st.session_state: | |
| # st.warning("Please log in first.") | |
| # st.stop() | |
| # # Streamlit App | |
| # st.title("BinSight: Upload Dustbin Image") | |
| # uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"]) | |
| # submit_button = st.button("Analyze and Upload") | |
| # if submit_button and uploaded_file: | |
| # image = Image.open(uploaded_file) | |
| # st.image(image, caption="Uploaded Image", use_container_width=True) | |
| # classification_results = classify_image_with_mobilenet(image) | |
| # latitude, longitude, address = get_user_location() | |
| # if latitude and longitude and classification_results: | |
| # db_ref = db.reference("dustbins") | |
| # dustbin_data = { | |
| # "user_email": st.session_state["user_email"], | |
| # "latitude": latitude, | |
| # "longitude": longitude, | |
| # "address": address, | |
| # "classification": classification_results, | |
| # "allocated_truck": None, | |
| # "status": "Pending" | |
| # } | |
| # db_ref.push(dustbin_data) | |
| # st.success("Dustbin data uploaded successfully!") | |
| # else: | |
| # st.error("Missing classification or location details. Cannot upload.") | |
# --- Legacy version 4 (commented out): older revision without Firebase; first revision to add Gemini (Generative AI) analysis ---
| # import streamlit as st | |
| # import os | |
| # from PIL import Image | |
| # import numpy as np | |
| # from io import BytesIO | |
| # from dotenv import load_dotenv | |
| # from geopy.geocoders import Nominatim | |
| # from tensorflow.keras.applications import MobileNetV2 | |
| # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
| # import requests | |
| # import google.generativeai as genai | |
| # # Load environment variables | |
| # load_dotenv() | |
| # # Configure Generative AI | |
# genai.configure(api_key=os.getenv("GEMINI_API_KEY"))  # NOTE(review): hard-coded API key redacted from this legacy snippet — the leaked key must be rotated, and secrets loaded from the environment (the live code in this file still embeds it)
| # # Load MobileNetV2 pre-trained model | |
| # mobilenet_model = MobileNetV2(weights="imagenet") | |
| # # Function to classify the uploaded image using MobileNetV2 | |
| # def classify_image_with_mobilenet(image): | |
| # try: | |
| # img = image.resize((224, 224)) | |
| # img_array = np.array(img) | |
| # img_array = np.expand_dims(img_array, axis=0) | |
| # img_array = preprocess_input(img_array) | |
| # predictions = mobilenet_model.predict(img_array) | |
| # labels = decode_predictions(predictions, top=5)[0] | |
| # return {label[1]: float(label[2]) for label in labels} | |
| # except Exception as e: | |
| # st.error(f"Error during image classification: {e}") | |
| # return {} | |
| # # Function to get user's location | |
| # def get_user_location(): | |
| # try: | |
| # ip_info = requests.get("https://ipinfo.io/json").json() | |
| # location = ip_info.get("loc", "").split(",") | |
| # latitude = location[0] if len(location) > 0 else None | |
| # longitude = location[1] if len(location) > 1 else None | |
| # if latitude and longitude: | |
| # geolocator = Nominatim(user_agent="binsight") | |
| # address = geolocator.reverse(f"{latitude}, {longitude}").address | |
| # return latitude, longitude, address | |
| # return None, None, None | |
| # except Exception as e: | |
| # st.error(f"Unable to get location: {e}") | |
| # return None, None, None | |
| # # Function to get nearest municipal details with contact info | |
| # def get_nearest_municipal_details(latitude, longitude): | |
| # try: | |
| # if latitude and longitude: | |
| # # Simulating municipal service retrieval | |
| # municipal_services = [ | |
| # {"latitude": "12.9716", "longitude": "77.5946", "office": "Bangalore Municipal Office", "phone": "+91-80-12345678"}, | |
| # {"latitude": "28.7041", "longitude": "77.1025", "office": "Delhi Municipal Office", "phone": "+91-11-98765432"}, | |
| # {"latitude": "19.0760", "longitude": "72.8777", "office": "Mumbai Municipal Office", "phone": "+91-22-22334455"}, | |
| # ] | |
| # # Find the nearest municipal service (mock logic: matching first two decimal points) | |
| # for service in municipal_services: | |
| # if str(latitude).startswith(service["latitude"][:5]) and str(longitude).startswith(service["longitude"][:5]): | |
| # return f""" | |
| # **Office**: {service['office']} | |
| # **Phone**: {service['phone']} | |
| # """ | |
| # return "No nearby municipal office found. Please check manually." | |
| # else: | |
| # return "Location not available. Unable to fetch municipal details." | |
| # except Exception as e: | |
| # st.error(f"Unable to fetch municipal details: {e}") | |
| # return None | |
| # # Function to interact with Generative AI | |
| # def get_genai_response(classification_results, location): | |
| # try: | |
| # classification_summary = "\n".join([f"{label}: {score:.2f}" for label, score in classification_results.items()]) | |
| # location_summary = f""" | |
| # Latitude: {location[0] if location[0] else 'N/A'} | |
| # Longitude: {location[1] if location[1] else 'N/A'} | |
| # Address: {location[2] if location[2] else 'N/A'} | |
| # """ | |
| # prompt = f""" | |
| # ### You are an environmental expert. Analyze the following: | |
| # 1. **Image Classification**: | |
| # - {classification_summary} | |
| # 2. **Location**: | |
| # - {location_summary} | |
| # ### Output Required: | |
| # 1. Detailed insights about the waste detected in the image. | |
| # 2. Specific health risks associated with the detected waste type. | |
| # 3. Precautions to mitigate these health risks. | |
| # 4. Recommendations for proper disposal. | |
| # """ | |
| # model = genai.GenerativeModel('gemini-pro') | |
| # response = model.generate_content(prompt) | |
| # return response | |
| # except Exception as e: | |
| # st.error(f"Error using Generative AI: {e}") | |
| # return None | |
| # # Function to display Generative AI response | |
| # def display_genai_response(response): | |
| # st.subheader("Detailed Analysis and Recommendations") | |
| # if response and response.candidates: | |
| # response_content = response.candidates[0].content.parts[0].text if response.candidates[0].content.parts else "" | |
| # st.write(response_content) | |
| # else: | |
| # st.write("No response received from Generative AI or quota exceeded.") | |
| # # Streamlit App | |
| # st.title("BinSight: AI-Powered Dustbin and Waste Analysis System") | |
| # st.text("Upload a dustbin image and get AI-powered analysis of the waste and associated health recommendations.") | |
| # uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"], help="Upload a clear image of a dustbin for analysis.") | |
| # submit_button = st.button("Analyze Dustbin") | |
| # if submit_button: | |
| # if uploaded_file is not None: | |
| # image = Image.open(uploaded_file) | |
| # st.image(image, caption="Uploaded Image", use_container_width =True) | |
| # # Classify the image using MobileNetV2 | |
| # st.subheader("Image Classification") | |
| # classification_results = classify_image_with_mobilenet(image) | |
| # for label, score in classification_results.items(): | |
| # st.write(f"- **{label}**: {score:.2f}") | |
| # # Get user location | |
| # location = get_user_location() | |
| # latitude, longitude, address = location | |
| # st.subheader("User Location") | |
| # st.write(f"Latitude: {latitude if latitude else 'N/A'}") | |
| # st.write(f"Longitude: {longitude if longitude else 'N/A'}") | |
| # st.write(f"Address: {address if address else 'N/A'}") | |
| # # Get nearest municipal details with contact info | |
| # st.subheader("Nearest Municipal Details") | |
| # municipal_details = get_nearest_municipal_details(latitude, longitude) | |
| # st.write(municipal_details) | |
| # # Generate detailed analysis with Generative AI | |
| # if classification_results: | |
| # response = get_genai_response(classification_results, location) | |
| # display_genai_response(response) | |
| # else: | |
| # st.write("Please upload an image for analysis.") | |
| # # import streamlit as st | |
| # # import os | |
| # # from PIL import Image | |
| # # import numpy as np | |
| # # from io import BytesIO | |
| # # from dotenv import load_dotenv | |
| # # from geopy.geocoders import Nominatim | |
| # # from tensorflow.keras.applications import MobileNetV2 | |
| # # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
| # # import requests | |
| # # import google.generativeai as genai | |
| # # # Load environment variables | |
| # # load_dotenv() | |
| # # # Configure Generative AI | |
# # genai.configure(api_key=os.getenv("GEMINI_API_KEY"))  # NOTE(review): hard-coded API key redacted; rotate the leaked key and load it from the environment
| # # # Load MobileNetV2 pre-trained model | |
| # # mobilenet_model = MobileNetV2(weights="imagenet") | |
| # # # Function to classify the uploaded image using MobileNetV2 | |
| # # def classify_image_with_mobilenet(image): | |
| # # try: | |
| # # # Resize the image to the input size of MobileNetV2 | |
| # # img = image.resize((224, 224)) | |
| # # img_array = np.array(img) | |
| # # img_array = np.expand_dims(img_array, axis=0) | |
| # # img_array = preprocess_input(img_array) | |
| # # # Predict using the MobileNetV2 model | |
| # # predictions = mobilenet_model.predict(img_array) | |
| # # labels = decode_predictions(predictions, top=5)[0] | |
| # # return {label[1]: float(label[2]) for label in labels} | |
| # # except Exception as e: | |
| # # st.error(f"Error during image classification: {e}") | |
| # # return {} | |
| # # # Function to get user's location | |
| # # def get_user_location(): | |
| # # try: | |
| # # # Fetch location using the IPInfo API | |
| # # ip_info = requests.get("https://ipinfo.io/json").json() | |
| # # location = ip_info.get("loc", "").split(",") | |
| # # latitude = location[0] if len(location) > 0 else None | |
| # # longitude = location[1] if len(location) > 1 else None | |
| # # if latitude and longitude: | |
| # # geolocator = Nominatim(user_agent="binsight") | |
| # # address = geolocator.reverse(f"{latitude}, {longitude}").address | |
| # # return latitude, longitude, address | |
| # # return None, None, None | |
| # # except Exception as e: | |
| # # st.error(f"Unable to get location: {e}") | |
| # # return None, None, None | |
| # # # Function to get nearest municipal details | |
| # # def get_nearest_municipal_details(latitude, longitude): | |
| # # try: | |
| # # if latitude and longitude: | |
| # # # Simulating municipal service retrieval | |
| # # return f"The nearest municipal office is at ({latitude}, {longitude}). Please contact your local authority for waste management services." | |
| # # else: | |
| # # return "Location not available. Unable to fetch municipal details." | |
| # # except Exception as e: | |
| # # st.error(f"Unable to fetch municipal details: {e}") | |
| # # return None | |
| # # # Function to interact with Generative AI | |
| # # def get_genai_response(classification_results, location): | |
| # # try: | |
| # # # Construct prompt for Generative AI | |
| # # classification_summary = "\n".join([f"{label}: {score:.2f}" for label, score in classification_results.items()]) | |
| # # location_summary = f""" | |
| # # Latitude: {location[0] if location[0] else 'N/A'} | |
| # # Longitude: {location[1] if location[1] else 'N/A'} | |
| # # Address: {location[2] if location[2] else 'N/A'} | |
| # # """ | |
| # # prompt = f""" | |
| # # ### You are an environmental expert. Analyze the following: | |
| # # 1. **Image Classification**: | |
| # # - {classification_summary} | |
| # # 2. **Location**: | |
| # # - {location_summary} | |
| # # ### Output Required: | |
| # # 1. Detailed insights about the waste detected in the image. | |
| # # 2. Specific health risks associated with the detected waste type. | |
| # # 3. Precautions to mitigate these health risks. | |
| # # 4. Recommendations for proper disposal. | |
| # # """ | |
| # # model = genai.GenerativeModel('gemini-pro') | |
| # # response = model.generate_content(prompt) | |
| # # return response | |
| # # except Exception as e: | |
| # # st.error(f"Error using Generative AI: {e}") | |
| # # return None | |
| # # # Function to display Generative AI response | |
| # # def display_genai_response(response): | |
| # # st.subheader("Detailed Analysis and Recommendations") | |
| # # if response and response.candidates: | |
| # # response_content = response.candidates[0].content.parts[0].text if response.candidates[0].content.parts else "" | |
| # # st.write(response_content) | |
| # # else: | |
| # # st.write("No response received from Generative AI or quota exceeded.") | |
| # # # Streamlit App | |
| # # st.title("BinSight: AI-Powered Dustbin and Waste Analysis System") | |
| # # st.text("Upload a dustbin image and get AI-powered analysis of the waste and associated health recommendations.") | |
| # # uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"], help="Upload a clear image of a dustbin for analysis.") | |
| # # submit_button = st.button("Analyze Dustbin") | |
| # # if submit_button: | |
| # # if uploaded_file is not None: | |
| # # image = Image.open(uploaded_file) | |
| # # st.image(image, caption="Uploaded Image", use_column_width=True) | |
| # # # Classify the image using MobileNetV2 | |
| # # st.subheader("Image Classification") | |
| # # classification_results = classify_image_with_mobilenet(image) | |
| # # for label, score in classification_results.items(): | |
| # # st.write(f"- **{label}**: {score:.2f}") | |
| # # # Get user location | |
| # # location = get_user_location() | |
| # # latitude, longitude, address = location | |
| # # st.subheader("User Location") | |
| # # st.write(f"Latitude: {latitude if latitude else 'N/A'}") | |
| # # st.write(f"Longitude: {longitude if longitude else 'N/A'}") | |
| # # st.write(f"Address: {address if address else 'N/A'}") | |
| # # # Get nearest municipal details | |
| # # st.subheader("Nearest Municipal Details") | |
| # # municipal_details = get_nearest_municipal_details(latitude, longitude) | |
| # # st.write(municipal_details) | |
| # # # Generate detailed analysis with Generative AI | |
| # # if classification_results: | |
| # # response = get_genai_response(classification_results, location) | |
| # # display_genai_response(response) | |
| # # else: | |
| # # st.write("Please upload an image for analysis.") | |