Spaces:
Sleeping
Sleeping
Upload 11 files
Browse files- .gitattributes +6 -0
- README.md +11 -0
- mobilenet_best_model.keras +3 -0
- mobilenet_best_model1.keras +3 -0
- mobilenet_best_model2.keras +3 -0
- mobilenet_best_model_merged_bee_nobee.keras +3 -0
- mobilenet_best_model_merged_mite_nomite.keras +3 -0
- mobilenet_best_model_merged_queen_noqueen.keras +3 -0
- predict.py +435 -0
- preprocessor_config.json +11 -0
- report.py +317 -0
- requirements.txt +21 -0
.gitattributes
CHANGED
|
@@ -1 +1,7 @@
|
|
| 1 |
logo.png filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
logo.png filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
mobilenet_best_model_merged_bee_nobee.keras filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
mobilenet_best_model_merged_mite_nomite.keras filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
mobilenet_best_model_merged_queen_noqueen.keras filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
mobilenet_best_model.keras filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
mobilenet_best_model1.keras filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
mobilenet_best_model2.keras filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Bee Notbee
|
| 3 |
+
emoji: 😻
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: red
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
license: mit
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
mobilenet_best_model.keras
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a9c06dc6d2209efe5f39c461cd83b2edd58ab6f1e0b5be9c202e1fd4de53bcad
|
| 3 |
+
size 13539546
|
mobilenet_best_model1.keras
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:10e757e202532397e6ca5aa0ce5a3b2ee48482f01ffb3adce0e166aaf24b5651
|
| 3 |
+
size 31466423
|
mobilenet_best_model2.keras
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:14e389b76c39e707765e69073c83f3eb37f82af06b2ad3b5a4729a682dec75ce
|
| 3 |
+
size 13561171
|
mobilenet_best_model_merged_bee_nobee.keras
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e20a01cd950764c873b888a36428b7b489748daed39af72033669129e270ae49
|
| 3 |
+
size 13561172
|
mobilenet_best_model_merged_mite_nomite.keras
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a59eaa08ce9460c85a405752c043c159e685dd41a603645ea5cadb11463d787a
|
| 3 |
+
size 13561171
|
mobilenet_best_model_merged_queen_noqueen.keras
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cfb49050677429f3adb44197f395e3f033b6a409b469ac44f2fbfb275e3e87f1
|
| 3 |
+
size 13561172
|
predict.py
ADDED
|
@@ -0,0 +1,435 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import librosa
|
| 4 |
+
import numpy as np
|
| 5 |
+
import tensorflow as tf
|
| 6 |
+
from tensorflow.keras.models import load_model
|
| 7 |
+
import matplotlib.pyplot as plt
|
| 8 |
+
from PIL import Image
|
| 9 |
+
from googleapiclient.discovery import build
|
| 10 |
+
from google.oauth2 import service_account
|
| 11 |
+
from googleapiclient.http import MediaIoBaseUpload
|
| 12 |
+
from flask import jsonify
|
| 13 |
+
import logging
|
| 14 |
+
from datetime import datetime
|
| 15 |
+
import subprocess
|
| 16 |
+
import tempfile
|
| 17 |
+
import shutil
|
| 18 |
+
from database import update_hive_health_in_db
|
| 19 |
+
|
| 20 |
+
# Ensure matplotlib runs in non-GUI mode
|
| 21 |
+
import matplotlib
|
| 22 |
+
matplotlib.use("Agg")
|
| 23 |
+
|
| 24 |
+
# Configure logging
|
| 25 |
+
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
| 26 |
+
logger = logging.getLogger(__name__)
|
| 27 |
+
|
| 28 |
+
# Google Drive API Setup
|
| 29 |
+
SCOPES = ['https://www.googleapis.com/auth/drive.file']
PARENT_FOLDER_ID = os.getenv("GOOGLE_DRIVE_FOLDER_ID")

# Fail fast with a clear message when the credentials env var is absent;
# json.loads(None) would otherwise raise an opaque TypeError at import time.
_raw_credentials = os.getenv("G_Drive_Credentials")
if _raw_credentials is None:
    raise RuntimeError("Environment variable 'G_Drive_Credentials' is not set")
service_account_info = json.loads(_raw_credentials)
credentials = service_account.Credentials.from_service_account_info(service_account_info)

# Initialize Google Drive API client (shared by the upload helpers below).
drive_service = build("drive", "v3", credentials=credentials)

# Binary classifier: bee vs. not-bee audio.
MODEL_PATH = "./mobilenet_best_model_merged_bee_nobee.keras"
bee_model = load_model(MODEL_PATH)

# Binary classifier: queen present ("healthy") vs. "no queen".
QUEEN_MODEL_PATH = "./mobilenet_best_model_merged_queen_noqueen.keras"
queen_model = load_model(QUEEN_MODEL_PATH)

# Binary classifier: "healthy" vs. "mite attack".
MITE_MODEL_PATH = "./mobilenet_best_model_merged_mite_nomite.keras"
mite_model = load_model(MITE_MODEL_PATH)
|
| 49 |
+
|
| 50 |
+
def check_ffmpeg():
    """Return True when an ``ffmpeg`` executable is available on PATH."""
    ffmpeg_path = shutil.which("ffmpeg")
    return ffmpeg_path is not None
| 53 |
+
|
| 54 |
+
def convert_to_wav(input_path, output_path):
    """Convert an audio file to 16-bit stereo 44.1 kHz WAV using FFmpeg.

    Args:
        input_path: Path of the source audio file; must exist.
        output_path: Destination path for the WAV output (overwritten).

    Returns:
        The output path on success.

    Raises:
        FileNotFoundError: If the input file is missing.
        RuntimeError: If FFmpeg is unavailable or the conversion fails.
    """
    try:
        if not os.path.exists(input_path):
            raise FileNotFoundError("Input audio file does not exist")

        if not check_ffmpeg():
            raise RuntimeError("FFmpeg is not installed or not found in PATH")

        command = [
            "ffmpeg",
            "-i", input_path,
            "-acodec", "pcm_s16le",   # 16-bit signed little-endian PCM
            "-ar", "44100",           # 44.1 kHz sample rate
            "-ac", "2",               # stereo
            "-y",                     # overwrite output without prompting
            output_path,
        ]
        completed = subprocess.run(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )
        if completed.returncode != 0:
            raise RuntimeError(f"FFmpeg conversion failed: {completed.stderr}")

        logger.info(f"Converted {input_path} to {output_path}")
        return output_path

    except Exception as e:
        logger.error(f"Error converting audio to WAV: {str(e)}")
        raise
|
| 87 |
+
|
| 88 |
+
def get_or_create_folder(folder_name, parent_id):
    """Return the Drive ID of *folder_name* under *parent_id*, creating it if needed.

    Args:
        folder_name: Display name of the folder to look up or create.
        parent_id: Google Drive ID of the parent folder.

    Returns:
        The Google Drive file ID of the existing or newly created folder.
    """
    # Escape backslashes and single quotes so a name containing "'" cannot
    # break (or inject into) the Drive query; the Drive v3 query grammar
    # uses \' for a literal quote inside a quoted string.
    safe_name = folder_name.replace("\\", "\\\\").replace("'", "\\'")
    query = (
        f"name='{safe_name}' and mimeType='application/vnd.google-apps.folder' "
        f"and '{parent_id}' in parents and trashed=false"
    )
    response = drive_service.files().list(q=query, spaces='drive', fields='files(id, name)').execute()
    files = response.get('files', [])

    if files:
        return files[0].get('id')

    # Not found: create the folder and return its new ID.
    folder_metadata = {
        'name': folder_name,
        'mimeType': 'application/vnd.google-apps.folder',
        'parents': [parent_id]
    }
    folder = drive_service.files().create(body=folder_metadata, fields='id').execute()
    return folder.get('id')
|
| 104 |
+
|
| 105 |
+
def upload_to_drive(audio_file, result, user_predict=None):
    """Upload an audio file to the appropriate Google Drive folder(s).

    "not bee" recordings go only to the "not bee" folder. Everything else is
    treated as bee audio and goes to the "bee" folder, plus the folder named
    after *user_predict* when that value is one of the recognised labels.

    Args:
        audio_file: File-storage object exposing .filename and .content_type.
        result: Prediction result string ("not bee", or anything else = bee).
        user_predict: Optional user-supplied health label.

    Returns:
        The Drive file ID of the first upload performed.

    Raises:
        Exception: Propagates any Google Drive API failure after logging it.
    """
    def _upload_to_folder(folder_name):
        # One upload into a (possibly freshly created) named folder; returns its file ID.
        folder_id = get_or_create_folder(folder_name, PARENT_FOLDER_ID)
        audio_file.seek(0)  # rewind: the stream may already have been consumed
        file_metadata = {
            'name': audio_file.filename,
            'parents': [folder_id]
        }
        media = MediaIoBaseUpload(audio_file, mimetype=audio_file.content_type, resumable=True)
        file = drive_service.files().create(
            body=file_metadata,
            media_body=media,
            fields='id'
        ).execute()
        file_id = file.get('id')
        logger.info(f"Uploaded {audio_file.filename} to Google Drive folder '{folder_name}' with file ID: {file_id}")
        return file_id

    try:
        file_ids = []

        if result == "not bee":
            file_ids.append(_upload_to_folder("not bee"))
        else:
            file_ids.append(_upload_to_folder("bee"))

            if user_predict:
                user_predict = user_predict.strip().lower()
                valid_folders = {'healthy', 'no queen', 'mite attack', 'chalkbrood'}
                if user_predict in valid_folders:
                    file_ids.append(_upload_to_folder(user_predict))
                else:
                    logger.warning(f"Ignoring invalid user_predict value: {user_predict}")

        return file_ids[0]

    except Exception as e:
        logger.error(f"Error uploading to Google Drive: {str(e)}")
        raise
|
| 168 |
+
|
| 169 |
+
def create_mel_spectrogram(audio_segment, sr):
    """Render a mel spectrogram of an audio segment as a model-ready array.

    Args:
        audio_segment: 1-D array of audio samples.
        sr: Sample rate of the segment.

    Returns:
        A (224, 224, 3) float array scaled to [0, 1], or None on failure.
    """
    try:
        spectrogram = librosa.feature.melspectrogram(y=audio_segment, sr=sr, n_mels=128)
        spectrogram_db = librosa.power_to_db(spectrogram, ref=np.max)

        plt.figure(figsize=(2, 2), dpi=100)
        plt.axis('off')
        plt.imshow(spectrogram_db, aspect='auto', cmap='magma', origin='lower')
        plt.tight_layout(pad=0)

        # Use a unique temp file instead of a fixed /tmp path so concurrent
        # requests cannot clobber each other's spectrogram images.
        fd, temp_image_path = tempfile.mkstemp(suffix=".png")
        os.close(fd)
        try:
            plt.savefig(temp_image_path, bbox_inches='tight', pad_inches=0)
            plt.close()

            img = Image.open(temp_image_path).convert('RGB')
            img = img.resize((224, 224))
            img_array = np.array(img) / 255.0
        finally:
            # Guarantee cleanup even if saving/reading the image fails.
            if os.path.exists(temp_image_path):
                os.remove(temp_image_path)

        return img_array

    except Exception as e:
        logger.error(f"Error creating spectrogram: {e}")
        return None
|
| 194 |
+
|
| 195 |
+
def predict_queen_audio(file_path, model):
    """Classify a recording as "healthy" (queen present) or "no queen".

    The audio is split into 10-second windows; each window's mel spectrogram
    is scored by *model*. The hive is reported healthy when at least 70% of
    windows score >= 0.8.

    Args:
        file_path: Path to an audio file longer than 10 seconds.
        model: Keras binary classifier (0: no queen, 1: healthy).

    Returns:
        "healthy" or "no queen" on success, otherwise a dict with an "error" key.
    """
    try:
        y, sr = librosa.load(file_path, sr=None)
        duration = librosa.get_duration(y=y, sr=sr)
        if duration <= 10:
            return {"error": "Audio file must be longer than 10 seconds"}

        healthy_count = 0
        total_segments = 0
        start = 0

        while start < duration:
            end = min(start + 10, duration)
            # A trailing partial window is replaced by the final full
            # 10-second window ending at the end of the recording.
            if end - start < 10 and start > 0:
                start = max(0, duration - 10)
                end = duration

            segment = y[int(start * sr):int(end * sr)]
            image = create_mel_spectrogram(segment, sr)

            if image is not None:
                batch = np.expand_dims(image, axis=0)
                score = model.predict(batch)[0][0]  # 0: no queen, 1: healthy
                if score >= 0.8:  # threshold aligned with predict_audio
                    healthy_count += 1
                total_segments += 1

            start += 10

        if total_segments == 0:
            return {"error": "No valid segments processed"}

        healthy_percentage = (healthy_count / total_segments) * 100
        return "healthy" if healthy_percentage >= 70 else "no queen"

    except Exception as e:
        logger.error(f"Error in queen prediction: {e}")
        return {"error": str(e)}
|
| 236 |
+
|
| 237 |
+
def predict_mite_audio(file_path, model):
    """Classify a recording as "healthy" or "mite attack".

    The audio is split into 10-second windows; each window's mel spectrogram
    is scored by *model*. The hive is reported healthy when at least 70% of
    windows score >= 0.8.

    Args:
        file_path: Path to an audio file longer than 10 seconds.
        model: Keras binary classifier (0: mite attack, 1: healthy).

    Returns:
        "healthy" or "mite attack" on success, otherwise a dict with an "error" key.
    """
    try:
        y, sr = librosa.load(file_path, sr=None)
        duration = librosa.get_duration(y=y, sr=sr)
        if duration <= 10:
            return {"error": "Audio file must be longer than 10 seconds"}

        healthy_count = 0
        total_segments = 0
        start = 0

        while start < duration:
            end = min(start + 10, duration)
            # A trailing partial window is replaced by the final full
            # 10-second window ending at the end of the recording.
            if end - start < 10 and start > 0:
                start = max(0, duration - 10)
                end = duration

            segment = y[int(start * sr):int(end * sr)]
            image = create_mel_spectrogram(segment, sr)

            if image is not None:
                batch = np.expand_dims(image, axis=0)
                score = model.predict(batch)[0][0]  # 0: mite attack, 1: healthy
                if score >= 0.8:  # threshold consistent with the other detectors
                    healthy_count += 1
                total_segments += 1

            start += 10

        if total_segments == 0:
            return {"error": "No valid segments processed"}

        healthy_percentage = (healthy_count / total_segments) * 100
        return "healthy" if healthy_percentage >= 70 else "mite attack"

    except Exception as e:
        logger.error(f"Error in mite attack prediction: {e}")
        return {"error": str(e)}
|
| 278 |
+
|
| 279 |
+
def predict_audio(audio_path, request_id):
    """Decide whether a recording contains bee sounds.

    The audio is split into 10-second windows; each window's mel spectrogram
    is scored by the bee/no-bee model. Low scores (<= 0.2) indicate bee
    sound; the recording is labelled "bee" when at least 70% of windows do.

    Args:
        audio_path: Path to an audio file longer than 10 seconds.
        request_id: Identifier used to correlate log lines.

    Returns:
        {"result": "bee" | "not bee" | "try again"} on success, or a dict
        with an "error" key on failure.
    """
    try:
        y, sr = librosa.load(audio_path, sr=None)
        if y is None or sr is None:
            return {"error": "Failed to load audio"}

        duration = librosa.get_duration(y=y, sr=sr)
        if duration <= 10:
            return {"error": "Audio file must be longer than 10 seconds"}

        bee_count = 0
        total_segments = 0
        start = 0

        while start < duration:
            end = min(start + 10, duration)
            # A trailing partial window is replaced by the final full
            # 10-second window ending at the end of the recording.
            if end - start < 10 and start > 0:
                start = max(0, duration - 10)
                end = duration

            segment = y[int(start * sr):int(end * sr)]
            image = create_mel_spectrogram(segment, sr)

            if image is not None:
                batch = np.expand_dims(image, axis=0)
                score = bee_model.predict(batch)[0][0]

                if score <= 0.2:  # low score = bee sound
                    bee_count += 1

                total_segments += 1

            start += 10

        if total_segments == 0:
            return {"result": "try again"}

        bee_percentage = (bee_count / total_segments) * 100
        verdict = "bee" if bee_percentage >= 70 else "not bee"
        logger.info(f"Request {request_id} - Prediction result: {verdict}")
        return {"result": verdict}

    except Exception as e:
        logger.error(f"Request {request_id} - Error during bee prediction: {e}")
        return {"error": str(e)}
|
| 326 |
+
|
| 327 |
+
def handle_predict(request, save_prediction):
    """Handle the /predict route: validate input, run models, upload, respond.

    Pipeline: validate the multipart request -> save the upload to a temp
    file (converting to WAV when needed) -> bee/no-bee prediction -> for bee
    audio, queen and mite predictions combined into one health label ->
    optional hive-health DB update -> Google Drive upload -> persist the
    prediction and return the result.

    Args:
        request: Flask request with an 'audio' file and a 'user_id' form
            field, plus optional 'hive_id' and 'user_predict' fields.
        save_prediction: Callable(user_id, filename, result, file_id,
            hive_id, user_predict) that persists the outcome.

    Returns:
        A Flask JSON response, optionally paired with an HTTP status code.
    """
    request_id = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
    logger.info(f"Request {request_id} - Starting prediction")

    if 'audio' not in request.files or 'user_id' not in request.form:
        logger.error(f"Request {request_id} - Missing data")
        return jsonify({"error": "Missing data"}), 400

    user_id = request.form['user_id']
    audio_file = request.files['audio']
    hive_id = request.form.get('hive_id')
    user_predict = request.form.get('user_predict')

    if audio_file.filename == '':
        logger.error(f"Request {request_id} - No file selected")
        return jsonify({"error": "No file selected"}), 400

    if not check_ffmpeg():
        logger.error(f"Request {request_id} - FFmpeg not found")
        return jsonify({"error": "Server error: FFmpeg is not available"}), 500

    temp_dir = tempfile.gettempdir()
    original_filename = audio_file.filename
    original_ext = os.path.splitext(original_filename)[1].lower()
    temp_original_path = os.path.join(temp_dir, f"temp_audio_{request_id}{original_ext}")
    audio_file.save(temp_original_path)

    temp_wav_path = os.path.join(temp_dir, f"temp_audio_{request_id}.wav")

    def _cleanup():
        # Remove the saved upload and, if distinct, the converted WAV file.
        if os.path.exists(temp_original_path):
            os.remove(temp_original_path)
        if temp_wav_path != temp_original_path and os.path.exists(temp_wav_path):
            os.remove(temp_wav_path)

    if original_ext not in ['.wav', '.mp3']:
        try:
            convert_to_wav(temp_original_path, temp_wav_path)
        except Exception as e:
            os.remove(temp_original_path)
            logger.error(f"Request {request_id} - Audio conversion failed: {str(e)}")
            return jsonify({"error": f"Failed to process audio file: {str(e)}"}), 400
    else:
        temp_wav_path = temp_original_path
        logger.info(f"Request {request_id} - Using original file (no conversion needed): {temp_wav_path}")

    prediction_result = predict_audio(temp_wav_path, request_id)

    if "error" in prediction_result:
        _cleanup()
        logger.error(f"Request {request_id} - Prediction failed: {prediction_result['error']}")
        return jsonify({"result": "try again"}), 400

    result = prediction_result["result"].lower()

    if result == "try again":
        _cleanup()
        logger.info(f"Request {request_id} - Result: try again")
        return jsonify({"result": "try again"})

    if result == "bee":
        queen_result = predict_queen_audio(temp_wav_path, queen_model)
        mite_result = predict_mite_audio(temp_wav_path, mite_model)

        # The predictors return a plain string on success and a dict on
        # failure; calling .get on a success string raised AttributeError
        # in the original error path, so extract messages type-safely.
        if "error" in queen_result or "error" in mite_result:
            _cleanup()
            queen_error = queen_result.get('error', 'Unknown') if isinstance(queen_result, dict) else 'Unknown'
            mite_error = mite_result.get('error', 'Unknown') if isinstance(mite_result, dict) else 'Unknown'
            logger.error(f"Request {request_id} - Queen prediction failed: {queen_error}, Mite prediction failed: {mite_error}")
            return jsonify({"result": "try again"}), 400

        logger.info(f"Request {request_id} - Queen prediction result: {queen_result}, Mite prediction result: {mite_result}")

        # Combine the two binary predictions into a single health label.
        if queen_result == "healthy" and mite_result == "healthy":
            result = "healthy"
        elif queen_result == "no queen" and mite_result == "healthy":
            result = "no queen"
        elif queen_result == "healthy" and mite_result == "mite attack":
            result = "mite attack"
        elif queen_result == "no queen" and mite_result == "mite attack":
            result = "no queen,mite attack"
        else:
            result = "try again"  # Fallback for unexpected cases
            logger.warning(f"Request {request_id} - Unexpected combination: queen={queen_result}, mite={mite_result}")

    if user_predict and hive_id:
        try:
            user_predict = user_predict.strip().lower()
            update_hive_health_in_db(hive_id, user_predict)
            logger.info(f"Request {request_id} - Updated hive {hive_id} health_status to {user_predict}")
        except Exception as e:
            _cleanup()  # the original leaked the temp files on this path
            logger.error(f"Request {request_id} - Failed to update hive health status: {str(e)}")
            return jsonify({"error": f"Failed to update hive health status: {str(e)}"}), 400

    file_id = None
    try:
        audio_file.seek(0)
        file_id = upload_to_drive(audio_file, result, user_predict)
    except Exception as e:
        logger.error(f"Request {request_id} - Failed to upload to Google Drive: {str(e)}")
        # Continue with saving prediction and returning result with file_id=None

    save_prediction(user_id, audio_file.filename, result, file_id, hive_id, user_predict)

    _cleanup()

    logger.info(f"Request {request_id} - Final result: {result}, file_id: {file_id}")
    return jsonify({"result": result, "file_id": file_id})
|
preprocessor_config.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"crop_size": 224,
|
| 3 |
+
"do_center_crop": true,
|
| 4 |
+
"do_normalize": true,
|
| 5 |
+
"do_resize": true,
|
| 6 |
+
"feature_extractor_type": "ViTImageProcessor",
|
| 7 |
+
"image_mean": [0.5, 0.5, 0.5],
|
| 8 |
+
"image_std": [0.5, 0.5, 0.5],
|
| 9 |
+
"resample": 3,
|
| 10 |
+
"size": 224
|
| 11 |
+
}
|
report.py
ADDED
|
@@ -0,0 +1,317 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from reportlab.lib.pagesizes import letter
|
| 2 |
+
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, Image
|
| 3 |
+
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
|
| 4 |
+
from reportlab.lib import colors
|
| 5 |
+
from reportlab.lib.units import inch
|
| 6 |
+
from database import get_farm_details_from_db, get_hives_from_db, get_hive_detail_from_db, get_history, get_user_profile
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
import io
|
| 9 |
+
import os
|
| 10 |
+
import matplotlib
|
| 11 |
+
matplotlib.use('Agg') # Non-GUI backend for server environments
|
| 12 |
+
import matplotlib.pyplot as plt
|
| 13 |
+
import tempfile
|
| 14 |
+
import uuid
|
| 15 |
+
import logging
|
| 16 |
+
|
| 17 |
+
# Configure logging
|
| 18 |
+
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
def create_table(data, col_widths=None):
    """Build a reportlab Table with the report's standard grid styling.

    Args:
        data: 2-D list of cell values; the first row is treated as the header.
        col_widths: Optional list of column widths.

    Returns:
        A styled reportlab Table flowable.
    """
    styling = TableStyle([
        ('GRID', (0, 0), (-1, -1), 1, colors.black),
        ('FONT', (0, 0), (-1, -1), 'Helvetica', 10),
        ('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
        ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
        ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
        ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
        ('BOX', (0, 0), (-1, -1), 0.25, colors.black),
        ('LEFTPADDING', (0, 0), (-1, -1), 6),
        ('RIGHTPADDING', (0, 0), (-1, -1), 6),
        ('TOPPADDING', (0, 0), (-1, -1), 6),
        ('BOTTOMPADDING', (0, 0), (-1, -1), 6),
    ])
    table = Table(data, colWidths=col_widths)
    table.setStyle(styling)
    return table
|
| 38 |
+
|
| 39 |
+
def create_pie_chart(data_counts, output_path, title):
    """Render a pie chart of *data_counts* to *output_path* as a PNG.

    Args:
        data_counts: Mapping of label -> count; empty means nothing to plot.
        output_path: Destination PNG path.
        title: Chart title.

    Returns:
        True when the chart file was written, False otherwise.
    """
    try:
        if not data_counts:
            logger.warning(f"No data for {title} pie chart, skipping generation")
            return False

        labels = list(data_counts.keys())
        sizes = list(data_counts.values())
        palette = ['#ff9999', '#66b3ff', '#99ff99', '#ffcc99', '#ffb3e6', '#c2c2f0']

        plt.figure(figsize=(4, 4))
        plt.pie(sizes, labels=labels, colors=palette[:len(labels)], autopct='%1.1f%%', startangle=90)
        plt.axis('equal')  # keep the pie circular
        plt.title(title)
        plt.savefig(output_path, bbox_inches='tight', dpi=150, format='png')
        plt.close()

        if not os.path.exists(output_path):
            logger.error(f"Pie chart file not found at {output_path} after saving")
            return False
        return True
    except Exception as e:
        logger.error(f"Error generating {title} pie chart: {str(e)}")
        return False
|
| 65 |
+
|
| 66 |
+
def create_bar_chart(history, output_path):
    """Render a bar chart of prediction-result counts to *output_path*.

    Args:
        history: Iterable of dicts, each with a 'result' key.
        output_path: Destination PNG path.

    Returns:
        True when the chart file was written, False otherwise.
    """
    try:
        # Count every label predict.py can emit; the original set omitted the
        # mite-related results, silently dropping them from the chart.
        result_counts = {
            'healthy': 0,
            'no queen': 0,
            'mite attack': 0,
            'no queen,mite attack': 0,
            'not bee': 0,
        }
        for entry in history:
            result = entry['result'].lower()
            if result in result_counts:
                result_counts[result] += 1

        labels = list(result_counts.keys())
        counts = list(result_counts.values())
        palette = ['#66b3ff', '#ff9999', '#99ff99', '#ffcc99', '#ffb3e6']

        plt.figure(figsize=(6, 4))
        plt.bar(labels, counts, color=palette[:len(labels)])
        plt.xlabel('Prediction Result')
        plt.ylabel('Count')
        plt.title('Prediction Result Distribution')
        # Annotate each bar with its count, just above the bar top.
        for i, v in enumerate(counts):
            plt.text(i, v + 0.1, str(v), ha='center')
        plt.savefig(output_path, bbox_inches='tight', dpi=150, format='png')
        plt.close()

        if os.path.exists(output_path):
            return True
        logger.error(f"Bar chart file not found at {output_path} after saving")
        return False
    except Exception as e:
        logger.error(f"Error generating bar chart: {str(e)}")
        return False
|
| 96 |
+
|
| 97 |
+
def add_footer(canvas, doc):
    """Draw a centered "Page N" footer on the current page.

    Args:
        canvas: reportlab canvas being drawn on.
        doc: reportlab document template; supplies the current page number.
    """
    canvas.saveState()
    canvas.setFont('Helvetica', 9)
    label = f"Page {doc.page}"
    center_x = letter[0] / 2
    canvas.drawCentredString(center_x, 0.5 * inch, label)
    canvas.restoreState()
|
| 104 |
+
|
| 105 |
+
def generate_report(user_id):
    """
    Generates a PDF report for a user's bee hives with visualizations and returns a BytesIO buffer.

    The report contains: user details, farm details, a hive summary table,
    bee-type and health-status distributions (tables plus pie charts),
    the most recent prediction history (table plus bar chart), and a list
    of generated recommendations. Chart images are written to a unique
    temporary directory which is removed before the function returns.

    Args:
        user_id (str): The ID of the user for whom the report is generated.

    Returns:
        io.BytesIO: A buffer containing the generated PDF report.

    Raises:
        Exception: If farm details are not found or an error occurs during report generation.
    """
    logger.info(f"Generating report for user_id: {user_id}")

    # Create a unique temporary directory for this report
    # (uuid in the name avoids collisions between concurrent report requests).
    temp_dir = os.path.join(tempfile.gettempdir(), f"report_{uuid.uuid4()}")
    os.makedirs(temp_dir, exist_ok=True)

    try:
        # Get user details. A missing user downgrades to a placeholder line
        # rather than aborting the whole report.
        user = get_user_profile(user_id)
        if 'error' in user:
            logger.error(f"User not found for user_id: {user_id}")
            user_details = ["User details not found."]
        else:
            user_details = [
                f"Name: {user.get('fullname', 'N/A')}",
                f"Email: {user.get('email', 'N/A')}",
                f"Location: {user.get('city', 'N/A')}, {user.get('country', 'N/A')}",
                f"Gender: {user.get('gender', 'N/A')}",
                f"Phone: {user.get('phone_number', 'N/A')}"
            ]

        # Get farm details — unlike user details, a missing farm is fatal.
        farm = get_farm_details_from_db(user_id)
        if not farm:
            logger.error(f"No farm details found for user_id: {user_id}")
            raise Exception("Farm details not found")

        # Get hives and tally health-status / bee-type counts while iterating.
        hives = get_hives_from_db(farm['farm_id'])
        hive_details = []
        health_status_counts = {}
        bee_type_counts = {}

        for hive in hives:
            hive_detail = get_hive_detail_from_db(hive['hive_id'])
            # Hives whose detail lookup failed are skipped entirely.
            if 'error' not in hive_detail:
                hive_details.append(hive_detail)
                health_status = hive_detail.get('health_status', 'Unknown')
                health_status_counts[health_status] = health_status_counts.get(health_status, 0) + 1
                bee_type = hive_detail.get('bee_type', 'Unknown')
                bee_type_counts[bee_type] = bee_type_counts.get(bee_type, 0) + 1

        # Get prediction history (assumed list of dicts with a 'result' key —
        # confirm against get_history's return shape).
        history = get_history(user_id)

        # Generate recommendations from simple threshold heuristics.
        recommendations = []
        if health_status_counts.get('Unhealthy', 0) > 0:
            recommendations.append("Inspect hives with 'Unhealthy' status immediately and consult a beekeeping expert.")
        if health_status_counts.get('Unknown', 0) > 0:
            recommendations.append("Update health status for hives marked as 'Unknown' to ensure accurate monitoring.")
        no_queen_count = sum(1 for entry in history if entry['result'].lower() == 'no queen')
        if no_queen_count > len(history) * 0.3:  # More than 30% no queen results
            recommendations.append("Multiple hives lack a queen. Consider introducing new queens or requeening.")
        if len(history) > 0 and len([entry for entry in history if entry['result'].lower() == 'not bee']) > len(history) * 0.5:
            recommendations.append("High number of 'not bee' predictions. Verify audio recordings and hive activity.")

        # Create PDF — built entirely in memory; the buffer is what we return.
        buffer = io.BytesIO()
        doc = SimpleDocTemplate(buffer, pagesize=letter, topMargin=0.5*inch, bottomMargin=0.5*inch)
        styles = getSampleStyleSheet()

        # Custom styles (alignment=1 centers the title paragraph).
        styles.add(ParagraphStyle(name='CenteredTitle', parent=styles['Title'], alignment=1))
        styles.add(ParagraphStyle(name='BoldNormal', parent=styles['Normal'], fontName='Helvetica-Bold'))

        elements = []

        # Title
        elements.append(Paragraph("Bee Hive Monitoring Report", styles['CenteredTitle']))
        elements.append(Paragraph(f"Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", styles['Normal']))
        elements.append(Spacer(1, 12))

        # User Details
        elements.append(Paragraph("User Details", styles['Heading2']))
        for detail in user_details:
            elements.append(Paragraph(detail, styles['Normal']))
        elements.append(Spacer(1, 12))

        # Farm Details — table failures are non-fatal; an error paragraph
        # is shown instead so the rest of the report still builds.
        try:
            farm_data = [
                ["Farm ID", farm.get('farm_id', 'N/A')],
                ["Name", farm.get('fullname', 'N/A')],
                ["Location", f"{farm.get('city', 'N/A')}, {farm.get('country', 'N/A')} {farm.get('zip', 'N/A')}"]
            ]
            farm_table = create_table(farm_data, col_widths=[2*inch, 4*inch])
            elements.append(farm_table)
        except Exception as e:
            logger.error(f"Error creating farm details table: {e}")
            elements.append(Paragraph("Error: Unable to display farm details.", styles['Normal']))
        elements.append(Spacer(1, 12))

        # Hive Summary
        elements.append(Paragraph("Hive Summary", styles['Heading2']))
        hive_data = [["Hive #", "Bee Type", "Frames", "Health", "Created"]]
        for hive in hive_details:
            hive_data.append([
                hive.get('hive_number', 'N/A'),
                hive.get('bee_type', 'N/A'),
                hive.get('number_of_frames', 'N/A'),
                hive.get('health_status', 'Unknown'),
                # NOTE(review): assumes 'creation_date' is a datetime-like
                # object with strftime — a string value here would raise.
                hive.get('creation_date', datetime.now()).strftime('%Y-%m-%d')
            ])
        hive_table = create_table(hive_data, col_widths=[1.2*inch, 1.8*inch, 1.2*inch, 1.2*inch, 1.6*inch])
        elements.append(hive_table)
        elements.append(Spacer(1, 12))

        # Bee Type Distribution with Pie Chart
        elements.append(Paragraph("Bee Type Distribution", styles['Heading2']))
        bee_type_table = create_table([["Bee Type", "Count"]] + [[bt, count] for bt, count in bee_type_counts.items()], col_widths=[3*inch, 1*inch])
        elements.append(bee_type_table)
        bee_type_chart_path = os.path.join(temp_dir, f"bee_type_pie_{user_id}.png")
        if bee_type_counts:
            if create_pie_chart(bee_type_counts, bee_type_chart_path, "Bee Type Distribution"):
                # Double-check the file on disk before embedding it in the PDF.
                if os.path.exists(bee_type_chart_path):
                    elements.append(Spacer(1, 12))
                    elements.append(Image(bee_type_chart_path, width=3*inch, height=3*inch, kind='proportional'))
                else:
                    elements.append(Paragraph("Error: Unable to display bee type pie chart.", styles['Normal']))
            else:
                elements.append(Paragraph("No bee type data available for pie chart.", styles['Normal']))
        else:
            elements.append(Paragraph("No bee type data available for pie chart.", styles['Normal']))
        elements.append(Spacer(1, 12))

        # Health Status Overview with Pie Chart
        elements.append(Paragraph("Health Status Overview", styles['Heading2']))
        health_data = [[status, count] for status, count in health_status_counts.items()]
        health_table = create_table([["Status", "Count"]] + health_data, col_widths=[3*inch, 1*inch])
        elements.append(health_table)
        health_chart_path = os.path.join(temp_dir, f"health_pie_{user_id}.png")
        if health_status_counts:
            if create_pie_chart(health_status_counts, health_chart_path, "Health Status Distribution"):
                if os.path.exists(health_chart_path):
                    elements.append(Spacer(1, 12))
                    elements.append(Image(health_chart_path, width=3*inch, height=3*inch, kind='proportional'))
                else:
                    elements.append(Paragraph("Error: Unable to display health status pie chart.", styles['Normal']))
            else:
                elements.append(Paragraph("No health status data available for pie chart.", styles['Normal']))
        else:
            elements.append(Paragraph("No health status data available for pie chart.", styles['Normal']))
        elements.append(Spacer(1, 12))

        # Prediction History with Bar Chart (table shows the first 10 entries only).
        elements.append(Paragraph("Prediction History", styles['Heading2']))
        history_data = [["Timestamp", "Audio", "Result", "Hive #"]]
        for entry in history[:10]:
            history_data.append([
                entry.get('timestamp', 'N/A'),
                entry.get('audio_name', 'N/A'),
                entry.get('result', 'N/A'),
                # 'or "N/A"' also covers falsy values such as None or empty string.
                entry.get('hive_number', 'N/A') or "N/A"
            ])
        history_table = create_table(history_data, col_widths=[1.5*inch, 2*inch, 1*inch, 1*inch])
        elements.append(history_table)
        bar_chart_path = os.path.join(temp_dir, f"prediction_bar_{user_id}.png")
        if history:
            # The bar chart uses the FULL history, not just the 10 rows shown above.
            if create_bar_chart(history, bar_chart_path):
                if os.path.exists(bar_chart_path):
                    elements.append(Spacer(1, 12))
                    elements.append(Image(bar_chart_path, width=4*inch, height=2.5*inch))
                else:
                    elements.append(Paragraph("Error: Unable to display prediction history bar chart.", styles['Normal']))
            else:
                elements.append(Paragraph("No prediction data available for bar chart.", styles['Normal']))
        else:
            elements.append(Paragraph("No prediction data available for bar chart.", styles['Normal']))
        elements.append(Spacer(1, 12))

        # Recommendations
        elements.append(Paragraph("Recommendations", styles['Heading2']))
        for rec in recommendations:
            elements.append(Paragraph(f"• {rec}", styles['BoldNormal']))
        elements.append(Spacer(1, 12))

        # Build PDF with footer drawn on every page via add_footer.
        try:
            doc.build(elements, onFirstPage=add_footer, onLaterPages=add_footer)
        except Exception as e:
            logger.error(f"Error building PDF: {e}")
            raise Exception(f"Failed to generate PDF: {e}")

    finally:
        # Clean up temporary directory (chart images) regardless of outcome;
        # cleanup failures are logged but never mask the report result.
        if os.path.exists(temp_dir):
            for file in os.listdir(temp_dir):
                try:
                    os.remove(os.path.join(temp_dir, file))
                except Exception as e:
                    logger.error(f"Error cleaning up file {file}: {e}")
            try:
                os.rmdir(temp_dir)
            except Exception as e:
                logger.error(f"Error cleaning up directory {temp_dir}: {e}")

    # Rewind so callers can stream the PDF from the start of the buffer.
    buffer.seek(0)
    logger.info(f"Report generated successfully for user_id: {user_id}")
    return buffer
|
requirements.txt
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
tensorflow
|
| 2 |
+
librosa==0.9.2
|
| 3 |
+
numba==0.56.4
|
| 4 |
+
llvmlite==0.39.1
|
| 5 |
+
reportlab
|
| 6 |
+
matplotlib
|
| 7 |
+
pillow
|
| 8 |
+
torch
|
| 9 |
+
soundfile
|
| 10 |
+
Flask==2.3.3
|
| 11 |
+
flask-cors==4.0.0
|
| 12 |
+
psycopg2-binary==2.9.9
|
| 13 |
+
google-auth==2.27.0
|
| 14 |
+
google-auth-oauthlib==1.1.0
|
| 15 |
+
google-auth-httplib2==0.2.0
|
| 16 |
+
google-api-python-client==2.123.0
|
| 17 |
+
flask-dance
|
| 18 |
+
Authlib
|
| 19 |
+
requests
|
| 20 |
+
sendgrid
|
| 21 |
+
apscheduler
|