Spaces:
Running
Running
Commit
·
09aaa98
1
Parent(s):
c474f12
added navigation control to map
Browse files- io-app-backend/app.py +50 -47
- io-app-backend/evaluate_system.py +46 -159
- io-app-backend/metrics.py +80 -79
- io-app-backend/plotting_utils.py +0 -429
- io-app-backend/terramindFunctions.py +258 -201
- io-app-front/README.md +0 -73
- io-app-front/index.html +4 -2
- io-app-front/public/vite.svg +0 -1
- io-app-front/src/App.jsx +15 -9
- io-app-front/src/assets/react.svg +0 -1
- io-app-front/src/components/AdvancedAnalysisModal/AdvancedAnalysisModal.jsx +2 -22
- io-app-front/src/components/AdvancedAnalysisModal/AdvancedAnalysisModal.module.css +8 -22
- io-app-front/src/components/AnalysisOptions/AnalysisOptions.jsx +4 -3
- io-app-front/src/components/AnalysisOptions/AnalysisOptions.module.css +1 -1
- io-app-front/src/components/AnalysisPanel/AnalysisPanel.jsx +1 -1
- io-app-front/src/components/LayerOpacitySlider/LayerOpacitySlider.module.css +2 -1
- io-app-front/src/components/SearchBar/SearchBar.module.css +1 -0
- io-app-front/src/hooks/useMap.js +8 -0
- io-app-front/src/utils/mapUtils.js +14 -1
io-app-backend/app.py
CHANGED
|
@@ -10,8 +10,9 @@ app = Flask(__name__, static_folder='static', static_url_path='')
|
|
| 10 |
|
| 11 |
CORS(app)
|
| 12 |
|
| 13 |
-
#
|
| 14 |
-
|
|
|
|
| 15 |
@app.route('/api/config', methods=['GET'])
|
| 16 |
def get_config():
|
| 17 |
return jsonify({
|
|
@@ -22,7 +23,7 @@ def get_config():
|
|
| 22 |
def analyze_endpoint():
|
| 23 |
try:
|
| 24 |
data = request.json
|
| 25 |
-
print(f"
|
| 26 |
|
| 27 |
location = data.get('location', {})
|
| 28 |
lat = location.get('lat')
|
|
@@ -40,8 +41,8 @@ def analyze_endpoint():
|
|
| 40 |
'error': 'Missing coordinates (lat/lng)'
|
| 41 |
}), 400
|
| 42 |
|
| 43 |
-
print(f"
|
| 44 |
-
print(f"
|
| 45 |
|
| 46 |
result = analyze(
|
| 47 |
location_data=[lat, lng],
|
|
@@ -77,26 +78,26 @@ def analyze_endpoint():
|
|
| 77 |
}
|
| 78 |
}
|
| 79 |
|
| 80 |
-
print(f"
|
| 81 |
return jsonify(response)
|
| 82 |
|
| 83 |
except Exception as e:
|
| 84 |
-
print(f"
|
| 85 |
traceback.print_exc()
|
| 86 |
return jsonify({'success': False, 'error': str(e)}), 500
|
| 87 |
|
| 88 |
@app.route('/api/advanced-analyze', methods=['POST'])
|
| 89 |
def advanced_analyze_endpoint():
|
| 90 |
"""
|
| 91 |
-
Endpoint
|
| 92 |
-
|
| 93 |
-
-
|
| 94 |
-
-
|
| 95 |
-
-
|
| 96 |
"""
|
| 97 |
try:
|
| 98 |
data = request.json
|
| 99 |
-
print(f"
|
| 100 |
|
| 101 |
location = data.get('location', {})
|
| 102 |
lat = location.get('lat')
|
|
@@ -113,11 +114,11 @@ def advanced_analyze_endpoint():
|
|
| 113 |
'error': 'Missing coordinates (lat/lng)'
|
| 114 |
}), 400
|
| 115 |
|
| 116 |
-
print(f"
|
| 117 |
-
print(f"
|
| 118 |
-
print(f"
|
| 119 |
|
| 120 |
-
#
|
| 121 |
eval_result = run_evaluation_with_models(lat, lng, buffer_km, model_a, model_b)
|
| 122 |
|
| 123 |
if 'error' in eval_result:
|
|
@@ -126,35 +127,35 @@ def advanced_analyze_endpoint():
|
|
| 126 |
'error': eval_result.get('error')
|
| 127 |
}), 404
|
| 128 |
|
| 129 |
-
#
|
| 130 |
-
print("
|
| 131 |
import base64
|
| 132 |
import io
|
| 133 |
from PIL import Image
|
| 134 |
import numpy as np
|
| 135 |
|
| 136 |
-
#
|
| 137 |
-
model_a_map = eval_result['maps']['modelA'] #
|
| 138 |
-
model_a_map_raw = eval_result['maps']['modelA_raw'] #
|
| 139 |
-
model_b_map = eval_result['maps']['modelB'] #
|
| 140 |
-
model_b_map_raw = eval_result['maps']['modelB_raw'] #
|
| 141 |
raw_data = eval_result['raw_data']
|
| 142 |
input_tensor = eval_result['input_tensor']
|
| 143 |
|
| 144 |
-
#
|
| 145 |
def rgb_to_base64(rgb_array):
|
| 146 |
-
"""
|
| 147 |
img = Image.fromarray(rgb_array.astype(np.uint8))
|
| 148 |
buf = io.BytesIO()
|
| 149 |
img.save(buf, format='PNG')
|
| 150 |
buf.seek(0)
|
| 151 |
return base64.b64encode(buf.read()).decode('utf-8')
|
| 152 |
|
| 153 |
-
# RGB
|
| 154 |
rgb_image = tm.create_rgb_image(input_tensor)
|
| 155 |
rgb_base64 = rgb_to_base64(rgb_image)
|
| 156 |
|
| 157 |
-
#
|
| 158 |
model_a_raw_segmentation = tm.create_segmentation_image(model_a_map_raw)
|
| 159 |
model_a_segmentation = tm.create_segmentation_image(model_a_map)
|
| 160 |
model_b_raw_segmentation = tm.create_segmentation_image(model_b_map_raw)
|
|
@@ -165,17 +166,17 @@ def advanced_analyze_endpoint():
|
|
| 165 |
model_b_raw_seg_base64 = rgb_to_base64(model_b_raw_segmentation)
|
| 166 |
model_b_seg_base64 = rgb_to_base64(model_b_segmentation)
|
| 167 |
|
| 168 |
-
#
|
| 169 |
indices = eval_result['indices']
|
| 170 |
masks_dict = {}
|
| 171 |
|
| 172 |
-
#
|
| 173 |
index_masks = tm.generate_index_masks(indices)
|
| 174 |
|
| 175 |
if isinstance(index_masks, dict):
|
| 176 |
for mask_name, mask_array in index_masks.items():
|
| 177 |
try:
|
| 178 |
-
#
|
| 179 |
mask_binary = mask_array.astype(np.uint8) * 255
|
| 180 |
img_mask = Image.fromarray(mask_binary, mode='L')
|
| 181 |
buf_mask = io.BytesIO()
|
|
@@ -183,9 +184,9 @@ def advanced_analyze_endpoint():
|
|
| 183 |
buf_mask.seek(0)
|
| 184 |
masks_dict[mask_name] = base64.b64encode(buf_mask.read()).decode('utf-8')
|
| 185 |
except Exception as e:
|
| 186 |
-
print(f"
|
| 187 |
|
| 188 |
-
#
|
| 189 |
response = {
|
| 190 |
'success': True,
|
| 191 |
'date': eval_result.get('date'),
|
|
@@ -195,7 +196,7 @@ def advanced_analyze_endpoint():
|
|
| 195 |
'image_width': eval_result.get('image_width', 512),
|
| 196 |
'image_height': eval_result.get('image_height', 512),
|
| 197 |
|
| 198 |
-
#
|
| 199 |
'modelA': {
|
| 200 |
'name': model_a.split('_')[2].upper(),
|
| 201 |
'rgb': f"data:image/png;base64,{rgb_base64}",
|
|
@@ -217,18 +218,18 @@ def advanced_analyze_endpoint():
|
|
| 217 |
}
|
| 218 |
},
|
| 219 |
|
| 220 |
-
#
|
| 221 |
'metrics': eval_result.get('metrics', {})
|
| 222 |
}
|
| 223 |
|
| 224 |
-
print(f"
|
| 225 |
-
print(f"
|
| 226 |
-
print(f"
|
| 227 |
|
| 228 |
return jsonify(response)
|
| 229 |
|
| 230 |
except Exception as e:
|
| 231 |
-
print(f"
|
| 232 |
traceback.print_exc()
|
| 233 |
return jsonify({
|
| 234 |
'success': False,
|
|
@@ -238,31 +239,33 @@ def advanced_analyze_endpoint():
|
|
| 238 |
|
| 239 |
@app.route('/api/health', methods=['GET'])
|
| 240 |
def health_check():
|
| 241 |
-
return jsonify({'status': 'ok', 'message': 'Backend
|
| 242 |
|
| 243 |
|
| 244 |
-
#
|
|
|
|
|
|
|
| 245 |
|
| 246 |
@app.route('/')
|
| 247 |
def serve_index():
|
| 248 |
-
"""
|
| 249 |
return send_from_directory(app.static_folder, 'index.html')
|
| 250 |
|
| 251 |
@app.route('/<path:path>')
|
| 252 |
def serve_static_files(path):
|
| 253 |
-
"""
|
| 254 |
return send_from_directory(app.static_folder, path)
|
| 255 |
|
| 256 |
@app.errorhandler(404)
|
| 257 |
def not_found(e):
|
| 258 |
-
"""
|
| 259 |
return send_from_directory(app.static_folder, 'index.html')
|
| 260 |
|
| 261 |
if __name__ == '__main__':
|
| 262 |
print("\n" + "="*60)
|
| 263 |
-
print("
|
| 264 |
print("="*60)
|
| 265 |
-
|
| 266 |
-
# Port 7860
|
| 267 |
port = int(os.environ.get("FLASK_RUN_PORT", 7860))
|
| 268 |
app.run(host='0.0.0.0', port=port, debug=False)
|
|
|
|
| 10 |
|
| 11 |
CORS(app)
|
| 12 |
|
| 13 |
+
# =========================================
|
| 14 |
+
# API ENDPOINTS
|
| 15 |
+
# =========================================
|
| 16 |
@app.route('/api/config', methods=['GET'])
|
| 17 |
def get_config():
|
| 18 |
return jsonify({
|
|
|
|
| 23 |
def analyze_endpoint():
|
| 24 |
try:
|
| 25 |
data = request.json
|
| 26 |
+
print(f"[REQUEST] Received request: {data}")
|
| 27 |
|
| 28 |
location = data.get('location', {})
|
| 29 |
lat = location.get('lat')
|
|
|
|
| 41 |
'error': 'Missing coordinates (lat/lng)'
|
| 42 |
}), 400
|
| 43 |
|
| 44 |
+
print(f"[RUNNING] Starting analysis for: [{lat}, {lng}]")
|
| 45 |
+
print(f"[PARAMETERS] Buffer={buffer_km}km, MaxCloudCover<{max_cloud_cover}%, HistoryDays={days_back}")
|
| 46 |
|
| 47 |
result = analyze(
|
| 48 |
location_data=[lat, lng],
|
|
|
|
| 78 |
}
|
| 79 |
}
|
| 80 |
|
| 81 |
+
print(f"[SUCCESS] Sending result to frontend")
|
| 82 |
return jsonify(response)
|
| 83 |
|
| 84 |
except Exception as e:
|
| 85 |
+
print(f"[ERROR] Error during analysis: {str(e)}")
|
| 86 |
traceback.print_exc()
|
| 87 |
return jsonify({'success': False, 'error': str(e)}), 500
|
| 88 |
|
| 89 |
@app.route('/api/advanced-analyze', methods=['POST'])
|
| 90 |
def advanced_analyze_endpoint():
|
| 91 |
"""
|
| 92 |
+
Endpoint for advanced comparative analysis with metrics.
|
| 93 |
+
Compares two selected models and returns:
|
| 94 |
+
- Images from both models (RGB, raw, final)
|
| 95 |
+
- Spectral masks
|
| 96 |
+
- Comparative metrics
|
| 97 |
"""
|
| 98 |
try:
|
| 99 |
data = request.json
|
| 100 |
+
print(f"[REQUEST] Received advanced analysis request: {data}")
|
| 101 |
|
| 102 |
location = data.get('location', {})
|
| 103 |
lat = location.get('lat')
|
|
|
|
| 114 |
'error': 'Missing coordinates (lat/lng)'
|
| 115 |
}), 400
|
| 116 |
|
| 117 |
+
print(f"[RUNNING] Starting advanced analysis for: [{lat}, {lng}]")
|
| 118 |
+
print(f"[COMPARISON] {model_a} vs {model_b}")
|
| 119 |
+
print(f"[BUFFER] {buffer_km} km")
|
| 120 |
|
| 121 |
+
# Run evaluate_system.run_evaluation_with_models()
|
| 122 |
eval_result = run_evaluation_with_models(lat, lng, buffer_km, model_a, model_b)
|
| 123 |
|
| 124 |
if 'error' in eval_result:
|
|
|
|
| 127 |
'error': eval_result.get('error')
|
| 128 |
}), 404
|
| 129 |
|
| 130 |
+
# Convert maps to images (using terramindFunctions)
|
| 131 |
+
print("[CONVERTING] Converting maps to images...")
|
| 132 |
import base64
|
| 133 |
import io
|
| 134 |
from PIL import Image
|
| 135 |
import numpy as np
|
| 136 |
|
| 137 |
+
# Maps with corrections and without corrections
|
| 138 |
+
model_a_map = eval_result['maps']['modelA'] # With corrections
|
| 139 |
+
model_a_map_raw = eval_result['maps']['modelA_raw'] # Without corrections
|
| 140 |
+
model_b_map = eval_result['maps']['modelB'] # With corrections
|
| 141 |
+
model_b_map_raw = eval_result['maps']['modelB_raw'] # Without corrections
|
| 142 |
raw_data = eval_result['raw_data']
|
| 143 |
input_tensor = eval_result['input_tensor']
|
| 144 |
|
| 145 |
+
# Function to convert numpy RGB array to base64
|
| 146 |
def rgb_to_base64(rgb_array):
|
| 147 |
+
"""Converts RGB array to PNG base64"""
|
| 148 |
img = Image.fromarray(rgb_array.astype(np.uint8))
|
| 149 |
buf = io.BytesIO()
|
| 150 |
img.save(buf, format='PNG')
|
| 151 |
buf.seek(0)
|
| 152 |
return base64.b64encode(buf.read()).decode('utf-8')
|
| 153 |
|
| 154 |
+
# RGB from raw_data (using create_rgb_image from terramindFunctions)
|
| 155 |
rgb_image = tm.create_rgb_image(input_tensor)
|
| 156 |
rgb_base64 = rgb_to_base64(rgb_image)
|
| 157 |
|
| 158 |
+
# Segmentation maps - raw (without corrections) and final (with corrections)
|
| 159 |
model_a_raw_segmentation = tm.create_segmentation_image(model_a_map_raw)
|
| 160 |
model_a_segmentation = tm.create_segmentation_image(model_a_map)
|
| 161 |
model_b_raw_segmentation = tm.create_segmentation_image(model_b_map_raw)
|
|
|
|
| 166 |
model_b_raw_seg_base64 = rgb_to_base64(model_b_raw_segmentation)
|
| 167 |
model_b_seg_base64 = rgb_to_base64(model_b_segmentation)
|
| 168 |
|
| 169 |
+
# Spectral masks (from indices)
|
| 170 |
indices = eval_result['indices']
|
| 171 |
masks_dict = {}
|
| 172 |
|
| 173 |
+
# Generate masks like in analyze()
|
| 174 |
index_masks = tm.generate_index_masks(indices)
|
| 175 |
|
| 176 |
if isinstance(index_masks, dict):
|
| 177 |
for mask_name, mask_array in index_masks.items():
|
| 178 |
try:
|
| 179 |
+
# Convert binary masks to 0-255
|
| 180 |
mask_binary = mask_array.astype(np.uint8) * 255
|
| 181 |
img_mask = Image.fromarray(mask_binary, mode='L')
|
| 182 |
buf_mask = io.BytesIO()
|
|
|
|
| 184 |
buf_mask.seek(0)
|
| 185 |
masks_dict[mask_name] = base64.b64encode(buf_mask.read()).decode('utf-8')
|
| 186 |
except Exception as e:
|
| 187 |
+
print(f"[WARNING] Error converting mask {mask_name}: {e}")
|
| 188 |
|
| 189 |
+
# Format the response
|
| 190 |
response = {
|
| 191 |
'success': True,
|
| 192 |
'date': eval_result.get('date'),
|
|
|
|
| 196 |
'image_width': eval_result.get('image_width', 512),
|
| 197 |
'image_height': eval_result.get('image_height', 512),
|
| 198 |
|
| 199 |
+
# IMAGES
|
| 200 |
'modelA': {
|
| 201 |
'name': model_a.split('_')[2].upper(),
|
| 202 |
'rgb': f"data:image/png;base64,{rgb_base64}",
|
|
|
|
| 218 |
}
|
| 219 |
},
|
| 220 |
|
| 221 |
+
# METRICS
|
| 222 |
'metrics': eval_result.get('metrics', {})
|
| 223 |
}
|
| 224 |
|
| 225 |
+
print(f"[SUCCESS] Sending advanced analysis to frontend")
|
| 226 |
+
print(f" Image date: {eval_result.get('date')}")
|
| 227 |
+
print(f" Metrics: {list(eval_result.get('metrics', {}).keys())}")
|
| 228 |
|
| 229 |
return jsonify(response)
|
| 230 |
|
| 231 |
except Exception as e:
|
| 232 |
+
print(f"[ERROR] Error during advanced analysis: {str(e)}")
|
| 233 |
traceback.print_exc()
|
| 234 |
return jsonify({
|
| 235 |
'success': False,
|
|
|
|
| 239 |
|
| 240 |
@app.route('/api/health', methods=['GET'])
|
| 241 |
def health_check():
|
| 242 |
+
return jsonify({'status': 'ok', 'message': 'Backend is working correctly'})
|
| 243 |
|
| 244 |
|
| 245 |
+
# =========================================
|
| 246 |
+
# FRONTEND SERVING
|
| 247 |
+
# =========================================
|
| 248 |
|
| 249 |
@app.route('/')
|
| 250 |
def serve_index():
|
| 251 |
+
"""Serves the main index.html file of the frontend."""
|
| 252 |
return send_from_directory(app.static_folder, 'index.html')
|
| 253 |
|
| 254 |
@app.route('/<path:path>')
|
| 255 |
def serve_static_files(path):
|
| 256 |
+
"""Serves remaining static files (js, css, images)."""
|
| 257 |
return send_from_directory(app.static_folder, path)
|
| 258 |
|
| 259 |
@app.errorhandler(404)
|
| 260 |
def not_found(e):
|
| 261 |
+
"""Handles page refresh (React Router) - redirects back to index.html."""
|
| 262 |
return send_from_directory(app.static_folder, 'index.html')
|
| 263 |
|
| 264 |
if __name__ == '__main__':
|
| 265 |
print("\n" + "="*60)
|
| 266 |
+
print("[START] TerraMind server ready on Hugging Face!")
|
| 267 |
print("="*60)
|
| 268 |
+
|
| 269 |
+
# Port 7860 is required by Hugging Face
|
| 270 |
port = int(os.environ.get("FLASK_RUN_PORT", 7860))
|
| 271 |
app.run(host='0.0.0.0', port=port, debug=False)
|
io-app-backend/evaluate_system.py
CHANGED
|
@@ -1,14 +1,14 @@
|
|
| 1 |
import torch
|
| 2 |
import terramindFunctions as tm
|
| 3 |
from terratorch import FULL_MODEL_REGISTRY
|
| 4 |
-
from metrics import calculate_precision,calculate_recall,calculate_accuracy, calculate_miou,
|
| 5 |
|
| 6 |
-
#
|
| 7 |
DEVICE = tm.device
|
| 8 |
|
| 9 |
def load_model(model_name):
|
| 10 |
-
"""
|
| 11 |
-
print(f"
|
| 12 |
try:
|
| 13 |
model = FULL_MODEL_REGISTRY.build(
|
| 14 |
model_name,
|
|
@@ -20,8 +20,8 @@ def load_model(model_name):
|
|
| 20 |
model.eval()
|
| 21 |
return model
|
| 22 |
except Exception as e:
|
| 23 |
-
print(f"
|
| 24 |
-
print(f"
|
| 25 |
try:
|
| 26 |
model = FULL_MODEL_REGISTRY.build(
|
| 27 |
"terramind_v1_large_generate",
|
|
@@ -33,199 +33,90 @@ def load_model(model_name):
|
|
| 33 |
model.eval()
|
| 34 |
return model
|
| 35 |
except Exception as e2:
|
| 36 |
-
print(f"
|
| 37 |
return None
|
| 38 |
|
| 39 |
-
def load_teacher_model():
|
| 40 |
-
"""Ładuje model Large (Nauczyciela)."""
|
| 41 |
-
print(f"⏳ Ładowanie Nauczyciela...")
|
| 42 |
-
return load_model("terramind_v1_large_generate")
|
| 43 |
-
|
| 44 |
-
def process_with_model(model, input_tensor, indices):
|
| 45 |
-
"""
|
| 46 |
-
To jest ta "wspólna logika", o którą pytałeś.
|
| 47 |
-
Bierze DOWOLNY model (Student lub Teacher) i zwraca gotową mapę po korektach.
|
| 48 |
-
Używa funkcji z terramindFunctions.py.
|
| 49 |
-
"""
|
| 50 |
-
# 1. Inferencja (To samo co w analyze)
|
| 51 |
-
raw_output = tm.run_inference(model, input_tensor)
|
| 52 |
-
|
| 53 |
-
# 2. Dekodowanie (To samo co w analyze)
|
| 54 |
-
class_map = tm.decode_output(raw_output)
|
| 55 |
-
|
| 56 |
-
# # 3. Korekty (To samo co w analyze)
|
| 57 |
-
# # Dzięki temu Teacher też dostanie poprawki wody/roślinności!
|
| 58 |
-
# final_map = tm.apply_hybrid_corrections(class_map, indices)
|
| 59 |
-
|
| 60 |
-
# return final_map
|
| 61 |
-
return class_map
|
| 62 |
-
|
| 63 |
-
def run_evaluation(lat, lon, buffer_km=5):
|
| 64 |
-
print(f"🔍 ROZPOCZYNAM EWALUACJĘ DLA: {lat}, {lon}")
|
| 65 |
-
|
| 66 |
-
# 1. POBRANIE DANYCH (Raz dla obu modeli - oszczędność czasu!)
|
| 67 |
-
dl_result = tm.download_sentinel2(lat, lon, buffer_km, max_cloud_cover=10, days_back=180)
|
| 68 |
-
|
| 69 |
-
if dl_result is None:
|
| 70 |
-
return {"error": "Nie udało się pobrać zdjęć satelitarnych"}
|
| 71 |
-
|
| 72 |
-
raw_data, date, scene_id = dl_result
|
| 73 |
-
|
| 74 |
-
# 2. PRZYGOTOWANIE WSPÓLNYCH DANYCH
|
| 75 |
-
input_tensor = tm.prepare_input(raw_data)
|
| 76 |
-
# Obliczamy indeksy raz i używamy dla obu modeli (spójność)
|
| 77 |
-
indices = tm.calculate_spectral_indices(input_tensor)
|
| 78 |
-
|
| 79 |
-
# ==========================================
|
| 80 |
-
# STUDENT
|
| 81 |
-
# ==========================================
|
| 82 |
-
print("🤖 Przetwarzanie: Student...")
|
| 83 |
-
student_model = tm.get_model()
|
| 84 |
-
# 👇 Używamy naszej funkcji pomocniczej
|
| 85 |
-
student_map = process_with_model(student_model, input_tensor, indices)
|
| 86 |
-
|
| 87 |
-
# ==========================================
|
| 88 |
-
# NAUCZYCIEL
|
| 89 |
-
# ==========================================
|
| 90 |
-
print("👨🏫 Przetwarzanie: Nauczyciel...")
|
| 91 |
-
teacher_model = load_teacher_model()
|
| 92 |
-
|
| 93 |
-
if teacher_model is None:
|
| 94 |
-
return {"error": "Błąd modelu Nauczyciela"}
|
| 95 |
-
|
| 96 |
-
# 👇 Używamy tej samej funkcji pomocniczej (gwarancja identycznego procesu)
|
| 97 |
-
teacher_map = process_with_model(teacher_model, input_tensor, indices)
|
| 98 |
-
|
| 99 |
-
# Czyszczenie pamięci
|
| 100 |
-
del teacher_model
|
| 101 |
-
if torch.cuda.is_available():
|
| 102 |
-
torch.cuda.empty_cache()
|
| 103 |
-
|
| 104 |
-
# ==========================================
|
| 105 |
-
# OBLICZANIE METRYK (Wewnątrz run_evaluation)
|
| 106 |
-
# ==========================================
|
| 107 |
-
print("📊 Liczenie metryk...")
|
| 108 |
-
|
| 109 |
-
# 1. Liczymy wszystko osobno
|
| 110 |
-
acc = calculate_accuracy(student_map, teacher_map)
|
| 111 |
-
miou, iou_details = calculate_miou(student_map, teacher_map)
|
| 112 |
-
fw_iou = calculate_fw_iou(student_map, teacher_map)
|
| 113 |
-
dice = calculate_dice_score(student_map, teacher_map)
|
| 114 |
-
|
| 115 |
-
# Wywołujemy nowe funkcje
|
| 116 |
-
mean_precision, precision_details = calculate_precision(student_map, teacher_map)
|
| 117 |
-
mean_recall, recall_details = calculate_recall(student_map, teacher_map)
|
| 118 |
-
|
| 119 |
-
# 2. Łączymy szczegóły w jeden słownik dla Frontendu
|
| 120 |
-
# Frontend oczekuje struktury: { "Las": { iou: 90, precision: 85, recall: 95 }, ... }
|
| 121 |
-
combined_details = {}
|
| 122 |
-
|
| 123 |
-
# Bierzemy klucze (nazwy klas) z IoU (bo ono jest zawsze liczone)
|
| 124 |
-
for class_name in iou_details.keys():
|
| 125 |
-
combined_details[class_name] = {
|
| 126 |
-
"iou": iou_details.get(class_name, 0.0),
|
| 127 |
-
"precision": precision_details.get(class_name, 0.0),
|
| 128 |
-
"recall": recall_details.get(class_name, 0.0)
|
| 129 |
-
}
|
| 130 |
-
|
| 131 |
-
# 3. Final Score (np. średnia z 4 głównych)
|
| 132 |
-
final_score = (acc + miou + fw_iou + dice) / 4.0
|
| 133 |
-
|
| 134 |
-
return {
|
| 135 |
-
"status": "success",
|
| 136 |
-
"metrics": {
|
| 137 |
-
"accuracy": acc,
|
| 138 |
-
"miou": miou,
|
| 139 |
-
"fw_iou": fw_iou,
|
| 140 |
-
"dice": dice,
|
| 141 |
-
"mean_precision": mean_precision,
|
| 142 |
-
"mean_recall": mean_recall,
|
| 143 |
-
"final_score": final_score,
|
| 144 |
-
"class_details": combined_details
|
| 145 |
-
},
|
| 146 |
-
"maps": {
|
| 147 |
-
"student": student_map,
|
| 148 |
-
"teacher": teacher_map
|
| 149 |
-
},
|
| 150 |
-
"raw_data": raw_data,
|
| 151 |
-
"indices": indices,
|
| 152 |
-
"date": date
|
| 153 |
-
}
|
| 154 |
-
|
| 155 |
def run_evaluation_with_models(lat, lon, buffer_km=5, model_a_name=None, model_b_name=None):
|
| 156 |
"""
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 163 |
"""
|
| 164 |
if model_a_name is None:
|
| 165 |
model_a_name = 'terramind_v1_small_generate'
|
| 166 |
if model_b_name is None:
|
| 167 |
model_b_name = 'terramind_v1_large_generate'
|
| 168 |
|
| 169 |
-
print(f"
|
| 170 |
print(f" Model A: {model_a_name}")
|
| 171 |
print(f" Model B: {model_b_name}")
|
| 172 |
|
| 173 |
-
#
|
| 174 |
dl_result = tm.download_sentinel2(lat, lon, buffer_km, max_cloud_cover=10, days_back=180)
|
| 175 |
|
| 176 |
if dl_result is None:
|
| 177 |
-
return {"error": "
|
| 178 |
|
| 179 |
raw_data, date, scene_id = dl_result
|
| 180 |
|
| 181 |
-
#
|
| 182 |
original_height, original_width = raw_data.shape[1], raw_data.shape[2]
|
| 183 |
-
print(f"
|
| 184 |
|
| 185 |
-
#
|
| 186 |
input_tensor = tm.prepare_input(raw_data)
|
| 187 |
-
#
|
| 188 |
indices = tm.calculate_spectral_indices(input_tensor)
|
| 189 |
|
| 190 |
# ==========================================
|
| 191 |
-
#
|
| 192 |
# ==========================================
|
| 193 |
-
print(f"
|
| 194 |
model_a = load_model(model_a_name)
|
| 195 |
if model_a is None:
|
| 196 |
-
return {"error": f"
|
| 197 |
|
| 198 |
raw_output_a = tm.run_inference(model_a, input_tensor)
|
| 199 |
map_a_raw = tm.decode_output(raw_output_a)
|
| 200 |
-
#
|
| 201 |
map_a, _ = tm.apply_hybrid_corrections(map_a_raw, indices)
|
| 202 |
del model_a
|
| 203 |
|
| 204 |
# ==========================================
|
| 205 |
-
#
|
| 206 |
# ==========================================
|
| 207 |
-
print(f"
|
| 208 |
model_b = load_model(model_b_name)
|
| 209 |
if model_b is None:
|
| 210 |
-
return {"error": f"
|
| 211 |
|
| 212 |
raw_output_b = tm.run_inference(model_b, input_tensor)
|
| 213 |
map_b_raw = tm.decode_output(raw_output_b)
|
| 214 |
-
#
|
| 215 |
map_b, _ = tm.apply_hybrid_corrections(map_b_raw, indices)
|
| 216 |
del model_b
|
| 217 |
|
| 218 |
-
#
|
| 219 |
if torch.cuda.is_available():
|
| 220 |
torch.cuda.empty_cache()
|
| 221 |
|
| 222 |
# ==========================================
|
| 223 |
-
#
|
| 224 |
# ==========================================
|
| 225 |
-
print("
|
| 226 |
|
| 227 |
-
#
|
| 228 |
-
print("
|
| 229 |
acc_raw = calculate_accuracy(map_a_raw, map_b_raw)
|
| 230 |
miou_raw, iou_details_raw = calculate_miou(map_a_raw, map_b_raw)
|
| 231 |
fw_iou_raw = calculate_fw_iou(map_a_raw, map_b_raw)
|
|
@@ -233,7 +124,7 @@ def run_evaluation_with_models(lat, lon, buffer_km=5, model_a_name=None, model_b
|
|
| 233 |
mean_precision_raw, precision_details_raw = calculate_precision(map_a_raw, map_b_raw)
|
| 234 |
mean_recall_raw, recall_details_raw = calculate_recall(map_a_raw, map_b_raw)
|
| 235 |
|
| 236 |
-
#
|
| 237 |
combined_details_raw = {}
|
| 238 |
for class_name in iou_details_raw.keys():
|
| 239 |
combined_details_raw[class_name] = {
|
|
@@ -242,21 +133,21 @@ def run_evaluation_with_models(lat, lon, buffer_km=5, model_a_name=None, model_b
|
|
| 242 |
"recall": recall_details_raw.get(class_name, 0.0)
|
| 243 |
}
|
| 244 |
|
| 245 |
-
#
|
| 246 |
-
print("
|
| 247 |
acc = calculate_accuracy(map_a, map_b)
|
| 248 |
miou, iou_details = calculate_miou(map_a, map_b)
|
| 249 |
fw_iou = calculate_fw_iou(map_a, map_b)
|
| 250 |
dice = calculate_dice_score(map_a, map_b)
|
| 251 |
|
| 252 |
-
#
|
| 253 |
mean_precision, precision_details = calculate_precision(map_a, map_b)
|
| 254 |
mean_recall, recall_details = calculate_recall(map_a, map_b)
|
| 255 |
|
| 256 |
-
#
|
| 257 |
combined_details = {}
|
| 258 |
|
| 259 |
-
#
|
| 260 |
for class_name in iou_details.keys():
|
| 261 |
combined_details[class_name] = {
|
| 262 |
"iou": iou_details.get(class_name, 0.0),
|
|
@@ -264,10 +155,6 @@ def run_evaluation_with_models(lat, lon, buffer_km=5, model_a_name=None, model_b
|
|
| 264 |
"recall": recall_details.get(class_name, 0.0)
|
| 265 |
}
|
| 266 |
|
| 267 |
-
# 3. Final Score (np. średnia z 4 głównych)
|
| 268 |
-
# 3. Final Score (np. średnia z 4 głównych)
|
| 269 |
-
final_score = (acc + miou + fw_iou + dice) / 4.0
|
| 270 |
-
|
| 271 |
return {
|
| 272 |
"status": "success",
|
| 273 |
"metrics": {
|
|
|
|
| 1 |
import torch
|
| 2 |
import terramindFunctions as tm
|
| 3 |
from terratorch import FULL_MODEL_REGISTRY
|
| 4 |
+
from metrics import calculate_precision,calculate_recall,calculate_accuracy, calculate_miou, calculate_fw_iou, calculate_dice_score
|
| 5 |
|
| 6 |
+
# [CONFIGURATION] Device setup
|
| 7 |
DEVICE = tm.device
|
| 8 |
|
| 9 |
def load_model(model_name):
|
| 10 |
+
"""Loads selected model from TerraTorch registry. Falls back to Large version if loading fails."""
|
| 11 |
+
print(f"[LOADING] Loading model: {model_name}...")
|
| 12 |
try:
|
| 13 |
model = FULL_MODEL_REGISTRY.build(
|
| 14 |
model_name,
|
|
|
|
| 20 |
model.eval()
|
| 21 |
return model
|
| 22 |
except Exception as e:
|
| 23 |
+
print(f"[WARNING] Model loading error {model_name}: {e}")
|
| 24 |
+
print(f"[FALLBACK] Attempting terramind_v1_large_generate...")
|
| 25 |
try:
|
| 26 |
model = FULL_MODEL_REGISTRY.build(
|
| 27 |
"terramind_v1_large_generate",
|
|
|
|
| 33 |
model.eval()
|
| 34 |
return model
|
| 35 |
except Exception as e2:
|
| 36 |
+
print(f"[ERROR] Fallback model loading error: {e2}")
|
| 37 |
return None
|
| 38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
def run_evaluation_with_models(lat, lon, buffer_km=5, model_a_name=None, model_b_name=None):
|
| 40 |
"""
|
| 41 |
+
Runs comparison of two selected models on satellite imagery.
|
| 42 |
+
Downloads data once and processes both models with spectral corrections.
|
| 43 |
+
Computes metrics for both raw and corrected outputs.
|
| 44 |
+
|
| 45 |
+
Args:
|
| 46 |
+
lat: latitude coordinate
|
| 47 |
+
lon: longitude coordinate
|
| 48 |
+
buffer_km: search radius in kilometers
|
| 49 |
+
model_a_name: name of first model (default: terramind_v1_small_generate)
|
| 50 |
+
model_b_name: name of second model (default: terramind_v1_large_generate)
|
| 51 |
+
|
| 52 |
+
Returns:
|
| 53 |
+
dict with comparison metrics, class maps, and imagery data
|
| 54 |
"""
|
| 55 |
if model_a_name is None:
|
| 56 |
model_a_name = 'terramind_v1_small_generate'
|
| 57 |
if model_b_name is None:
|
| 58 |
model_b_name = 'terramind_v1_large_generate'
|
| 59 |
|
| 60 |
+
print(f"[COMPARE] Model comparison for: {lat}, {lon}")
|
| 61 |
print(f" Model A: {model_a_name}")
|
| 62 |
print(f" Model B: {model_b_name}")
|
| 63 |
|
| 64 |
+
# [DOWNLOAD] Download data once for both models (time efficient)
|
| 65 |
dl_result = tm.download_sentinel2(lat, lon, buffer_km, max_cloud_cover=10, days_back=180)
|
| 66 |
|
| 67 |
if dl_result is None:
|
| 68 |
+
return {"error": "Failed to download satellite imagery"}
|
| 69 |
|
| 70 |
raw_data, date, scene_id = dl_result
|
| 71 |
|
| 72 |
+
# [DIMENSIONS] Save original image dimensions before scaling to 224x224
|
| 73 |
original_height, original_width = raw_data.shape[1], raw_data.shape[2]
|
| 74 |
+
print(f"[DIMENSIONS] Original image size: {original_width}x{original_height}")
|
| 75 |
|
| 76 |
+
# [PREPARE] Prepare common data for both models
|
| 77 |
input_tensor = tm.prepare_input(raw_data)
|
| 78 |
+
# [INDICES] Calculate spectral indices once and use for both models (consistency)
|
| 79 |
indices = tm.calculate_spectral_indices(input_tensor)
|
| 80 |
|
| 81 |
# ==========================================
|
| 82 |
+
# [MODEL_A] Model A processing
|
| 83 |
# ==========================================
|
| 84 |
+
print(f"[PROCESSING] Processing: {model_a_name}...")
|
| 85 |
model_a = load_model(model_a_name)
|
| 86 |
if model_a is None:
|
| 87 |
+
return {"error": f"Error loading model {model_a_name}"}
|
| 88 |
|
| 89 |
raw_output_a = tm.run_inference(model_a, input_tensor)
|
| 90 |
map_a_raw = tm.decode_output(raw_output_a)
|
| 91 |
+
# [CORRECTIONS] Apply spectral corrections
|
| 92 |
map_a, _ = tm.apply_hybrid_corrections(map_a_raw, indices)
|
| 93 |
del model_a
|
| 94 |
|
| 95 |
# ==========================================
|
| 96 |
+
# [MODEL_B] Model B processing
|
| 97 |
# ==========================================
|
| 98 |
+
print(f"[PROCESSING] Processing: {model_b_name}...")
|
| 99 |
model_b = load_model(model_b_name)
|
| 100 |
if model_b is None:
|
| 101 |
+
return {"error": f"Error loading model {model_b_name}"}
|
| 102 |
|
| 103 |
raw_output_b = tm.run_inference(model_b, input_tensor)
|
| 104 |
map_b_raw = tm.decode_output(raw_output_b)
|
| 105 |
+
# [CORRECTIONS] Apply spectral corrections
|
| 106 |
map_b, _ = tm.apply_hybrid_corrections(map_b_raw, indices)
|
| 107 |
del model_b
|
| 108 |
|
| 109 |
+
# [CLEANUP] Memory cleanup
|
| 110 |
if torch.cuda.is_available():
|
| 111 |
torch.cuda.empty_cache()
|
| 112 |
|
| 113 |
# ==========================================
|
| 114 |
+
# [METRICS] Calculate evaluation metrics
|
| 115 |
# ==========================================
|
| 116 |
+
print("[METRICS] Computing metrics...")
|
| 117 |
|
| 118 |
+
# [RAW_METRICS] Metrics for RAW segmentation (without spectral indices)
|
| 119 |
+
print(" [RAW] Computing metrics for raw segmentation (without spectral indices)...")
|
| 120 |
acc_raw = calculate_accuracy(map_a_raw, map_b_raw)
|
| 121 |
miou_raw, iou_details_raw = calculate_miou(map_a_raw, map_b_raw)
|
| 122 |
fw_iou_raw = calculate_fw_iou(map_a_raw, map_b_raw)
|
|
|
|
| 124 |
mean_precision_raw, precision_details_raw = calculate_precision(map_a_raw, map_b_raw)
|
| 125 |
mean_recall_raw, recall_details_raw = calculate_recall(map_a_raw, map_b_raw)
|
| 126 |
|
| 127 |
+
# [COMBINE_RAW] Combine details for RAW
|
| 128 |
combined_details_raw = {}
|
| 129 |
for class_name in iou_details_raw.keys():
|
| 130 |
combined_details_raw[class_name] = {
|
|
|
|
| 133 |
"recall": recall_details_raw.get(class_name, 0.0)
|
| 134 |
}
|
| 135 |
|
| 136 |
+
# [CORRECTED_METRICS] Metrics for CORRECTED segmentation (with spectral corrections)
|
| 137 |
+
print(" [CORRECTED] Computing metrics for corrected segmentation (with spectral indices)...")
|
| 138 |
acc = calculate_accuracy(map_a, map_b)
|
| 139 |
miou, iou_details = calculate_miou(map_a, map_b)
|
| 140 |
fw_iou = calculate_fw_iou(map_a, map_b)
|
| 141 |
dice = calculate_dice_score(map_a, map_b)
|
| 142 |
|
| 143 |
+
# [NEW_METRICS] Invoke precision and recall functions
|
| 144 |
mean_precision, precision_details = calculate_precision(map_a, map_b)
|
| 145 |
mean_recall, recall_details = calculate_recall(map_a, map_b)
|
| 146 |
|
| 147 |
+
# [COMBINE_CORRECTED] Combine details for CORRECTED
|
| 148 |
combined_details = {}
|
| 149 |
|
| 150 |
+
# [KEYS] Extract class names from IoU details (always computed)
|
| 151 |
for class_name in iou_details.keys():
|
| 152 |
combined_details[class_name] = {
|
| 153 |
"iou": iou_details.get(class_name, 0.0),
|
|
|
|
| 155 |
"recall": recall_details.get(class_name, 0.0)
|
| 156 |
}
|
| 157 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 158 |
return {
|
| 159 |
"status": "success",
|
| 160 |
"metrics": {
|
io-app-backend/metrics.py
CHANGED
|
@@ -1,16 +1,18 @@
|
|
| 1 |
import numpy as np
|
| 2 |
-
#
|
| 3 |
-
from terramindFunctions import ESA_CLASSES
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
import numpy as np
|
| 7 |
from terramindFunctions import ESA_CLASSES
|
| 8 |
|
| 9 |
def calculate_precision(pred_map, target_map):
|
| 10 |
"""
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
"""
|
| 15 |
classes_in_target = np.unique(target_map)
|
| 16 |
precision_list = []
|
|
@@ -23,16 +25,16 @@ def calculate_precision(pred_map, target_map):
|
|
| 23 |
t_mask = (target_map == cls)
|
| 24 |
|
| 25 |
true_positives = np.logical_and(p_mask, t_mask).sum()
|
| 26 |
-
false_positives = np.logical_and(p_mask, ~t_mask).sum()
|
| 27 |
|
| 28 |
if (true_positives + false_positives) > 0:
|
| 29 |
precision = true_positives / (true_positives + false_positives)
|
| 30 |
else:
|
| 31 |
-
precision = 0.0
|
| 32 |
|
| 33 |
precision_list.append(precision)
|
| 34 |
|
| 35 |
-
class_name = ESA_CLASSES.get(cls, f"
|
| 36 |
details[class_name] = precision * 100.0
|
| 37 |
|
| 38 |
mean_precision = np.mean(precision_list) * 100.0 if precision_list else 0.0
|
|
@@ -41,9 +43,15 @@ def calculate_precision(pred_map, target_map):
|
|
| 41 |
|
| 42 |
def calculate_recall(pred_map, target_map):
|
| 43 |
"""
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
"""
|
| 48 |
classes_in_target = np.unique(target_map)
|
| 49 |
recall_list = []
|
|
@@ -56,16 +64,16 @@ def calculate_recall(pred_map, target_map):
|
|
| 56 |
t_mask = (target_map == cls)
|
| 57 |
|
| 58 |
true_positives = np.logical_and(p_mask, t_mask).sum()
|
| 59 |
-
false_negatives = np.logical_and(~p_mask, t_mask).sum()
|
| 60 |
|
| 61 |
if (true_positives + false_negatives) > 0:
|
| 62 |
recall = true_positives / (true_positives + false_negatives)
|
| 63 |
else:
|
| 64 |
-
recall = 0.0
|
| 65 |
|
| 66 |
recall_list.append(recall)
|
| 67 |
|
| 68 |
-
class_name = ESA_CLASSES.get(cls, f"
|
| 69 |
details[class_name] = recall * 100.0
|
| 70 |
|
| 71 |
mean_recall = np.mean(recall_list) * 100.0 if recall_list else 0.0
|
|
@@ -73,31 +81,38 @@ def calculate_recall(pred_map, target_map):
|
|
| 73 |
|
| 74 |
def calculate_dice_score(pred_map, target_map):
|
| 75 |
"""
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 79 |
"""
|
| 80 |
classes_in_target = np.unique(target_map)
|
| 81 |
dice_list = []
|
| 82 |
|
| 83 |
for cls in classes_in_target:
|
| 84 |
-
#
|
| 85 |
if cls == 0:
|
| 86 |
continue
|
| 87 |
|
| 88 |
-
#
|
| 89 |
p_mask = (pred_map == cls)
|
| 90 |
t_mask = (target_map == cls)
|
| 91 |
|
| 92 |
intersection = np.logical_and(p_mask, t_mask).sum()
|
| 93 |
|
| 94 |
-
#
|
| 95 |
area_pred = p_mask.sum()
|
| 96 |
area_target = t_mask.sum()
|
| 97 |
|
| 98 |
-
#
|
| 99 |
if area_pred + area_target == 0:
|
| 100 |
-
dice = 1.0
|
| 101 |
else:
|
| 102 |
dice = (2.0 * intersection) / (area_pred + area_target)
|
| 103 |
|
|
@@ -111,12 +126,19 @@ def calculate_dice_score(pred_map, target_map):
|
|
| 111 |
|
| 112 |
def calculate_accuracy(pred_map, target_map):
|
| 113 |
"""
|
| 114 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 115 |
"""
|
| 116 |
p = pred_map.flatten()
|
| 117 |
t = target_map.flatten()
|
| 118 |
|
| 119 |
-
#
|
| 120 |
valid_mask = (t != 0)
|
| 121 |
|
| 122 |
if np.sum(valid_mask) == 0:
|
|
@@ -129,19 +151,28 @@ def calculate_accuracy(pred_map, target_map):
|
|
| 129 |
|
| 130 |
def calculate_miou(pred_map, target_map, verbose=False):
|
| 131 |
"""
|
| 132 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
"""
|
| 134 |
-
#
|
| 135 |
classes_in_target = np.unique(target_map)
|
| 136 |
iou_list = []
|
| 137 |
class_report = {}
|
| 138 |
|
| 139 |
for cls in classes_in_target:
|
| 140 |
-
#
|
| 141 |
if cls == 0:
|
| 142 |
continue
|
| 143 |
|
| 144 |
-
#
|
| 145 |
p_mask = (pred_map == cls)
|
| 146 |
t_mask = (target_map == cls)
|
| 147 |
|
|
@@ -152,8 +183,8 @@ def calculate_miou(pred_map, target_map, verbose=False):
|
|
| 152 |
iou = intersection / union
|
| 153 |
iou_list.append(iou)
|
| 154 |
|
| 155 |
-
#
|
| 156 |
-
class_name = ESA_CLASSES.get(cls, f"
|
| 157 |
class_report[class_name] = iou * 100.0
|
| 158 |
|
| 159 |
if len(iou_list) == 0:
|
|
@@ -161,63 +192,33 @@ def calculate_miou(pred_map, target_map, verbose=False):
|
|
| 161 |
|
| 162 |
miou = np.mean(iou_list) * 100.0
|
| 163 |
|
| 164 |
-
#
|
| 165 |
if verbose:
|
| 166 |
-
print("\n
|
| 167 |
for name, score in class_report.items():
|
| 168 |
print(f"{name:<20}: {score:.2f}%")
|
| 169 |
|
| 170 |
return miou, class_report
|
| 171 |
|
| 172 |
-
def
|
| 173 |
-
"""
|
| 174 |
-
Oblicza ostateczny wynik jako średnią z trzech metryk:
|
| 175 |
-
1. Pixel Accuracy
|
| 176 |
-
2. mIoU
|
| 177 |
-
3. fwIoU
|
| 178 |
-
"""
|
| 179 |
-
return (accuracy + miou + fwiou) / 3.0
|
| 180 |
-
|
| 181 |
-
def print_report(accuracy, miou, final_score=None, class_details=None, model_name="Model"):
|
| 182 |
-
"""
|
| 183 |
-
Wyświetla ładny, czytelny raport w konsoli.
|
| 184 |
"""
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
if class_details:
|
| 190 |
-
print(f"{'KLASA':<25} | {'IoU':<10}")
|
| 191 |
-
print("-" * 40)
|
| 192 |
-
for name, score in class_details.items():
|
| 193 |
-
print(f"{name:<25} | {score:.2f}%")
|
| 194 |
-
print("-" * 40)
|
| 195 |
-
|
| 196 |
-
print(f"Pixel Accuracy (Ogólna): {accuracy:.2f}%")
|
| 197 |
-
print(f"mIoU (Średnia z klas): {miou:.2f}%")
|
| 198 |
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
print("=" * 40 + "\n")
|
| 203 |
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
# ... (reszta pliku bez zmian) ...
|
| 208 |
-
|
| 209 |
-
def calculate_fw_iou(pred_map, target_map):
|
| 210 |
-
"""
|
| 211 |
-
Oblicza Frequency Weighted IoU.
|
| 212 |
-
Wagi zależą od tego, jak często dana klasa występuje na obrazku (Target).
|
| 213 |
-
To daje "sprawiedliwszy" wynik wizualny (duży las ma większą wagę niż mała rzeczka).
|
| 214 |
"""
|
| 215 |
classes_in_target = np.unique(target_map)
|
| 216 |
|
| 217 |
fw_iou_sum = 0.0
|
| 218 |
total_valid_pixels = 0
|
| 219 |
|
| 220 |
-
#
|
| 221 |
for cls in classes_in_target:
|
| 222 |
if cls == 0: continue
|
| 223 |
total_valid_pixels += np.sum(target_map == cls)
|
|
@@ -225,11 +226,11 @@ def calculate_fw_iou(pred_map, target_map):
|
|
| 225 |
if total_valid_pixels == 0:
|
| 226 |
return 0.0
|
| 227 |
|
| 228 |
-
#
|
| 229 |
for cls in classes_in_target:
|
| 230 |
if cls == 0: continue
|
| 231 |
|
| 232 |
-
# IoU
|
| 233 |
p_mask = (pred_map == cls)
|
| 234 |
t_mask = (target_map == cls)
|
| 235 |
|
|
@@ -240,10 +241,10 @@ def calculate_fw_iou(pred_map, target_map):
|
|
| 240 |
if union > 0:
|
| 241 |
iou = intersection / union
|
| 242 |
|
| 243 |
-
#
|
| 244 |
frequency = np.sum(t_mask) / total_valid_pixels
|
| 245 |
|
| 246 |
-
#
|
| 247 |
fw_iou_sum += (frequency * iou)
|
| 248 |
|
| 249 |
return fw_iou_sum * 100.0
|
|
|
|
| 1 |
import numpy as np
|
| 2 |
+
# [IMPORTS] Import ESA class names dictionary for metric interpretation
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
from terramindFunctions import ESA_CLASSES
|
| 4 |
|
| 5 |
def calculate_precision(pred_map, target_map):
|
| 6 |
"""
|
| 7 |
+
Calculates Precision metric: "What percentage of predictions are correct?"
|
| 8 |
+
Formula: TP / (TP + FP)
|
| 9 |
+
|
| 10 |
+
Args:
|
| 11 |
+
pred_map: numpy array with predicted class codes
|
| 12 |
+
target_map: numpy array with ground truth class codes
|
| 13 |
+
|
| 14 |
+
Returns:
|
| 15 |
+
tuple of (mean_precision_percent, class_details_dict)
|
| 16 |
"""
|
| 17 |
classes_in_target = np.unique(target_map)
|
| 18 |
precision_list = []
|
|
|
|
| 25 |
t_mask = (target_map == cls)
|
| 26 |
|
| 27 |
true_positives = np.logical_and(p_mask, t_mask).sum()
|
| 28 |
+
false_positives = np.logical_and(p_mask, ~t_mask).sum() # [FP] Model predicted: YES, Truth: NO
|
| 29 |
|
| 30 |
if (true_positives + false_positives) > 0:
|
| 31 |
precision = true_positives / (true_positives + false_positives)
|
| 32 |
else:
|
| 33 |
+
precision = 0.0 # [NOTE] If model didn't detect this class, precision = 0 (safe default)
|
| 34 |
|
| 35 |
precision_list.append(precision)
|
| 36 |
|
| 37 |
+
class_name = ESA_CLASSES.get(cls, f"Class {cls}")
|
| 38 |
details[class_name] = precision * 100.0
|
| 39 |
|
| 40 |
mean_precision = np.mean(precision_list) * 100.0 if precision_list else 0.0
|
|
|
|
| 43 |
|
| 44 |
def calculate_recall(pred_map, target_map):
|
| 45 |
"""
|
| 46 |
+
Calculates Recall metric: "What percentage of true objects did the model find?"
|
| 47 |
+
Formula: TP / (TP + FN)
|
| 48 |
+
|
| 49 |
+
Args:
|
| 50 |
+
pred_map: numpy array with predicted class codes
|
| 51 |
+
target_map: numpy array with ground truth class codes
|
| 52 |
+
|
| 53 |
+
Returns:
|
| 54 |
+
tuple of (mean_recall_percent, class_details_dict)
|
| 55 |
"""
|
| 56 |
classes_in_target = np.unique(target_map)
|
| 57 |
recall_list = []
|
|
|
|
| 64 |
t_mask = (target_map == cls)
|
| 65 |
|
| 66 |
true_positives = np.logical_and(p_mask, t_mask).sum()
|
| 67 |
+
false_negatives = np.logical_and(~p_mask, t_mask).sum() # [FN] Model predicted: NO, Truth: YES
|
| 68 |
|
| 69 |
if (true_positives + false_negatives) > 0:
|
| 70 |
recall = true_positives / (true_positives + false_negatives)
|
| 71 |
else:
|
| 72 |
+
recall = 0.0 # [NOTE] Shouldn't happen since iterating over target classes
|
| 73 |
|
| 74 |
recall_list.append(recall)
|
| 75 |
|
| 76 |
+
class_name = ESA_CLASSES.get(cls, f"Class {cls}")
|
| 77 |
details[class_name] = recall * 100.0
|
| 78 |
|
| 79 |
mean_recall = np.mean(recall_list) * 100.0 if recall_list else 0.0
|
|
|
|
| 81 |
|
| 82 |
def calculate_dice_score(pred_map, target_map):
|
| 83 |
"""
|
| 84 |
+
Calculates Dice coefficient (F1-Score for pixels).
|
| 85 |
+
Returns mean Dice score across all classes.
|
| 86 |
+
Dice score is typically higher than IoU for the same data.
|
| 87 |
+
|
| 88 |
+
Args:
|
| 89 |
+
pred_map: numpy array with predicted class codes
|
| 90 |
+
target_map: numpy array with ground truth class codes
|
| 91 |
+
|
| 92 |
+
Returns:
|
| 93 |
+
float - mean Dice score as percentage
|
| 94 |
"""
|
| 95 |
classes_in_target = np.unique(target_map)
|
| 96 |
dice_list = []
|
| 97 |
|
| 98 |
for cls in classes_in_target:
|
| 99 |
+
# [SKIP] Skip class 0 (No data / No Data)
|
| 100 |
if cls == 0:
|
| 101 |
continue
|
| 102 |
|
| 103 |
+
# [MASKS] Binary masks for current class
|
| 104 |
p_mask = (pred_map == cls)
|
| 105 |
t_mask = (target_map == cls)
|
| 106 |
|
| 107 |
intersection = np.logical_and(p_mask, t_mask).sum()
|
| 108 |
|
| 109 |
+
# [AREAS] Number of pixels in prediction and target
|
| 110 |
area_pred = p_mask.sum()
|
| 111 |
area_target = t_mask.sum()
|
| 112 |
|
| 113 |
+
# [SAFE] Prevent division by zero
|
| 114 |
if area_pred + area_target == 0:
|
| 115 |
+
dice = 1.0 # Both empty = perfect match
|
| 116 |
else:
|
| 117 |
dice = (2.0 * intersection) / (area_pred + area_target)
|
| 118 |
|
|
|
|
| 126 |
|
| 127 |
def calculate_accuracy(pred_map, target_map):
|
| 128 |
"""
|
| 129 |
+
Calculates Pixel Accuracy (percentage of correctly classified pixels).
|
| 130 |
+
|
| 131 |
+
Args:
|
| 132 |
+
pred_map: numpy array with predicted class codes
|
| 133 |
+
target_map: numpy array with ground truth class codes
|
| 134 |
+
|
| 135 |
+
Returns:
|
| 136 |
+
float - pixel accuracy as percentage
|
| 137 |
"""
|
| 138 |
p = pred_map.flatten()
|
| 139 |
t = target_map.flatten()
|
| 140 |
|
| 141 |
+
# [VALID] Only compare pixels where target doesn't have "No data" (class 0)
|
| 142 |
valid_mask = (t != 0)
|
| 143 |
|
| 144 |
if np.sum(valid_mask) == 0:
|
|
|
|
| 151 |
|
| 152 |
def calculate_miou(pred_map, target_map, verbose=False):
|
| 153 |
"""
|
| 154 |
+
Calculates mean Intersection over Union (mIoU) metric.
|
| 155 |
+
Optionally prints per-class results if verbose=True.
|
| 156 |
+
|
| 157 |
+
Args:
|
| 158 |
+
pred_map: numpy array with predicted class codes
|
| 159 |
+
target_map: numpy array with ground truth class codes
|
| 160 |
+
verbose: if True, print per-class IoU scores
|
| 161 |
+
|
| 162 |
+
Returns:
|
| 163 |
+
tuple of (mean_iou_percent, class_iou_dict)
|
| 164 |
"""
|
| 165 |
+
# [FIND_CLASSES] Find all classes present in ground truth
|
| 166 |
classes_in_target = np.unique(target_map)
|
| 167 |
iou_list = []
|
| 168 |
class_report = {}
|
| 169 |
|
| 170 |
for cls in classes_in_target:
|
| 171 |
+
# [SKIP] Skip class 0 (No data / No Data)
|
| 172 |
if cls == 0:
|
| 173 |
continue
|
| 174 |
|
| 175 |
+
# [MASKS] Create binary masks (where this class appears)
|
| 176 |
p_mask = (pred_map == cls)
|
| 177 |
t_mask = (target_map == cls)
|
| 178 |
|
|
|
|
| 183 |
iou = intersection / union
|
| 184 |
iou_list.append(iou)
|
| 185 |
|
| 186 |
+
# [CLASS_NAME] Get class name from dictionary
|
| 187 |
+
class_name = ESA_CLASSES.get(cls, f"Class {cls}")
|
| 188 |
class_report[class_name] = iou * 100.0
|
| 189 |
|
| 190 |
if len(iou_list) == 0:
|
|
|
|
| 192 |
|
| 193 |
miou = np.mean(iou_list) * 100.0
|
| 194 |
|
| 195 |
+
# [VERBOSE] If verbose=True, print details to console
|
| 196 |
if verbose:
|
| 197 |
+
print("\n[IoU] IoU details per class")
|
| 198 |
for name, score in class_report.items():
|
| 199 |
print(f"{name:<20}: {score:.2f}%")
|
| 200 |
|
| 201 |
return miou, class_report
|
| 202 |
|
| 203 |
+
def calculate_fw_iou(pred_map, target_map):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 204 |
"""
|
| 205 |
+
Calculates Frequency Weighted IoU.
|
| 206 |
+
Weights depend on class frequency in target image.
|
| 207 |
+
Provides "fairer" visual results (large forest weighted more than small river).
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 208 |
|
| 209 |
+
Args:
|
| 210 |
+
pred_map: numpy array with predicted class codes
|
| 211 |
+
target_map: numpy array with ground truth class codes
|
|
|
|
| 212 |
|
| 213 |
+
Returns:
|
| 214 |
+
float - frequency weighted IoU as percentage
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 215 |
"""
|
| 216 |
classes_in_target = np.unique(target_map)
|
| 217 |
|
| 218 |
fw_iou_sum = 0.0
|
| 219 |
total_valid_pixels = 0
|
| 220 |
|
| 221 |
+
# [TOTAL] Sum all valid pixels (excluding class 0 - no data)
|
| 222 |
for cls in classes_in_target:
|
| 223 |
if cls == 0: continue
|
| 224 |
total_valid_pixels += np.sum(target_map == cls)
|
|
|
|
| 226 |
if total_valid_pixels == 0:
|
| 227 |
return 0.0
|
| 228 |
|
| 229 |
+
# [WEIGHTED] Compute weighted IoU for each class
|
| 230 |
for cls in classes_in_target:
|
| 231 |
if cls == 0: continue
|
| 232 |
|
| 233 |
+
# [IOU] IoU for this class
|
| 234 |
p_mask = (pred_map == cls)
|
| 235 |
t_mask = (target_map == cls)
|
| 236 |
|
|
|
|
| 241 |
if union > 0:
|
| 242 |
iou = intersection / union
|
| 243 |
|
| 244 |
+
# [FREQUENCY] Class frequency (weight)
|
| 245 |
frequency = np.sum(t_mask) / total_valid_pixels
|
| 246 |
|
| 247 |
+
# [ADD] Add to sum: Weight * IoU
|
| 248 |
fw_iou_sum += (frequency * iou)
|
| 249 |
|
| 250 |
return fw_iou_sum * 100.0
|
io-app-backend/plotting_utils.py
DELETED
|
@@ -1,429 +0,0 @@
|
|
| 1 |
-
|
| 2 |
-
import torch
|
| 3 |
-
import numpy as np
|
| 4 |
-
import textwrap
|
| 5 |
-
import matplotlib.pyplot as plt
|
| 6 |
-
from matplotlib.colors import hex2color, LinearSegmentedColormap
|
| 7 |
-
|
| 8 |
-
# Plotting utils
|
| 9 |
-
COLORBLIND_HEX = ["#000000", "#3171AD", "#469C76", '#83CA70', "#EAE159", "#C07CB8", "#C19368", "#6FB2E4", "#F1F1F1",
|
| 10 |
-
"#C66526"]
|
| 11 |
-
COLORBLIND_RGB = [hex2color(hex) for hex in COLORBLIND_HEX]
|
| 12 |
-
lulc_cmap = LinearSegmentedColormap.from_list('lulc', COLORBLIND_RGB, N=10)
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
def rgb_smooth_quantiles(array, tolerance=0.02, scaling=0.5, default=2000):
|
| 16 |
-
"""
|
| 17 |
-
array: numpy array with dimensions [C, H, W]
|
| 18 |
-
returns 0-1 scaled array
|
| 19 |
-
"""
|
| 20 |
-
|
| 21 |
-
# Get scaling thresholds for smoothing the brightness
|
| 22 |
-
limit_low, median, limit_high = np.quantile(array, q=[tolerance, 0.5, 1. - tolerance])
|
| 23 |
-
limit_high = limit_high.clip(default) # Scale only pixels above default value
|
| 24 |
-
limit_low = limit_low.clip(0, 1000) # Scale only pixels below 1000
|
| 25 |
-
limit_low = np.where(median > default / 2, limit_low, 0) # Make image only darker if it is not dark already
|
| 26 |
-
|
| 27 |
-
# Smooth very dark and bright values using linear scaling
|
| 28 |
-
array = np.where(array >= limit_low, array, limit_low + (array - limit_low) * scaling)
|
| 29 |
-
array = np.where(array <= limit_high, array, limit_high + (array - limit_high) * scaling)
|
| 30 |
-
|
| 31 |
-
# Update scaling params using a 10th of the tolerance for max value
|
| 32 |
-
limit_low, limit_high = np.quantile(array, q=[tolerance/10, 1. - tolerance/10])
|
| 33 |
-
limit_high = limit_high.clip(default, 20000) # Scale only pixels above default value
|
| 34 |
-
limit_low = limit_low.clip(0, 500) # Scale only pixels below 500
|
| 35 |
-
limit_low = np.where(median > default / 2, limit_low, 0) # Make image only darker if it is not dark already
|
| 36 |
-
|
| 37 |
-
# Scale data to 0-255
|
| 38 |
-
array = (array - limit_low) / (limit_high - limit_low)
|
| 39 |
-
|
| 40 |
-
return array
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
def s2_to_rgb(data, smooth_quantiles=False):
|
| 44 |
-
if isinstance(data, torch.Tensor):
|
| 45 |
-
# to numpy
|
| 46 |
-
data = data.clone().cpu().numpy()
|
| 47 |
-
if len(data.shape) == 4:
|
| 48 |
-
# Remove batch dim
|
| 49 |
-
data = data[0]
|
| 50 |
-
|
| 51 |
-
# Select
|
| 52 |
-
if data.shape[0] > 13:
|
| 53 |
-
# assuming channel last
|
| 54 |
-
rgb = data[:, :, [3, 2, 1]]
|
| 55 |
-
else:
|
| 56 |
-
# assuming channel first
|
| 57 |
-
rgb = data[[3, 2, 1]].transpose((1, 2, 0))
|
| 58 |
-
|
| 59 |
-
if smooth_quantiles:
|
| 60 |
-
rgb = rgb_smooth_quantiles(rgb)
|
| 61 |
-
else:
|
| 62 |
-
rgb = rgb / 2000
|
| 63 |
-
|
| 64 |
-
# to uint8
|
| 65 |
-
rgb = (rgb * 255).round().clip(0, 255).astype(np.uint8)
|
| 66 |
-
|
| 67 |
-
return rgb
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
def s1_to_rgb(data):
|
| 71 |
-
if isinstance(data, torch.Tensor):
|
| 72 |
-
# to numpy
|
| 73 |
-
data = data.clone().cpu().numpy()
|
| 74 |
-
if len(data.shape) == 4:
|
| 75 |
-
# Remove batch dim
|
| 76 |
-
data = data[0]
|
| 77 |
-
|
| 78 |
-
vv = data[0]
|
| 79 |
-
vh = data[1]
|
| 80 |
-
r = (vv + 30) / 40 # scale -30 to +10
|
| 81 |
-
g = (vh + 40) / 40 # scale -40 to +0
|
| 82 |
-
b = vv / vh.clip(-40, -1) / 1.5 # VV / VH
|
| 83 |
-
|
| 84 |
-
rgb = np.dstack([r, g, b])
|
| 85 |
-
rgb = (rgb * 255).round().clip(0, 255).astype(np.uint8)
|
| 86 |
-
return rgb
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
def s1_to_power(data):
|
| 90 |
-
# Convert dB to power
|
| 91 |
-
data = 10 ** (data / 10)
|
| 92 |
-
return data * 10000
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
def s1_power_to_rgb(data):
|
| 96 |
-
if isinstance(data, torch.Tensor):
|
| 97 |
-
# to numpy
|
| 98 |
-
data = data.clone().cpu().numpy()
|
| 99 |
-
if len(data.shape) == 4:
|
| 100 |
-
# Remove batch dim
|
| 101 |
-
data = data[0]
|
| 102 |
-
|
| 103 |
-
vv = data[0]
|
| 104 |
-
vh = data[1]
|
| 105 |
-
r = vv / 500
|
| 106 |
-
g = vh / 2200
|
| 107 |
-
b = vv / vh / 2
|
| 108 |
-
|
| 109 |
-
rgb = np.dstack([r, g, b])
|
| 110 |
-
rgb = (rgb * 255).round().clip(0, 255).astype(np.uint8)
|
| 111 |
-
return rgb
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
def dem_to_rgb(data, cmap='BrBG_r', buffer=5):
|
| 115 |
-
if isinstance(data, torch.Tensor):
|
| 116 |
-
# to numpy
|
| 117 |
-
data = data.clone().cpu().numpy()
|
| 118 |
-
while len(data.shape) > 2:
|
| 119 |
-
# Remove batch dim etc.
|
| 120 |
-
data = data[0]
|
| 121 |
-
|
| 122 |
-
# Add 10m buffer to highlight flat areas
|
| 123 |
-
data_min, data_max = data.min(), data.max()
|
| 124 |
-
data_min -= buffer
|
| 125 |
-
data_max += buffer
|
| 126 |
-
data = (data - data_min) / (data_max - data_min + 1e-6)
|
| 127 |
-
|
| 128 |
-
rgb = plt.get_cmap(cmap)(data)[:, :, :3]
|
| 129 |
-
rgb = (rgb * 255).round().clip(0, 255).astype(np.uint8)
|
| 130 |
-
return rgb
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
def ndvi_to_rgb(data, cmap='RdYlGn'):
|
| 134 |
-
if isinstance(data, torch.Tensor):
|
| 135 |
-
# to numpy
|
| 136 |
-
data = data.clone().cpu().numpy()
|
| 137 |
-
while len(data.shape) > 2:
|
| 138 |
-
# Remove batch dim etc.
|
| 139 |
-
data = data[0]
|
| 140 |
-
|
| 141 |
-
# Scale NDVI to 0-1
|
| 142 |
-
data = (data + 1) / 2
|
| 143 |
-
|
| 144 |
-
rgb = plt.get_cmap(cmap)(data)[:, :, :3]
|
| 145 |
-
rgb = (rgb * 255).round().clip(0, 255).astype(np.uint8)
|
| 146 |
-
return rgb
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
def lulc_to_rgb(data, cmap=lulc_cmap, num_classes=10):
|
| 150 |
-
while len(data.shape) > 2:
|
| 151 |
-
if data.shape[0] == num_classes:
|
| 152 |
-
data = data.argmax(axis=0) # First dim are class logits
|
| 153 |
-
else:
|
| 154 |
-
# Remove batch dim
|
| 155 |
-
data = data[0]
|
| 156 |
-
|
| 157 |
-
rgb = cmap(data)[:, :, :3]
|
| 158 |
-
rgb = (rgb * 255).round().clip(0, 255).astype(np.uint8)
|
| 159 |
-
return rgb
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
def coords_to_text(data):
|
| 163 |
-
if isinstance(data, torch.Tensor):
|
| 164 |
-
data = data.clone().cpu().numpy()
|
| 165 |
-
if len(data.shape) > 1:
|
| 166 |
-
# Remove batch dim etc.
|
| 167 |
-
data = data[0]
|
| 168 |
-
if data.shape[0] > 2:
|
| 169 |
-
# Not coords
|
| 170 |
-
return str(data)
|
| 171 |
-
else:
|
| 172 |
-
|
| 173 |
-
return f'lon={data[0]:.2f}, lat={data[1]:.2f}'
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
def plot_s2(data, ax=None, smooth_quantiles=False, *args, **kwargs):
|
| 177 |
-
rgb = s2_to_rgb(data, smooth_quantiles=smooth_quantiles)
|
| 178 |
-
|
| 179 |
-
if ax is None:
|
| 180 |
-
plt.imshow(rgb)
|
| 181 |
-
plt.axis('off')
|
| 182 |
-
plt.show()
|
| 183 |
-
else:
|
| 184 |
-
ax.imshow(rgb)
|
| 185 |
-
ax.axis('off')
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
def plot_s1(data, ax=None, power=False, *args, **kwargs):
|
| 189 |
-
if power:
|
| 190 |
-
data = s1_to_power(data)
|
| 191 |
-
rgb = s1_power_to_rgb(data)
|
| 192 |
-
else:
|
| 193 |
-
rgb = s1_to_rgb(data)
|
| 194 |
-
|
| 195 |
-
if ax is None:
|
| 196 |
-
plt.imshow(rgb)
|
| 197 |
-
plt.axis('off')
|
| 198 |
-
plt.show()
|
| 199 |
-
else:
|
| 200 |
-
ax.imshow(rgb)
|
| 201 |
-
ax.axis('off')
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
def plot_dem(data, ax=None, *args, **kwargs):
|
| 205 |
-
if isinstance(data, torch.Tensor):
|
| 206 |
-
# to numpy
|
| 207 |
-
data = data.clone().cpu().numpy()
|
| 208 |
-
while len(data.shape) > 2:
|
| 209 |
-
# Remove batch dim etc.
|
| 210 |
-
data = data[0]
|
| 211 |
-
|
| 212 |
-
# Add 10m buffer to highlight flat areas
|
| 213 |
-
data_min, data_max = data.min(), data.max()
|
| 214 |
-
data_min -= 5
|
| 215 |
-
data_max += 5
|
| 216 |
-
data = (data - data_min) / (data_max - data_min + 1e-6)
|
| 217 |
-
|
| 218 |
-
data = (data * 255).round().clip(0, 255).astype(np.uint8)
|
| 219 |
-
|
| 220 |
-
if ax is None:
|
| 221 |
-
plt.imshow(data, vmin=0, vmax=255, cmap='BrBG_r')
|
| 222 |
-
plt.axis('off')
|
| 223 |
-
plt.show()
|
| 224 |
-
else:
|
| 225 |
-
ax.imshow(data, vmin=0, vmax=255, cmap='BrBG_r')
|
| 226 |
-
ax.axis('off')
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
def plot_lulc(data, ax=None, num_classes=10, *args, **kwargs):
|
| 230 |
-
if isinstance(data, torch.Tensor):
|
| 231 |
-
# to numpy
|
| 232 |
-
data = data.clone().cpu().numpy()
|
| 233 |
-
while len(data.shape) > 2:
|
| 234 |
-
if data.shape[0] == num_classes:
|
| 235 |
-
data = data.argmax(axis=0) # First dim are class logits
|
| 236 |
-
else:
|
| 237 |
-
# Remove batch dim
|
| 238 |
-
data = data[0]
|
| 239 |
-
|
| 240 |
-
if ax is None:
|
| 241 |
-
plt.imshow(data, vmin=0, vmax=num_classes-1, cmap=lulc_cmap, interpolation='nearest')
|
| 242 |
-
plt.axis('off')
|
| 243 |
-
plt.show()
|
| 244 |
-
else:
|
| 245 |
-
ax.imshow(data, vmin=0, vmax=num_classes-1, cmap=lulc_cmap, interpolation='nearest')
|
| 246 |
-
ax.axis('off')
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
def plot_ndvi(data, ax=None, *args, **kwargs):
|
| 250 |
-
if isinstance(data, torch.Tensor):
|
| 251 |
-
# to numpy
|
| 252 |
-
data = data.clone().cpu().numpy()
|
| 253 |
-
while len(data.shape) > 2:
|
| 254 |
-
# Remove batch dim etc.
|
| 255 |
-
data = data[0]
|
| 256 |
-
|
| 257 |
-
if ax is None:
|
| 258 |
-
plt.imshow(data, vmin=-1, vmax=+1, cmap='RdYlGn')
|
| 259 |
-
plt.axis('off')
|
| 260 |
-
plt.show()
|
| 261 |
-
else:
|
| 262 |
-
ax.imshow(data, vmin=-1, vmax=+1, cmap='RdYlGn')
|
| 263 |
-
ax.axis('off')
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
def wrap_text(text, ax, font_size):
|
| 267 |
-
# Get the width of the axis in pixels
|
| 268 |
-
bbox = ax.get_window_extent()
|
| 269 |
-
width, height = bbox.width, bbox.height
|
| 270 |
-
|
| 271 |
-
# Calculate the number of characters per line
|
| 272 |
-
char_width = font_size * 0.6 # Approximate width of a character
|
| 273 |
-
max_chars_per_line = int(width / char_width * 0.75)
|
| 274 |
-
max_lines = int(height / font_size * 0.5)
|
| 275 |
-
|
| 276 |
-
# Wrap the text
|
| 277 |
-
wrapped_text = textwrap.wrap(text, width=max_chars_per_line)
|
| 278 |
-
|
| 279 |
-
if len(wrapped_text) > max_lines:
|
| 280 |
-
wrapped_text = wrapped_text[:max_lines]
|
| 281 |
-
wrapped_text[-1] += '...'
|
| 282 |
-
|
| 283 |
-
return '\n'.join(wrapped_text)
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
def plot_text(data, ax=None, *args, **kwargs):
|
| 287 |
-
if isinstance(data, str):
|
| 288 |
-
text = data
|
| 289 |
-
elif isinstance(data, torch.Tensor) or isinstance(data, np.ndarray):
|
| 290 |
-
# assuming coordinates
|
| 291 |
-
text = coords_to_text(data)
|
| 292 |
-
else:
|
| 293 |
-
raise ValueError()
|
| 294 |
-
|
| 295 |
-
font_size = 14 if len(text) > 150 else 20
|
| 296 |
-
|
| 297 |
-
if ax is None:
|
| 298 |
-
fig, ax = plt.subplots()
|
| 299 |
-
wrapped_text = wrap_text(text, ax, font_size)
|
| 300 |
-
ax.text(0.5, 0.5, wrapped_text, fontsize=font_size, ha='center', va='center', wrap=True)
|
| 301 |
-
ax.set_xticks([])
|
| 302 |
-
ax.set_yticks([])
|
| 303 |
-
plt.show()
|
| 304 |
-
else:
|
| 305 |
-
wrapped_text = wrap_text(text, ax, font_size)
|
| 306 |
-
ax.text(0.5, 0.5, wrapped_text, fontsize=font_size, ha='center', va='center', wrap=True)
|
| 307 |
-
ax.set_xticks([])
|
| 308 |
-
ax.set_yticks([])
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
def plot_modality(modality, data, ax=None, **kwargs):
|
| 312 |
-
if 's2' in modality.lower():
|
| 313 |
-
plot_s2(data, ax=ax, **kwargs)
|
| 314 |
-
elif 's1' in modality.lower():
|
| 315 |
-
plot_s1(data, ax=ax, **kwargs)
|
| 316 |
-
elif 'dem' in modality.lower():
|
| 317 |
-
plot_dem(data, ax=ax, **kwargs)
|
| 318 |
-
elif 'ndvi' in modality.lower():
|
| 319 |
-
plot_ndvi(data, ax=ax, **kwargs)
|
| 320 |
-
elif 'lulc' in modality.lower():
|
| 321 |
-
plot_lulc(data, ax=ax, **kwargs)
|
| 322 |
-
elif 'coords' in modality.lower() or 'caption' in modality.lower() or 'text' in modality.lower():
|
| 323 |
-
plot_text(data, ax=ax, **kwargs)
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
# Metryki v0.2 (patrz terramind_generation.ipynb na komórke z "print(f"⏳ Ładowanie modelu: {model_name}...")")
|
| 328 |
-
|
| 329 |
-
def calculate_lulc_score(pred_tensor, target_tensor, num_classes=10):
|
| 330 |
-
"""
|
| 331 |
-
Porównanie map LULC:
|
| 332 |
-
- Pixel Accuracy
|
| 333 |
-
- mIoU
|
| 334 |
-
- Final Score (0–100)
|
| 335 |
-
|
| 336 |
-
Teacher (Large) = proxy ground truth
|
| 337 |
-
"""
|
| 338 |
-
|
| 339 |
-
if isinstance(pred_tensor, torch.Tensor):
|
| 340 |
-
pred_tensor = pred_tensor.detach().cpu()
|
| 341 |
-
if isinstance(target_tensor, torch.Tensor):
|
| 342 |
-
target_tensor = target_tensor.detach().cpu()
|
| 343 |
-
|
| 344 |
-
if pred_tensor.ndim == 4:
|
| 345 |
-
pred_tensor = pred_tensor[0]
|
| 346 |
-
if target_tensor.ndim == 4:
|
| 347 |
-
target_tensor = target_tensor[0]
|
| 348 |
-
|
| 349 |
-
if pred_tensor.ndim == 3:
|
| 350 |
-
pred_mask = torch.argmax(pred_tensor, dim=0).numpy()
|
| 351 |
-
else:
|
| 352 |
-
pred_mask = pred_tensor.numpy()
|
| 353 |
-
|
| 354 |
-
if target_tensor.ndim == 3:
|
| 355 |
-
target_mask = torch.argmax(target_tensor, dim=0).numpy()
|
| 356 |
-
else:
|
| 357 |
-
target_mask = target_tensor.numpy()
|
| 358 |
-
|
| 359 |
-
accuracy = (pred_mask == target_mask).sum() / pred_mask.size * 100.0
|
| 360 |
-
|
| 361 |
-
iou_list = []
|
| 362 |
-
for c in range(num_classes):
|
| 363 |
-
pred_c = (pred_mask == c)
|
| 364 |
-
target_c = (target_mask == c)
|
| 365 |
-
|
| 366 |
-
union = np.logical_or(pred_c, target_c).sum()
|
| 367 |
-
intersection = np.logical_and(pred_c, target_c).sum()
|
| 368 |
-
|
| 369 |
-
if union > 0:
|
| 370 |
-
iou_list.append(intersection / union)
|
| 371 |
-
|
| 372 |
-
miou = np.mean(iou_list) * 100.0 if len(iou_list) > 0 else 0.0
|
| 373 |
-
final_score = 0.3 * accuracy + 0.7 * miou
|
| 374 |
-
|
| 375 |
-
return {
|
| 376 |
-
"pixel_accuracy": accuracy,
|
| 377 |
-
"miou": miou,
|
| 378 |
-
"final_score": final_score
|
| 379 |
-
}
|
| 380 |
-
|
| 381 |
-
|
| 382 |
-
def prepare_masks_for_score(*masks):
|
| 383 |
-
"""Prepare one or more label maps for `calculate_lulc_score`.
|
| 384 |
-
|
| 385 |
-
Args:
|
| 386 |
-
*masks: numpy arrays or torch tensors representing label maps (H,W) or (C,H,W) logits.
|
| 387 |
-
|
| 388 |
-
Returns:
|
| 389 |
-
(tensors, num_classes):
|
| 390 |
-
tensors: list of `torch.LongTensor` remapped to contiguous labels 0..N-1
|
| 391 |
-
num_classes: number of unique labels across all inputs
|
| 392 |
-
|
| 393 |
-
This helper will extract class indices if given logits, compute the union of labels,
|
| 394 |
-
remap them to 0..N-1 and return torch tensors suitable for `calculate_lulc_score`.
|
| 395 |
-
"""
|
| 396 |
-
np_masks = []
|
| 397 |
-
for m in masks:
|
| 398 |
-
if isinstance(m, torch.Tensor):
|
| 399 |
-
m = m.detach().cpu()
|
| 400 |
-
if m.ndim == 4:
|
| 401 |
-
m = m[0]
|
| 402 |
-
if m.ndim == 3 and m.shape[0] > 1:
|
| 403 |
-
m = m.argmax(axis=0).numpy()
|
| 404 |
-
else:
|
| 405 |
-
m = m.numpy()
|
| 406 |
-
elif isinstance(m, np.ndarray):
|
| 407 |
-
while m.ndim > 2:
|
| 408 |
-
# remove batch dim
|
| 409 |
-
m = m[0]
|
| 410 |
-
else:
|
| 411 |
-
raise ValueError("Unsupported mask type")
|
| 412 |
-
|
| 413 |
-
np_masks.append(np.array(m, dtype=np.int64))
|
| 414 |
-
|
| 415 |
-
# union of labels across all masks
|
| 416 |
-
if len(np_masks) > 1:
|
| 417 |
-
union = np.unique(np.concatenate([m.ravel() for m in np_masks]))
|
| 418 |
-
else:
|
| 419 |
-
union = np.unique(np_masks[0])
|
| 420 |
-
label_to_idx = {int(l): i for i, l in enumerate(union)}
|
| 421 |
-
|
| 422 |
-
remapped_tensors = []
|
| 423 |
-
for arr in np_masks:
|
| 424 |
-
flat = arr.ravel()
|
| 425 |
-
rem = np.vectorize(lambda x: label_to_idx[int(x)])(flat)
|
| 426 |
-
remapped = rem.reshape(arr.shape).astype(np.int64)
|
| 427 |
-
remapped_tensors.append(torch.from_numpy(remapped))
|
| 428 |
-
|
| 429 |
-
return remapped_tensors, len(union)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
io-app-backend/terramindFunctions.py
CHANGED
|
@@ -1,3 +1,4 @@
|
|
|
|
|
| 1 |
import os
|
| 2 |
import torch
|
| 3 |
import numpy as np
|
|
@@ -12,7 +13,7 @@ from PIL import Image
|
|
| 12 |
import gc
|
| 13 |
|
| 14 |
# =========================================
|
| 15 |
-
#
|
| 16 |
# =========================================
|
| 17 |
|
| 18 |
DEFAULT_BUFFER_KM = 5
|
|
@@ -24,7 +25,7 @@ TIMESTEPS = 50
|
|
| 24 |
BRIGHTNESS_BOOST = 2.5
|
| 25 |
NORMALIZATION_MODE = "offset"
|
| 26 |
|
| 27 |
-
#
|
| 28 |
NDWI_THRESHOLD = 0.1
|
| 29 |
MNDWI_THRESHOLD = 0.1
|
| 30 |
NDVI_THRESHOLD = 0.3
|
|
@@ -35,28 +36,24 @@ USE_WATER_CORRECTION = True
|
|
| 35 |
USE_VEGETATION_CORRECTION = True
|
| 36 |
USE_BUILDING_CORRECTION = True
|
| 37 |
USE_BARE_SOIL_CORRECTION = True
|
| 38 |
-
USE_SNOW_CORRECTION = False
|
| 39 |
-
|
| 40 |
-
SAVE_RESULTS = True
|
| 41 |
-
OUTPUT_FOLDER = "./wyniki"
|
| 42 |
|
| 43 |
# =========================================
|
| 44 |
-
#
|
| 45 |
# =========================================
|
| 46 |
|
| 47 |
ESA_CLASSES = {
|
| 48 |
-
0: "
|
| 49 |
-
10: "
|
| 50 |
-
20: "
|
| 51 |
-
30: "
|
| 52 |
-
40: "
|
| 53 |
-
50: "
|
| 54 |
-
60: "
|
| 55 |
-
70: "
|
| 56 |
-
80: "
|
| 57 |
-
90: "
|
| 58 |
-
95: "
|
| 59 |
-
100: "
|
| 60 |
}
|
| 61 |
|
| 62 |
ESA_COLORS = {
|
|
@@ -80,17 +77,30 @@ INDEX_TO_ESA = {
|
|
| 80 |
}
|
| 81 |
|
| 82 |
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
| 83 |
-
print(f"
|
| 84 |
|
| 85 |
# =========================================
|
| 86 |
-
#
|
| 87 |
# =========================================
|
| 88 |
|
| 89 |
_CURRENT_MODEL = None
|
| 90 |
_CURRENT_MODEL_NAME = None
|
| 91 |
|
| 92 |
def get_model(model_name):
|
| 93 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
global _CURRENT_MODEL, _CURRENT_MODEL_NAME
|
| 95 |
if _CURRENT_MODEL is not None and _CURRENT_MODEL_NAME == model_name:
|
| 96 |
return _CURRENT_MODEL
|
|
@@ -99,14 +109,14 @@ def get_model(model_name):
|
|
| 99 |
if torch.cuda.is_available():
|
| 100 |
torch.cuda.empty_cache()
|
| 101 |
gc.collect()
|
| 102 |
-
print("
|
| 103 |
|
| 104 |
from terratorch import FULL_MODEL_REGISTRY
|
| 105 |
-
print("
|
| 106 |
|
| 107 |
try:
|
| 108 |
model = FULL_MODEL_REGISTRY.build(
|
| 109 |
-
model_name, #
|
| 110 |
modalities=["S2L2A"],
|
| 111 |
output_modalities=["LULC"],
|
| 112 |
pretrained=True,
|
|
@@ -115,29 +125,38 @@ def get_model(model_name):
|
|
| 115 |
|
| 116 |
model.eval()
|
| 117 |
|
| 118 |
-
#
|
| 119 |
_CURRENT_MODEL = model
|
| 120 |
_CURRENT_MODEL_NAME = model_name
|
| 121 |
|
| 122 |
-
print(f"
|
| 123 |
return _CURRENT_MODEL
|
| 124 |
|
| 125 |
except Exception as e:
|
| 126 |
-
print(f"
|
| 127 |
-
#
|
| 128 |
raise e
|
| 129 |
|
| 130 |
# =========================================
|
| 131 |
-
#
|
| 132 |
# =========================================
|
| 133 |
|
| 134 |
def get_coordinates_from_name(place_name):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 135 |
try:
|
| 136 |
from geopy.geocoders import Nominatim
|
| 137 |
geolocator = Nominatim(user_agent="terramind_fast")
|
| 138 |
location = geolocator.geocode(place_name)
|
| 139 |
if location:
|
| 140 |
-
print(f"
|
| 141 |
return location.latitude, location.longitude
|
| 142 |
return None
|
| 143 |
except:
|
|
@@ -145,42 +164,56 @@ def get_coordinates_from_name(place_name):
|
|
| 145 |
|
| 146 |
|
| 147 |
def download_sentinel2(lat, lon, buffer_km, max_cloud_cover, days_back):
|
| 148 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 149 |
import pystac_client
|
| 150 |
import odc.stac
|
|
|
|
| 151 |
import math
|
| 152 |
import numpy as np
|
| 153 |
from datetime import datetime, timedelta
|
| 154 |
from pyproj import Transformer
|
| 155 |
|
| 156 |
-
print(f"
|
| 157 |
|
| 158 |
-
# 1.
|
| 159 |
-
#
|
| 160 |
scale_factor = 1.0 / math.cos(math.radians(lat))
|
| 161 |
|
| 162 |
-
# 2.
|
| 163 |
to_3857 = Transformer.from_crs("EPSG:4326", "EPSG:3857", always_xy=True)
|
| 164 |
to_4326 = Transformer.from_crs("EPSG:3857", "EPSG:4326", always_xy=True)
|
| 165 |
|
| 166 |
-
# 3.
|
| 167 |
center_x, center_y = to_3857.transform(lon, lat)
|
| 168 |
|
| 169 |
-
# 4.
|
| 170 |
-
# half_side = (buffer_km * 1000) / 2 <--
|
| 171 |
-
# half_side = (buffer_km * 1000) <--
|
| 172 |
-
#
|
| 173 |
half_side_mercator = (buffer_km * 1000) * scale_factor
|
| 174 |
|
| 175 |
min_x, min_y = center_x - half_side_mercator, center_y - half_side_mercator
|
| 176 |
max_x, max_y = center_x + half_side_mercator, center_y + half_side_mercator
|
| 177 |
|
| 178 |
-
# 5.
|
| 179 |
west, south = to_4326.transform(min_x, min_y)
|
| 180 |
east, north = to_4326.transform(max_x, max_y)
|
| 181 |
bbox_geo = [west, south, east, north]
|
| 182 |
|
| 183 |
-
# ---
|
| 184 |
end_date = datetime.now()
|
| 185 |
start_date = end_date - timedelta(days=days_back)
|
| 186 |
date_range = f"{start_date.strftime('%Y-%m-%d')}/{end_date.strftime('%Y-%m-%d')}"
|
|
@@ -194,18 +227,18 @@ def download_sentinel2(lat, lon, buffer_km, max_cloud_cover, days_back):
|
|
| 194 |
collections=["sentinel-2-l2a"],
|
| 195 |
bbox=bbox_geo,
|
| 196 |
datetime=date_range,
|
| 197 |
-
query={"eo:cloud_cover": {"
|
| 198 |
)
|
| 199 |
|
| 200 |
items = list(search.items())
|
| 201 |
if not items:
|
| 202 |
-
print("
|
| 203 |
return None
|
| 204 |
|
| 205 |
best_item = sorted(items, key=lambda x: x.properties.get('eo:cloud_cover', 100))[0]
|
| 206 |
|
| 207 |
-
# 6.
|
| 208 |
-
# bbox_geo (
|
| 209 |
data = odc.stac.load(
|
| 210 |
[best_item],
|
| 211 |
bands=["B01", "B02", "B03", "B04", "B05", "B06",
|
|
@@ -215,63 +248,24 @@ def download_sentinel2(lat, lon, buffer_km, max_cloud_cover, days_back):
|
|
| 215 |
resolution=10
|
| 216 |
)
|
| 217 |
|
| 218 |
-
#
|
| 219 |
stacked = np.stack([data[b].values[0] for b in data.data_vars], axis=0)
|
| 220 |
|
| 221 |
-
print(f"
|
| 222 |
return stacked, best_item.datetime.strftime('%Y-%m-%d'), best_item.id
|
| 223 |
|
| 224 |
-
#
|
| 225 |
-
# """Pobiera dane satelitarne uwzględniając parametry przekazane z backendu."""
|
| 226 |
-
# import planetary_computer
|
| 227 |
-
# import pystac_client
|
| 228 |
-
# import odc.stac
|
| 229 |
-
#
|
| 230 |
-
# print(f"🛰️ Pobieranie danych dla: {lat:.4f}, {lon:.4f} (Buffer: {buffer_km}km, Chmury: <{max_cloud_cover}%)")
|
| 231 |
-
#
|
| 232 |
-
# delta = buffer_km * 0.01
|
| 233 |
-
# bbox = [lon - delta, lat - delta, lon + delta, lat + delta]
|
| 234 |
-
#
|
| 235 |
-
# end_date = datetime.now()
|
| 236 |
-
# start_date = end_date - timedelta(days=days_back)
|
| 237 |
-
# date_range = f"{start_date.strftime('%Y-%m-%d')}/{end_date.strftime('%Y-%m-%d')}"
|
| 238 |
-
#
|
| 239 |
-
# catalog = pystac_client.Client.open(
|
| 240 |
-
# "https://planetarycomputer.microsoft.com/api/stac/v1",
|
| 241 |
-
# modifier=planetary_computer.sign_inplace
|
| 242 |
-
# )
|
| 243 |
-
#
|
| 244 |
-
# search = catalog.search(
|
| 245 |
-
# collections=["sentinel-2-l2a"],
|
| 246 |
-
# bbox=bbox,
|
| 247 |
-
# datetime=date_range,
|
| 248 |
-
# query={"eo:cloud_cover": {"lt": max_cloud_cover}}
|
| 249 |
-
# )
|
| 250 |
-
#
|
| 251 |
-
# items = list(search.items())
|
| 252 |
-
# if not items:
|
| 253 |
-
# print("❌ Brak danych spełniających kryteria")
|
| 254 |
-
# return None
|
| 255 |
-
#
|
| 256 |
-
# items_sorted = sorted(items, key=lambda x: x.properties.get('eo:cloud_cover', 100))
|
| 257 |
-
# best_item = items_sorted[0]
|
| 258 |
-
# date = best_item.datetime.strftime('%Y-%m-%d')
|
| 259 |
-
# cloud = best_item.properties.get('eo:cloud_cover', 0)
|
| 260 |
-
#
|
| 261 |
-
# print(f"📅 Znaleziono zdjęcie: {date} (chmury: {cloud:.1f}%)")
|
| 262 |
-
# print(f" 📌 ID sceny: {best_item.id}")
|
| 263 |
-
# print(f" 📦 BBOX żądany: {bbox}")
|
| 264 |
-
#
|
| 265 |
-
# bands = ["B01", "B02", "B03", "B04", "B05", "B06",
|
| 266 |
-
# "B07", "B08", "B8A", "B09", "B11", "B12"]
|
| 267 |
-
#
|
| 268 |
-
# data = odc.stac.load([best_item], bands=bands, bbox=bbox, resolution=10)
|
| 269 |
-
# stacked = np.stack([data[b].values[0] for b in bands], axis=0)
|
| 270 |
-
#
|
| 271 |
-
# print(f" 📐 Rozmiar danych: {stacked.shape}")
|
| 272 |
-
# return stacked, date, best_item.id
|
| 273 |
-
|
| 274 |
def prepare_input(data_12ch):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 275 |
tensor = torch.from_numpy(data_12ch.astype(np.float32))
|
| 276 |
tensor = torch.nan_to_num(tensor, nan=0.0)
|
| 277 |
|
|
@@ -291,7 +285,17 @@ def prepare_input(data_12ch):
|
|
| 291 |
return transform(tensor).unsqueeze(0)
|
| 292 |
|
| 293 |
def run_inference(model, input_tensor):
|
| 294 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 295 |
with torch.no_grad():
|
| 296 |
output = model(
|
| 297 |
{"S2L2A": input_tensor.to(device)},
|
|
@@ -301,6 +305,16 @@ def run_inference(model, input_tensor):
|
|
| 301 |
return output["LULC"].detach()
|
| 302 |
|
| 303 |
def decode_output(lulc_tensor):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 304 |
if lulc_tensor.ndim == 4 and lulc_tensor.shape[1] > 1:
|
| 305 |
class_indices = lulc_tensor.argmax(dim=1)[0].cpu().numpy()
|
| 306 |
if class_indices.max() <= 11:
|
|
@@ -312,6 +326,16 @@ def decode_output(lulc_tensor):
|
|
| 312 |
return class_map
|
| 313 |
|
| 314 |
def calculate_spectral_indices(input_tensor):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 315 |
blue = input_tensor[0, 1].cpu().numpy() / 10000.0
|
| 316 |
green = input_tensor[0, 2].cpu().numpy() / 10000.0
|
| 317 |
red = input_tensor[0, 3].cpu().numpy() / 10000.0
|
|
@@ -331,39 +355,55 @@ def calculate_spectral_indices(input_tensor):
|
|
| 331 |
|
| 332 |
return indices
|
| 333 |
|
| 334 |
-
# 🆕 Funkcja generująca maski dla każdego wskaźnika
|
| 335 |
def generate_index_masks(indices):
|
| 336 |
"""
|
| 337 |
-
|
| 338 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 339 |
"""
|
| 340 |
masks = {}
|
| 341 |
|
| 342 |
-
#
|
| 343 |
masks['water_ndwi'] = indices['ndwi'] > NDWI_THRESHOLD
|
| 344 |
|
| 345 |
-
#
|
| 346 |
masks['water_mndwi'] = indices['mndwi'] > MNDWI_THRESHOLD
|
| 347 |
|
| 348 |
-
#
|
| 349 |
masks['water_awei'] = indices['awei'] > 0
|
| 350 |
|
| 351 |
-
#
|
| 352 |
masks['vegetation_ndvi'] = indices['ndvi'] > NDVI_THRESHOLD
|
| 353 |
|
| 354 |
-
#
|
| 355 |
masks['vegetation_evi'] = indices['evi'] > 0.3
|
| 356 |
|
| 357 |
-
#
|
| 358 |
masks['buildings_ndbi'] = (indices['ndbi'] > NDBI_THRESHOLD) & (indices['ndvi'] < 0.2)
|
| 359 |
|
| 360 |
-
#
|
| 361 |
masks['baresoil_bsi'] = (indices['bsi'] > BSI_THRESHOLD) & (indices['ndvi'] < 0.1)
|
| 362 |
|
| 363 |
return masks
|
| 364 |
|
| 365 |
def apply_hybrid_corrections(class_map, indices):
|
| 366 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 367 |
hybrid_map = class_map.copy()
|
| 368 |
correction_layers = {}
|
| 369 |
|
|
@@ -394,11 +434,21 @@ def apply_hybrid_corrections(class_map, indices):
|
|
| 394 |
return hybrid_map, correction_layers
|
| 395 |
|
| 396 |
# =========================================
|
| 397 |
-
#
|
| 398 |
# =========================================
|
| 399 |
|
| 400 |
def create_rgb_image(input_tensor, brightness=BRIGHTNESS_BOOST):
|
| 401 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 402 |
red = input_tensor[0, 3].cpu().numpy()
|
| 403 |
green = input_tensor[0, 2].cpu().numpy()
|
| 404 |
blue = input_tensor[0, 1].cpu().numpy()
|
|
@@ -415,7 +465,16 @@ def create_rgb_image(input_tensor, brightness=BRIGHTNESS_BOOST):
|
|
| 415 |
return rgb_uint8
|
| 416 |
|
| 417 |
def create_segmentation_image(class_map):
|
| 418 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 419 |
h, w = class_map.shape
|
| 420 |
rgb = np.zeros((h, w, 3), dtype=np.uint8)
|
| 421 |
|
|
@@ -425,60 +484,37 @@ def create_segmentation_image(class_map):
|
|
| 425 |
|
| 426 |
return rgb
|
| 427 |
|
| 428 |
-
# def create_segmentation_image(class_map, alpha=180):
|
| 429 |
-
# """
|
| 430 |
-
# Tworzy kolorową mapę segmentacji z kanałem alfa.
|
| 431 |
-
# alpha: 0 (całkowicie przezroczysty) do 255 (nieprzezroczysty)
|
| 432 |
-
# """
|
| 433 |
-
# h, w = class_map.shape
|
| 434 |
-
# # Tworzymy tablicę RGBA (4 kanały)
|
| 435 |
-
# rgba = np.zeros((h, w, 4), dtype=np.uint8)
|
| 436 |
-
#
|
| 437 |
-
# for class_id, color in ESA_COLORS.items():
|
| 438 |
-
# mask = class_map == class_id
|
| 439 |
-
# rgba[mask, :3] = color # Kopiujemy R, G, B
|
| 440 |
-
# rgba[mask, 3] = alpha # Ustawiamy przezroczystość dla tej klasy
|
| 441 |
-
#
|
| 442 |
-
# return rgba
|
| 443 |
-
|
| 444 |
def create_mask_visualization(mask, color=[255, 0, 0]):
|
| 445 |
"""
|
| 446 |
-
|
|
|
|
| 447 |
|
| 448 |
Args:
|
| 449 |
-
mask: numpy array
|
| 450 |
-
color:
|
| 451 |
|
| 452 |
Returns:
|
| 453 |
-
numpy
|
| 454 |
"""
|
| 455 |
h, w = mask.shape
|
| 456 |
-
rgb = np.ones((h, w, 3), dtype=np.uint8) * 255 #
|
| 457 |
|
| 458 |
-
rgb[mask] = color #
|
| 459 |
-
rgb[~mask] = [240, 240, 240] #
|
| 460 |
|
| 461 |
return rgb
|
| 462 |
|
| 463 |
-
# def create_mask_visualization(mask, color=[255, 0, 0], alpha=180):
|
| 464 |
-
# """
|
| 465 |
-
# Tworzy wizualizację maski binarnej na przezroczystym tle.
|
| 466 |
-
# """
|
| 467 |
-
# h, w = mask.shape
|
| 468 |
-
# # 4 kanały: R, G, B, A
|
| 469 |
-
# rgba = np.zeros((h, w, 4), dtype=np.uint8)
|
| 470 |
-
#
|
| 471 |
-
# # Piksele należące do maski otrzymują kolor i wybraną przezroczystość
|
| 472 |
-
# rgba[mask, :3] = color
|
| 473 |
-
# rgba[mask, 3] = alpha
|
| 474 |
-
#
|
| 475 |
-
# # Piksele poza maską (~mask) mają już 0 w kanale alfa dzięki np.zeros,
|
| 476 |
-
# # więc są całkowicie przezroczyste.
|
| 477 |
-
#
|
| 478 |
-
# return rgba
|
| 479 |
-
|
| 480 |
def calculate_class_percentages(class_map):
|
| 481 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 482 |
total_pixels = class_map.size
|
| 483 |
percentages = {}
|
| 484 |
|
|
@@ -494,116 +530,137 @@ def calculate_class_percentages(class_map):
|
|
| 494 |
return percentages
|
| 495 |
|
| 496 |
def image_to_base64(image_array):
|
| 497 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 498 |
img = Image.fromarray(image_array)
|
| 499 |
buffer = BytesIO()
|
| 500 |
img.save(buffer, format='PNG')
|
| 501 |
return base64.b64encode(buffer.getvalue()).decode('utf-8')
|
| 502 |
|
| 503 |
# =========================================
|
| 504 |
-
#
|
| 505 |
# =========================================
|
| 506 |
|
| 507 |
def analyze(location_data, buffer_km=DEFAULT_BUFFER_KM, max_cloud_cover=DEFAULT_MAX_CLOUD_COVER,
|
| 508 |
days_back=DEFAULT_DAYS_BACK, show_visualization=False, save_files=False, model_name="terramind_v1_large_generate"):
|
| 509 |
"""
|
| 510 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 511 |
|
| 512 |
Returns:
|
| 513 |
-
dict
|
|
|
|
|
|
|
|
|
|
| 514 |
"""
|
| 515 |
|
| 516 |
lat, lon = None, None
|
| 517 |
title = "Unknown"
|
| 518 |
|
| 519 |
-
# 1.
|
| 520 |
if isinstance(location_data, list) or isinstance(location_data, tuple):
|
| 521 |
lat, lon = location_data
|
| 522 |
title = f"{lat:.4f}N, {lon:.4f}E"
|
| 523 |
elif isinstance(location_data, str):
|
| 524 |
coords = get_coordinates_from_name(location_data)
|
| 525 |
if not coords:
|
| 526 |
-
print("
|
| 527 |
return None
|
| 528 |
lat, lon = coords
|
| 529 |
title = location_data
|
| 530 |
else:
|
| 531 |
-
print("
|
| 532 |
return None
|
| 533 |
|
| 534 |
print(f"\n{'='*60}")
|
| 535 |
-
print(f"
|
| 536 |
print(f"{'='*60}")
|
| 537 |
-
print(f"
|
| 538 |
-
print(f"
|
| 539 |
-
print(f"
|
| 540 |
-
print(f"
|
| 541 |
print(f"{'='*60}\n")
|
| 542 |
|
| 543 |
-
# 2.
|
| 544 |
result = download_sentinel2(lat, lon, buffer_km, max_cloud_cover, days_back)
|
| 545 |
if result is None:
|
| 546 |
return None
|
| 547 |
data, date, scene_id = result
|
| 548 |
|
| 549 |
-
#
|
| 550 |
original_height, original_width = data.shape[1], data.shape[2]
|
| 551 |
|
| 552 |
-
# 3.
|
| 553 |
input_tensor = prepare_input(data)
|
| 554 |
-
print(f"
|
| 555 |
|
| 556 |
model = get_model(model_name)
|
| 557 |
lulc_output = run_inference(model, input_tensor)
|
| 558 |
|
| 559 |
-
# 4.
|
| 560 |
class_map_raw = decode_output(lulc_output)
|
| 561 |
|
| 562 |
-
# 5.
|
| 563 |
indices = calculate_spectral_indices(input_tensor)
|
| 564 |
|
| 565 |
-
# 6.
|
| 566 |
index_masks = generate_index_masks(indices)
|
| 567 |
|
| 568 |
-
# 7.
|
| 569 |
class_map_final, correction_layers = apply_hybrid_corrections(class_map_raw, indices)
|
| 570 |
|
| 571 |
# =========================================
|
| 572 |
-
#
|
| 573 |
# =========================================
|
| 574 |
|
| 575 |
-
print("
|
| 576 |
|
| 577 |
-
# 1. RGB (
|
| 578 |
rgb_image = create_rgb_image(input_tensor)
|
| 579 |
|
| 580 |
-
# 2.
|
| 581 |
raw_segmentation = create_segmentation_image(class_map_raw)
|
| 582 |
|
| 583 |
-
# 3.
|
| 584 |
final_segmentation = create_segmentation_image(class_map_final)
|
| 585 |
|
| 586 |
-
# 4.
|
| 587 |
mask_images = {}
|
| 588 |
mask_colors = {
|
| 589 |
-
'water_ndwi': [0, 150, 255], #
|
| 590 |
-
'water_mndwi': [0, 100, 200], #
|
| 591 |
-
'water_awei': [100, 200, 255], #
|
| 592 |
-
'vegetation_ndvi': [0, 150, 0], #
|
| 593 |
-
'vegetation_evi': [50, 200, 50], #
|
| 594 |
-
'buildings_ndbi': [255, 0, 0], #
|
| 595 |
-
'baresoil_bsi': [180, 140, 100], #
|
| 596 |
}
|
| 597 |
|
| 598 |
for mask_name, mask in index_masks.items():
|
| 599 |
color = mask_colors.get(mask_name, [128, 128, 128])
|
| 600 |
mask_images[mask_name] = create_mask_visualization(mask, color)
|
| 601 |
|
| 602 |
-
# 5.
|
| 603 |
statistics = calculate_class_percentages(class_map_final)
|
| 604 |
|
| 605 |
# =========================================
|
| 606 |
-
#
|
| 607 |
# =========================================
|
| 608 |
|
| 609 |
frontend_result = {
|
|
@@ -615,44 +672,44 @@ def analyze(location_data, buffer_km=DEFAULT_BUFFER_KM, max_cloud_cover=DEFAULT_
|
|
| 615 |
'scene_id': scene_id,
|
| 616 |
'statistics': statistics,
|
| 617 |
|
| 618 |
-
#
|
| 619 |
'rgb_base64': image_to_base64(rgb_image),
|
| 620 |
'raw_segmentation_base64': image_to_base64(raw_segmentation),
|
| 621 |
'segmentation_base64': image_to_base64(final_segmentation),
|
| 622 |
|
| 623 |
-
#
|
| 624 |
'masks': {
|
| 625 |
mask_name: image_to_base64(mask_img)
|
| 626 |
for mask_name, mask_img in mask_images.items()
|
| 627 |
},
|
| 628 |
|
| 629 |
-
#
|
| 630 |
'class_map': class_map_final.tolist(),
|
| 631 |
|
| 632 |
-
#
|
| 633 |
'image_width': original_width,
|
| 634 |
'image_height': original_height
|
| 635 |
}
|
| 636 |
|
| 637 |
-
print("
|
| 638 |
|
| 639 |
return frontend_result
|
| 640 |
|
| 641 |
if __name__ == "__main__":
|
| 642 |
print("\n" + "="*70)
|
| 643 |
-
print("
|
| 644 |
print("="*70)
|
| 645 |
|
| 646 |
result = analyze([50.0540, 19.9352], buffer_km=3, max_cloud_cover=10, days_back=60)
|
| 647 |
|
| 648 |
if result:
|
| 649 |
print("\n" + "="*70)
|
| 650 |
-
print("
|
| 651 |
print("="*70)
|
| 652 |
-
print(f"
|
| 653 |
-
print(f"
|
| 654 |
-
print(f"
|
| 655 |
-
print(f"
|
| 656 |
for mask_name in result['masks'].keys():
|
| 657 |
print(f" - {mask_name}")
|
| 658 |
print("="*70)
|
|
|
|
| 1 |
+
|
| 2 |
import os
|
| 3 |
import torch
|
| 4 |
import numpy as np
|
|
|
|
| 13 |
import gc
|
| 14 |
|
| 15 |
# =========================================
|
| 16 |
+
# CONFIGURATION DEFAULTS
|
| 17 |
# =========================================
|
| 18 |
|
| 19 |
DEFAULT_BUFFER_KM = 5
|
|
|
|
| 25 |
BRIGHTNESS_BOOST = 2.5
|
| 26 |
NORMALIZATION_MODE = "offset"
|
| 27 |
|
| 28 |
+
# Spectral index thresholds
|
| 29 |
NDWI_THRESHOLD = 0.1
|
| 30 |
MNDWI_THRESHOLD = 0.1
|
| 31 |
NDVI_THRESHOLD = 0.3
|
|
|
|
| 36 |
USE_VEGETATION_CORRECTION = True
|
| 37 |
USE_BUILDING_CORRECTION = True
|
| 38 |
USE_BARE_SOIL_CORRECTION = True
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
|
| 40 |
# =========================================
|
| 41 |
+
# ESA WORLDCOVER CLASSES
|
| 42 |
# =========================================
|
| 43 |
|
| 44 |
ESA_CLASSES = {
|
| 45 |
+
0: "No data",
|
| 46 |
+
10: "Trees / Forest",
|
| 47 |
+
20: "Shrubs",
|
| 48 |
+
30: "Grass / Meadows",
|
| 49 |
+
40: "Cultivated crops",
|
| 50 |
+
50: "Buildings",
|
| 51 |
+
60: "Bare ground",
|
| 52 |
+
70: "Snow and ice",
|
| 53 |
+
80: "Water",
|
| 54 |
+
90: "Wetlands",
|
| 55 |
+
95: "Mangroves",
|
| 56 |
+
100: "Lichens and moss"
|
| 57 |
}
|
| 58 |
|
| 59 |
ESA_COLORS = {
|
|
|
|
| 77 |
}
|
| 78 |
|
| 79 |
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
| 80 |
+
print(f"[DEVICE] Computing device: {device}")
|
| 81 |
|
| 82 |
# =========================================
|
| 83 |
+
# GLOBAL MODEL CACHE
|
| 84 |
# =========================================
|
| 85 |
|
| 86 |
_CURRENT_MODEL = None
|
| 87 |
_CURRENT_MODEL_NAME = None
|
| 88 |
|
| 89 |
def get_model(model_name):
|
| 90 |
+
"""
|
| 91 |
+
Loads and caches TerraMind model for inference.
|
| 92 |
+
Implements global cache to avoid loading same model multiple times.
|
| 93 |
+
Clears memory when switching models.
|
| 94 |
+
|
| 95 |
+
Args:
|
| 96 |
+
model_name: str - model identifier from TerraTorch registry
|
| 97 |
+
|
| 98 |
+
Returns:
|
| 99 |
+
model object ready for inference
|
| 100 |
+
|
| 101 |
+
Raises:
|
| 102 |
+
Exception if model loading fails
|
| 103 |
+
"""
|
| 104 |
global _CURRENT_MODEL, _CURRENT_MODEL_NAME
|
| 105 |
if _CURRENT_MODEL is not None and _CURRENT_MODEL_NAME == model_name:
|
| 106 |
return _CURRENT_MODEL
|
|
|
|
| 109 |
if torch.cuda.is_available():
|
| 110 |
torch.cuda.empty_cache()
|
| 111 |
gc.collect()
|
| 112 |
+
print("[CLEANUP] Memory cleared after previous model.")
|
| 113 |
|
| 114 |
from terratorch import FULL_MODEL_REGISTRY
|
| 115 |
+
print("[LOADING] Loading model (first time only)...")
|
| 116 |
|
| 117 |
try:
|
| 118 |
model = FULL_MODEL_REGISTRY.build(
|
| 119 |
+
model_name, # Use the name passed from parameter
|
| 120 |
modalities=["S2L2A"],
|
| 121 |
output_modalities=["LULC"],
|
| 122 |
pretrained=True,
|
|
|
|
| 125 |
|
| 126 |
model.eval()
|
| 127 |
|
| 128 |
+
# Update global cache
|
| 129 |
_CURRENT_MODEL = model
|
| 130 |
_CURRENT_MODEL_NAME = model_name
|
| 131 |
|
| 132 |
+
print(f"[SUCCESS] Model {model_name} ready for use.")
|
| 133 |
return _CURRENT_MODEL
|
| 134 |
|
| 135 |
except Exception as e:
|
| 136 |
+
print(f"[ERROR] Error loading model {model_name}: {e}")
|
| 137 |
+
# If it fails, try loading default or raise error
|
| 138 |
raise e
|
| 139 |
|
| 140 |
# =========================================
|
| 141 |
+
# HELPER FUNCTIONS
|
| 142 |
# =========================================
|
| 143 |
|
| 144 |
def get_coordinates_from_name(place_name):
|
| 145 |
+
"""
|
| 146 |
+
Geocodes place name to geographic coordinates using Nominatim.
|
| 147 |
+
|
| 148 |
+
Args:
|
| 149 |
+
place_name: str - name of the location
|
| 150 |
+
|
| 151 |
+
Returns:
|
| 152 |
+
tuple of (latitude, longitude) or None if not found
|
| 153 |
+
"""
|
| 154 |
try:
|
| 155 |
from geopy.geocoders import Nominatim
|
| 156 |
geolocator = Nominatim(user_agent="terramind_fast")
|
| 157 |
location = geolocator.geocode(place_name)
|
| 158 |
if location:
|
| 159 |
+
print(f"[LOCATION] {location.address}")
|
| 160 |
return location.latitude, location.longitude
|
| 161 |
return None
|
| 162 |
except:
|
|
|
|
| 164 |
|
| 165 |
|
| 166 |
def download_sentinel2(lat, lon, buffer_km, max_cloud_cover, days_back):
|
| 167 |
+
"""
|
| 168 |
+
Downloads Sentinel-2 L2A satellite data for specified location.
|
| 169 |
+
Uses Planetary Computer STAC API to find and load cloud-optimized GeoTIFF data.
|
| 170 |
+
|
| 171 |
+
Args:
|
| 172 |
+
lat: latitude coordinate
|
| 173 |
+
lon: longitude coordinate
|
| 174 |
+
buffer_km: search radius in kilometers
|
| 175 |
+
max_cloud_cover: maximum allowed cloud cover percentage
|
| 176 |
+
days_back: days to search back in time
|
| 177 |
+
|
| 178 |
+
Returns:
|
| 179 |
+
tuple of (stacked_array, date, scene_id) or None if no data found
|
| 180 |
+
"""
|
| 181 |
import pystac_client
|
| 182 |
import odc.stac
|
| 183 |
+
import planetary_computer
|
| 184 |
import math
|
| 185 |
import numpy as np
|
| 186 |
from datetime import datetime, timedelta
|
| 187 |
from pyproj import Transformer
|
| 188 |
|
| 189 |
+
print(f"[DOWNLOAD] Downloading data for: {lat:.4f}, {lon:.4f} (Radius: {buffer_km}km)")
|
| 190 |
|
| 191 |
+
# 1. Calculate Mercator scale factor for given latitude
|
| 192 |
+
# This corrects map distortion (in Poland 1 meter real ≈ 1.6 meters in EPSG:3857)
|
| 193 |
scale_factor = 1.0 / math.cos(math.radians(lat))
|
| 194 |
|
| 195 |
+
# 2. Prepare transformers
|
| 196 |
to_3857 = Transformer.from_crs("EPSG:4326", "EPSG:3857", always_xy=True)
|
| 197 |
to_4326 = Transformer.from_crs("EPSG:3857", "EPSG:4326", always_xy=True)
|
| 198 |
|
| 199 |
+
# 3. Calculate center in Mercator meters
|
| 200 |
center_x, center_y = to_3857.transform(lon, lat)
|
| 201 |
|
| 202 |
+
# 4. Calculate extent in Mercator meters accounting for scale
|
| 203 |
+
# half_side = (buffer_km * 1000) / 2 <-- If buffer_km is side length
|
| 204 |
+
# half_side = (buffer_km * 1000) <-- If buffer_km is radius (distance from center)
|
| 205 |
+
# We assume buffer_km as radius (consistent with "radius" logic):
|
| 206 |
half_side_mercator = (buffer_km * 1000) * scale_factor
|
| 207 |
|
| 208 |
min_x, min_y = center_x - half_side_mercator, center_y - half_side_mercator
|
| 209 |
max_x, max_y = center_x + half_side_mercator, center_y + half_side_mercator
|
| 210 |
|
| 211 |
+
# 5. Convert back to degrees for STAC (required by API)
|
| 212 |
west, south = to_4326.transform(min_x, min_y)
|
| 213 |
east, north = to_4326.transform(max_x, max_y)
|
| 214 |
bbox_geo = [west, south, east, north]
|
| 215 |
|
| 216 |
+
# --- Data Search ---
|
| 217 |
end_date = datetime.now()
|
| 218 |
start_date = end_date - timedelta(days=days_back)
|
| 219 |
date_range = f"{start_date.strftime('%Y-%m-%d')}/{end_date.strftime('%Y-%m-%d')}"
|
|
|
|
| 227 |
collections=["sentinel-2-l2a"],
|
| 228 |
bbox=bbox_geo,
|
| 229 |
datetime=date_range,
|
| 230 |
+
query={"eo:cloud_cover": {"lte": max_cloud_cover}}
|
| 231 |
)
|
| 232 |
|
| 233 |
items = list(search.items())
|
| 234 |
if not items:
|
| 235 |
+
print("[ERROR] No data matching criteria found")
|
| 236 |
return None
|
| 237 |
|
| 238 |
best_item = sorted(items, key=lambda x: x.properties.get('eo:cloud_cover', 100))[0]
|
| 239 |
|
| 240 |
+
# 6. Load data
|
| 241 |
+
# bbox_geo (degrees) defines the area, crs="EPSG:3857" enforces web map format
|
| 242 |
data = odc.stac.load(
|
| 243 |
[best_item],
|
| 244 |
bands=["B01", "B02", "B03", "B04", "B05", "B06",
|
|
|
|
| 248 |
resolution=10
|
| 249 |
)
|
| 250 |
|
| 251 |
+
# Map variables to numpy array
|
| 252 |
stacked = np.stack([data[b].values[0] for b in data.data_vars], axis=0)
|
| 253 |
|
| 254 |
+
print(f"[SUCCESS] Downloaded image size: {stacked.shape}")
|
| 255 |
return stacked, best_item.datetime.strftime('%Y-%m-%d'), best_item.id
|
| 256 |
|
| 257 |
+
# Input data format: (12, H, W) numpy array (12 Sentinel-2 bands)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 258 |
def prepare_input(data_12ch):
|
| 259 |
+
"""
|
| 260 |
+
Preprocesses raw satellite data tensor for model inference.
|
| 261 |
+
Normalizes, resizes, and converts to PyTorch tensor.
|
| 262 |
+
|
| 263 |
+
Args:
|
| 264 |
+
data_12ch: numpy array of shape (12, H, W) with 12 Sentinel-2 bands
|
| 265 |
+
|
| 266 |
+
Returns:
|
| 267 |
+
torch.Tensor of shape (1, 12, 224, 224) ready for model inference
|
| 268 |
+
"""
|
| 269 |
tensor = torch.from_numpy(data_12ch.astype(np.float32))
|
| 270 |
tensor = torch.nan_to_num(tensor, nan=0.0)
|
| 271 |
|
|
|
|
| 285 |
return transform(tensor).unsqueeze(0)
|
| 286 |
|
| 287 |
def run_inference(model, input_tensor):
|
| 288 |
+
"""
|
| 289 |
+
Executes AI model inference on preprocessed satellite data.
|
| 290 |
+
|
| 291 |
+
Args:
|
| 292 |
+
model: TerraMind neural network model
|
| 293 |
+
input_tensor: torch.Tensor of shape (1, 12, 224, 224)
|
| 294 |
+
|
| 295 |
+
Returns:
|
| 296 |
+
torch.Tensor containing LULC (Land Use Land Cover) predictions
|
| 297 |
+
"""
|
| 298 |
+
print(f"[RUNNING] Running AI model...")
|
| 299 |
with torch.no_grad():
|
| 300 |
output = model(
|
| 301 |
{"S2L2A": input_tensor.to(device)},
|
|
|
|
| 305 |
return output["LULC"].detach()
|
| 306 |
|
| 307 |
def decode_output(lulc_tensor):
|
| 308 |
+
"""
|
| 309 |
+
Converts model output tensor to class map with ESA WorldCover labels.
|
| 310 |
+
Maps class indices to standardized ESA class codes.
|
| 311 |
+
|
| 312 |
+
Args:
|
| 313 |
+
lulc_tensor: Model output tensor (4D or 3D depending on inference mode)
|
| 314 |
+
|
| 315 |
+
Returns:
|
| 316 |
+
numpy.ndarray with ESA class codes (0-100)
|
| 317 |
+
"""
|
| 318 |
if lulc_tensor.ndim == 4 and lulc_tensor.shape[1] > 1:
|
| 319 |
class_indices = lulc_tensor.argmax(dim=1)[0].cpu().numpy()
|
| 320 |
if class_indices.max() <= 11:
|
|
|
|
| 326 |
return class_map
|
| 327 |
|
| 328 |
def calculate_spectral_indices(input_tensor):
|
| 329 |
+
"""
|
| 330 |
+
Calculates spectral indices from Sentinel-2 multispectral bands.
|
| 331 |
+
Includes: NDWI, MNDWI, AWEI, NDVI, EVI, NDBI, BSI.
|
| 332 |
+
|
| 333 |
+
Args:
|
| 334 |
+
input_tensor: torch.Tensor containing 12 Sentinel-2 bands
|
| 335 |
+
|
| 336 |
+
Returns:
|
| 337 |
+
dict with spectral indices as numpy arrays
|
| 338 |
+
"""
|
| 339 |
blue = input_tensor[0, 1].cpu().numpy() / 10000.0
|
| 340 |
green = input_tensor[0, 2].cpu().numpy() / 10000.0
|
| 341 |
red = input_tensor[0, 3].cpu().numpy() / 10000.0
|
|
|
|
| 355 |
|
| 356 |
return indices
|
| 357 |
|
|
|
|
| 358 |
def generate_index_masks(indices):
|
| 359 |
"""
|
| 360 |
+
Generates binary masks for each spectral vegetation and water index.
|
| 361 |
+
Uses predefined thresholds to create masks for water, vegetation, buildings, bare soil.
|
| 362 |
+
|
| 363 |
+
Args:
|
| 364 |
+
indices: dict with spectral indices (ndwi, mndwi, awei, ndvi, evi, ndbi, bsi)
|
| 365 |
+
|
| 366 |
+
Returns:
|
| 367 |
+
dict with 7 binary masks as numpy boolean arrays
|
| 368 |
"""
|
| 369 |
masks = {}
|
| 370 |
|
| 371 |
+
# Water mask (NDWI)
|
| 372 |
masks['water_ndwi'] = indices['ndwi'] > NDWI_THRESHOLD
|
| 373 |
|
| 374 |
+
# Water mask (MNDWI)
|
| 375 |
masks['water_mndwi'] = indices['mndwi'] > MNDWI_THRESHOLD
|
| 376 |
|
| 377 |
+
# Water mask (AWEI)
|
| 378 |
masks['water_awei'] = indices['awei'] > 0
|
| 379 |
|
| 380 |
+
# Vegetation mask (NDVI)
|
| 381 |
masks['vegetation_ndvi'] = indices['ndvi'] > NDVI_THRESHOLD
|
| 382 |
|
| 383 |
+
# Vegetation mask (EVI)
|
| 384 |
masks['vegetation_evi'] = indices['evi'] > 0.3
|
| 385 |
|
| 386 |
+
# Buildings mask (NDBI)
|
| 387 |
masks['buildings_ndbi'] = (indices['ndbi'] > NDBI_THRESHOLD) & (indices['ndvi'] < 0.2)
|
| 388 |
|
| 389 |
+
# Bare soil mask (BSI)
|
| 390 |
masks['baresoil_bsi'] = (indices['bsi'] > BSI_THRESHOLD) & (indices['ndvi'] < 0.1)
|
| 391 |
|
| 392 |
return masks
|
| 393 |
|
| 394 |
def apply_hybrid_corrections(class_map, indices):
|
| 395 |
+
"""
|
| 396 |
+
Applies hybrid corrections to raw model output using spectral indices.
|
| 397 |
+
Corrects water, vegetation, buildings, and bare soil classifications.
|
| 398 |
+
Creates correction layers showing where corrections were applied.
|
| 399 |
+
|
| 400 |
+
Args:
|
| 401 |
+
class_map: numpy array with ESA class codes from raw model
|
| 402 |
+
indices: dict with calculated spectral indices
|
| 403 |
+
|
| 404 |
+
Returns:
|
| 405 |
+
tuple of (corrected_class_map, correction_layers_dict)
|
| 406 |
+
"""
|
| 407 |
hybrid_map = class_map.copy()
|
| 408 |
correction_layers = {}
|
| 409 |
|
|
|
|
| 434 |
return hybrid_map, correction_layers
|
| 435 |
|
| 436 |
# =========================================
|
| 437 |
+
# NEW: MASK VISUALIZATION FUNCTIONS
|
| 438 |
# =========================================
|
| 439 |
|
| 440 |
def create_rgb_image(input_tensor, brightness=BRIGHTNESS_BOOST):
|
| 441 |
+
"""
|
| 442 |
+
Creates natural color RGB image from Sentinel-2 multispectral tensor.
|
| 443 |
+
Uses red, green, blue bands with brightness scaling and normalization.
|
| 444 |
+
|
| 445 |
+
Args:
|
| 446 |
+
input_tensor: torch.Tensor with 12 Sentinel-2 bands
|
| 447 |
+
brightness: float - brightness multiplier (default 2.5)
|
| 448 |
+
|
| 449 |
+
Returns:
|
| 450 |
+
numpy.ndarray (H, W, 3) uint8 RGB image
|
| 451 |
+
"""
|
| 452 |
red = input_tensor[0, 3].cpu().numpy()
|
| 453 |
green = input_tensor[0, 2].cpu().numpy()
|
| 454 |
blue = input_tensor[0, 1].cpu().numpy()
|
|
|
|
| 465 |
return rgb_uint8
|
| 466 |
|
| 467 |
def create_segmentation_image(class_map):
|
| 468 |
+
"""
|
| 469 |
+
Converts class map to colored segmentation visualization.
|
| 470 |
+
Maps ESA class codes to predefined RGB colors for display.
|
| 471 |
+
|
| 472 |
+
Args:
|
| 473 |
+
class_map: numpy array with ESA class codes (0-100)
|
| 474 |
+
|
| 475 |
+
Returns:
|
| 476 |
+
numpy.ndarray (H, W, 3) uint8 RGB image with class colors
|
| 477 |
+
"""
|
| 478 |
h, w = class_map.shape
|
| 479 |
rgb = np.zeros((h, w, 3), dtype=np.uint8)
|
| 480 |
|
|
|
|
| 484 |
|
| 485 |
return rgb
|
| 486 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 487 |
def create_mask_visualization(mask, color=(255, 0, 0)):
    """
    Creates colored visualization of binary mask.
    Renders mask pixels in specified color with gray background for contrast.

    Args:
        mask: numpy boolean array (True/False)
        color: RGB sequence for True pixels (default red (255, 0, 0)).
            Changed from a mutable list default to a tuple; callers passing
            lists are unaffected.

    Returns:
        numpy.ndarray (H, W, 3) uint8 RGB image with mask visualization
    """
    h, w = mask.shape
    # Initialize directly with the gray background value; the previous
    # all-white np.ones(...) * 255 init was dead work because every pixel
    # was overwritten (mask pixels by `color`, the rest by gray).
    rgb = np.full((h, w, 3), 240, dtype=np.uint8)  # Gray background for better visibility

    rgb[mask] = color  # Fill mask with color

    return rgb
|
| 506 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 507 |
def calculate_class_percentages(class_map):
|
| 508 |
+
"""
|
| 509 |
+
Calculates pixel count and percentage coverage for each ESA class.
|
| 510 |
+
Includes class names and statistics for result reporting.
|
| 511 |
+
|
| 512 |
+
Args:
|
| 513 |
+
class_map: numpy array with ESA class codes
|
| 514 |
+
|
| 515 |
+
Returns:
|
| 516 |
+
dict with class statistics {class_id: {name, count, percentage}}
|
| 517 |
+
"""
|
| 518 |
total_pixels = class_map.size
|
| 519 |
percentages = {}
|
| 520 |
|
|
|
|
| 530 |
return percentages
|
| 531 |
|
| 532 |
def image_to_base64(image_array):
    """
    Encodes numpy image array to base64-encoded PNG string for transmission.

    Args:
        image_array: numpy.ndarray containing image data

    Returns:
        str - base64-encoded PNG image suitable for HTML/JSON transmission
    """
    png_buffer = BytesIO()
    # Serialize to PNG entirely in memory, then base64-encode the bytes.
    Image.fromarray(image_array).save(png_buffer, format='PNG')
    encoded = base64.b64encode(png_buffer.getvalue())
    return encoded.decode('utf-8')
|
| 546 |
|
| 547 |
# =========================================
|
| 548 |
+
# MAIN ANALYTICAL FUNCTION
|
| 549 |
# =========================================
|
| 550 |
|
| 551 |
def analyze(location_data, buffer_km=DEFAULT_BUFFER_KM, max_cloud_cover=DEFAULT_MAX_CLOUD_COVER,
|
| 552 |
days_back=DEFAULT_DAYS_BACK, show_visualization=False, save_files=False, model_name="terramind_v1_large_generate"):
|
| 553 |
"""
|
| 554 |
+
Main analysis pipeline for land cover classification and spectral analysis.
|
| 555 |
+
Downloads Sentinel-2 data, runs AI model, applies corrections, generates visualizations.
|
| 556 |
+
|
| 557 |
+
Args:
|
| 558 |
+
location_data: list/tuple [lat, lon] or str place name
|
| 559 |
+
buffer_km: int - search radius in kilometers (default 5)
|
| 560 |
+
max_cloud_cover: int - maximum cloud percentage (default 10)
|
| 561 |
+
days_back: int - days to search back (default 180)
|
| 562 |
+
show_visualization: bool - display plots (not used in API)
|
| 563 |
+
save_files: bool - save results to disk (not used in API)
|
| 564 |
+
model_name: str - TerraMind model name
|
| 565 |
|
| 566 |
Returns:
|
| 567 |
+
dict with success status, images (base64), masks, statistics, metadata
|
| 568 |
+
|
| 569 |
+
Example:
|
| 570 |
+
result = analyze([50.0540, 19.9352], buffer_km=5, max_cloud_cover=20)
|
| 571 |
"""
|
| 572 |
|
| 573 |
lat, lon = None, None
|
| 574 |
title = "Unknown"
|
| 575 |
|
| 576 |
+
# 1. Recognize data type
|
| 577 |
if isinstance(location_data, list) or isinstance(location_data, tuple):
|
| 578 |
lat, lon = location_data
|
| 579 |
title = f"{lat:.4f}N, {lon:.4f}E"
|
| 580 |
elif isinstance(location_data, str):
|
| 581 |
coords = get_coordinates_from_name(location_data)
|
| 582 |
if not coords:
|
| 583 |
+
print("[ERROR] Coordinates not found for given name.")
|
| 584 |
return None
|
| 585 |
lat, lon = coords
|
| 586 |
title = location_data
|
| 587 |
else:
|
| 588 |
+
print("[ERROR] Invalid location format")
|
| 589 |
return None
|
| 590 |
|
| 591 |
print(f"\n{'='*60}")
|
| 592 |
+
print(f"[START] ANALYSIS START: {title}")
|
| 593 |
print(f"{'='*60}")
|
| 594 |
+
print(f"[LOCATION] Coordinates: {lat:.6f}, {lon:.6f}")
|
| 595 |
+
print(f"[RADIUS] Radius: {buffer_km} km")
|
| 596 |
+
print(f"[CLOUDS] Max cloud cover: {max_cloud_cover}%")
|
| 597 |
+
print(f"[HISTORY] Days back: {days_back}")
|
| 598 |
print(f"{'='*60}\n")
|
| 599 |
|
| 600 |
+
# 2. Download data
|
| 601 |
result = download_sentinel2(lat, lon, buffer_km, max_cloud_cover, days_back)
|
| 602 |
if result is None:
|
| 603 |
return None
|
| 604 |
data, date, scene_id = result
|
| 605 |
|
| 606 |
+
# Save original size before scaling
|
| 607 |
original_height, original_width = data.shape[1], data.shape[2]
|
| 608 |
|
| 609 |
+
# 3. Process and run AI
|
| 610 |
input_tensor = prepare_input(data)
|
| 611 |
+
print(f"[TENSOR] Input tensor size: {input_tensor.shape}")
|
| 612 |
|
| 613 |
model = get_model(model_name)
|
| 614 |
lulc_output = run_inference(model, input_tensor)
|
| 615 |
|
| 616 |
+
# 4. Decode (RAW model - without corrections)
|
| 617 |
class_map_raw = decode_output(lulc_output)
|
| 618 |
|
| 619 |
+
# 5. Calculate spectral indices
|
| 620 |
indices = calculate_spectral_indices(input_tensor)
|
| 621 |
|
| 622 |
+
# 6. Generate masks for indices
|
| 623 |
index_masks = generate_index_masks(indices)
|
| 624 |
|
| 625 |
+
# 7. Apply corrections (final map)
|
| 626 |
class_map_final, correction_layers = apply_hybrid_corrections(class_map_raw, indices)
|
| 627 |
|
| 628 |
# =========================================
|
| 629 |
+
# GENERATING ALL VISUALIZATIONS
|
| 630 |
# =========================================
|
| 631 |
|
| 632 |
+
print("[VISUALIZATION] Generating visualizations...")
|
| 633 |
|
| 634 |
+
# 1. RGB (satellite)
|
| 635 |
rgb_image = create_rgb_image(input_tensor)
|
| 636 |
|
| 637 |
+
# 2. Raw TerraMind (without corrections)
|
| 638 |
raw_segmentation = create_segmentation_image(class_map_raw)
|
| 639 |
|
| 640 |
+
# 3. Final segmentation (with corrections)
|
| 641 |
final_segmentation = create_segmentation_image(class_map_final)
|
| 642 |
|
| 643 |
+
# 4. Index masks
|
| 644 |
mask_images = {}
|
| 645 |
mask_colors = {
|
| 646 |
+
'water_ndwi': [0, 150, 255], # Blue
|
| 647 |
+
'water_mndwi': [0, 100, 200], # Dark blue
|
| 648 |
+
'water_awei': [100, 200, 255], # Light blue
|
| 649 |
+
'vegetation_ndvi': [0, 150, 0], # Green
|
| 650 |
+
'vegetation_evi': [50, 200, 50], # Light green
|
| 651 |
+
'buildings_ndbi': [255, 0, 0], # Red
|
| 652 |
+
'baresoil_bsi': [180, 140, 100], # Brown
|
| 653 |
}
|
| 654 |
|
| 655 |
for mask_name, mask in index_masks.items():
|
| 656 |
color = mask_colors.get(mask_name, [128, 128, 128])
|
| 657 |
mask_images[mask_name] = create_mask_visualization(mask, color)
|
| 658 |
|
| 659 |
+
# 5. Statistics
|
| 660 |
statistics = calculate_class_percentages(class_map_final)
|
| 661 |
|
| 662 |
# =========================================
|
| 663 |
+
# PREPARING RESULT
|
| 664 |
# =========================================
|
| 665 |
|
| 666 |
frontend_result = {
|
|
|
|
| 672 |
'scene_id': scene_id,
|
| 673 |
'statistics': statistics,
|
| 674 |
|
| 675 |
+
# MAIN IMAGES
|
| 676 |
'rgb_base64': image_to_base64(rgb_image),
|
| 677 |
'raw_segmentation_base64': image_to_base64(raw_segmentation),
|
| 678 |
'segmentation_base64': image_to_base64(final_segmentation),
|
| 679 |
|
| 680 |
+
# INDEX MASKS
|
| 681 |
'masks': {
|
| 682 |
mask_name: image_to_base64(mask_img)
|
| 683 |
for mask_name, mask_img in mask_images.items()
|
| 684 |
},
|
| 685 |
|
| 686 |
+
# For frontend compatibility
|
| 687 |
'class_map': class_map_final.tolist(),
|
| 688 |
|
| 689 |
+
# Original image dimensions
|
| 690 |
'image_width': original_width,
|
| 691 |
'image_height': original_height
|
| 692 |
}
|
| 693 |
|
| 694 |
+
print("[SUCCESS] Analysis completed successfully!")
|
| 695 |
|
| 696 |
return frontend_result
|
| 697 |
|
| 698 |
if __name__ == "__main__":
    # Standalone smoke test: run the full pipeline for a sample location
    # (Krakow, Poland) and report the sizes of the generated base64 images.
    separator = "=" * 70

    print("\n" + separator)
    print("[TEST] TEST MODE - terramindFunctions.py")
    print(separator)

    result = analyze([50.0540, 19.9352], buffer_km=3, max_cloud_cover=10, days_back=60)

    if result:
        print("\n" + separator)
        print("[RESULT] GENERATED IMAGES:")
        print(separator)
        print(f" [OK] RGB: {len(result['rgb_base64'])} chars")
        print(f" [OK] Raw TerraMind: {len(result['raw_segmentation_base64'])} chars")
        print(f" [OK] Final segmentation: {len(result['segmentation_base64'])} chars")
        print(f" [OK] Index masks: {len(result['masks'])} pieces")
        for mask_name in result['masks']:
            print(f"      - {mask_name}")
        print(separator)
|
io-app-front/README.md
DELETED
|
@@ -1,73 +0,0 @@
|
|
| 1 |
-
# React + TypeScript + Vite
|
| 2 |
-
|
| 3 |
-
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
|
| 4 |
-
|
| 5 |
-
Currently, two official plugins are available:
|
| 6 |
-
|
| 7 |
-
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) (or [oxc](https://oxc.rs) when used in [rolldown-vite](https://vite.dev/guide/rolldown)) for Fast Refresh
|
| 8 |
-
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
|
| 9 |
-
|
| 10 |
-
## React Compiler
|
| 11 |
-
|
| 12 |
-
The React Compiler is not enabled on this template because of its impact on dev & build performances. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
|
| 13 |
-
|
| 14 |
-
## Expanding the ESLint configuration
|
| 15 |
-
|
| 16 |
-
If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
|
| 17 |
-
|
| 18 |
-
```js
|
| 19 |
-
export default defineConfig([
|
| 20 |
-
globalIgnores(['dist']),
|
| 21 |
-
{
|
| 22 |
-
files: ['**/*.{ts,tsx}'],
|
| 23 |
-
extends: [
|
| 24 |
-
// Other configs...
|
| 25 |
-
|
| 26 |
-
// Remove tseslint.configs.recommended and replace with this
|
| 27 |
-
tseslint.configs.recommendedTypeChecked,
|
| 28 |
-
// Alternatively, use this for stricter rules
|
| 29 |
-
tseslint.configs.strictTypeChecked,
|
| 30 |
-
// Optionally, add this for stylistic rules
|
| 31 |
-
tseslint.configs.stylisticTypeChecked,
|
| 32 |
-
|
| 33 |
-
// Other configs...
|
| 34 |
-
],
|
| 35 |
-
languageOptions: {
|
| 36 |
-
parserOptions: {
|
| 37 |
-
project: ['./tsconfig.node.json', './tsconfig.app.json'],
|
| 38 |
-
tsconfigRootDir: import.meta.dirname,
|
| 39 |
-
},
|
| 40 |
-
// other options...
|
| 41 |
-
},
|
| 42 |
-
},
|
| 43 |
-
])
|
| 44 |
-
```
|
| 45 |
-
|
| 46 |
-
You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules:
|
| 47 |
-
|
| 48 |
-
```js
|
| 49 |
-
// eslint.config.js
|
| 50 |
-
import reactX from 'eslint-plugin-react-x'
|
| 51 |
-
import reactDom from 'eslint-plugin-react-dom'
|
| 52 |
-
|
| 53 |
-
export default defineConfig([
|
| 54 |
-
globalIgnores(['dist']),
|
| 55 |
-
{
|
| 56 |
-
files: ['**/*.{ts,tsx}'],
|
| 57 |
-
extends: [
|
| 58 |
-
// Other configs...
|
| 59 |
-
// Enable lint rules for React
|
| 60 |
-
reactX.configs['recommended-typescript'],
|
| 61 |
-
// Enable lint rules for React DOM
|
| 62 |
-
reactDom.configs.recommended,
|
| 63 |
-
],
|
| 64 |
-
languageOptions: {
|
| 65 |
-
parserOptions: {
|
| 66 |
-
project: ['./tsconfig.node.json', './tsconfig.app.json'],
|
| 67 |
-
tsconfigRootDir: import.meta.dirname,
|
| 68 |
-
},
|
| 69 |
-
// other options...
|
| 70 |
-
},
|
| 71 |
-
},
|
| 72 |
-
])
|
| 73 |
-
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
io-app-front/index.html
CHANGED
|
@@ -2,9 +2,11 @@
|
|
| 2 |
<html lang="en">
|
| 3 |
<head>
|
| 4 |
<meta charset="UTF-8" />
|
| 5 |
-
<link rel="icon" type="image/svg+xml" href="/vite.svg" />
|
| 6 |
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
| 7 |
-
<
|
|
|
|
|
|
|
|
|
|
| 8 |
</head>
|
| 9 |
<body>
|
| 10 |
<div id="root"></div>
|
|
|
|
| 2 |
<html lang="en">
|
| 3 |
<head>
|
| 4 |
<meta charset="UTF-8" />
|
|
|
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
| 6 |
+
<link rel="preconnect" href="https://fonts.googleapis.com">
|
| 7 |
+
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
| 8 |
+
<link href="https://fonts.googleapis.com/css2?family=Google+Sans:ital,opsz,wght@0,17..18,400..700;1,17..18,400..700&display=swap" rel="stylesheet">
|
| 9 |
+
<title>TerraEye</title>
|
| 10 |
</head>
|
| 11 |
<body>
|
| 12 |
<div id="root"></div>
|
io-app-front/public/vite.svg
DELETED
io-app-front/src/App.jsx
CHANGED
|
@@ -1,26 +1,34 @@
|
|
| 1 |
import { useState } from "react"
|
| 2 |
import AnalysisPanel from './components/AnalysisPanel/AnalysisPanel'
|
| 3 |
import MapView from './components/MapView/MapView'
|
| 4 |
-
import { MantineProvider } from '@mantine/core'
|
| 5 |
import '@mantine/core/styles.css'
|
| 6 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
function App() {
|
| 8 |
const [selectedLocation, setSelectedLocation] = useState(null);
|
| 9 |
const [analysisResult, setAnalysisResult] = useState(null);
|
| 10 |
const [layersConfig, setLayersConfig] = useState({
|
| 11 |
-
firstLayer: 'rgb',
|
| 12 |
-
secondLayer: 'image',
|
| 13 |
});
|
|
|
|
| 14 |
const handleAnalysisComplete = (data) => {
|
| 15 |
setAnalysisResult(data);
|
| 16 |
};
|
|
|
|
| 17 |
const handleLocationChange = (locationData) => {
|
| 18 |
setSelectedLocation(locationData);
|
| 19 |
};
|
| 20 |
|
| 21 |
return (
|
| 22 |
-
<MantineProvider forceColorScheme='dark'>
|
| 23 |
-
|
| 24 |
<AnalysisPanel
|
| 25 |
selectedLocation={selectedLocation}
|
| 26 |
onLocationSelect={handleLocationChange}
|
|
@@ -29,16 +37,14 @@ function App() {
|
|
| 29 |
onLayersChange={setLayersConfig}
|
| 30 |
analysisResult={analysisResult}
|
| 31 |
/>
|
| 32 |
-
|
| 33 |
<MapView
|
| 34 |
selectedLocation={selectedLocation}
|
| 35 |
-
onLocationSelect={handleLocationChange}
|
| 36 |
analysisResult={analysisResult}
|
| 37 |
layersConfig={layersConfig}
|
| 38 |
/>
|
| 39 |
-
|
| 40 |
</MantineProvider>
|
| 41 |
-
)
|
| 42 |
}
|
| 43 |
|
| 44 |
export default App
|
|
|
|
| 1 |
import { useState } from "react"
|
| 2 |
import AnalysisPanel from './components/AnalysisPanel/AnalysisPanel'
|
| 3 |
import MapView from './components/MapView/MapView'
|
| 4 |
+
import { MantineProvider, createTheme } from '@mantine/core'
|
| 5 |
import '@mantine/core/styles.css'
|
| 6 |
|
| 7 |
+
const theme = createTheme({
|
| 8 |
+
fontFamily: 'Google Sans, sans-serif',
|
| 9 |
+
headings: {
|
| 10 |
+
fontFamily: 'Google Sans, sans-serif',
|
| 11 |
+
},
|
| 12 |
+
});
|
| 13 |
+
|
| 14 |
function App() {
|
| 15 |
const [selectedLocation, setSelectedLocation] = useState(null);
|
| 16 |
const [analysisResult, setAnalysisResult] = useState(null);
|
| 17 |
const [layersConfig, setLayersConfig] = useState({
|
| 18 |
+
firstLayer: 'rgb',
|
| 19 |
+
secondLayer: 'image',
|
| 20 |
});
|
| 21 |
+
|
| 22 |
const handleAnalysisComplete = (data) => {
|
| 23 |
setAnalysisResult(data);
|
| 24 |
};
|
| 25 |
+
|
| 26 |
const handleLocationChange = (locationData) => {
|
| 27 |
setSelectedLocation(locationData);
|
| 28 |
};
|
| 29 |
|
| 30 |
return (
|
| 31 |
+
<MantineProvider forceColorScheme='dark' theme={ theme }>
|
|
|
|
| 32 |
<AnalysisPanel
|
| 33 |
selectedLocation={selectedLocation}
|
| 34 |
onLocationSelect={handleLocationChange}
|
|
|
|
| 37 |
onLayersChange={setLayersConfig}
|
| 38 |
analysisResult={analysisResult}
|
| 39 |
/>
|
|
|
|
| 40 |
<MapView
|
| 41 |
selectedLocation={selectedLocation}
|
| 42 |
+
onLocationSelect={handleLocationChange}
|
| 43 |
analysisResult={analysisResult}
|
| 44 |
layersConfig={layersConfig}
|
| 45 |
/>
|
|
|
|
| 46 |
</MantineProvider>
|
| 47 |
+
)
|
| 48 |
}
|
| 49 |
|
| 50 |
export default App
|
io-app-front/src/assets/react.svg
DELETED
io-app-front/src/components/AdvancedAnalysisModal/AdvancedAnalysisModal.jsx
CHANGED
|
@@ -55,14 +55,13 @@ export default function AdvancedAnalysisModal({ opened, onClose, onRunCompare, i
|
|
| 55 |
>
|
| 56 |
<Stack gap="xl" h="100%">
|
| 57 |
|
| 58 |
-
{/* --- STAN: WYBÓR MODELI --- */}
|
| 59 |
{!results && (
|
| 60 |
<Center h={500}>
|
| 61 |
<Stack align="center" gap="xl" w="100%" maw={600}>
|
| 62 |
<Text size="lg" fw={500} c="dimmed" lts={1}>SELECT MODELS TO COMPARE</Text>
|
| 63 |
<Group grow w="100%">
|
| 64 |
-
<Select label="Model A" data={modelOptions} value={modelA} onChange={setModelA} disabled={isLoading} />
|
| 65 |
-
<Select label="Model B" data={modelOptions} value={modelB} onChange={setModelB} disabled={isLoading} />
|
| 66 |
</Group>
|
| 67 |
<Button fullWidth size="lg" color="blue" onClick={() => onRunCompare(modelA, modelB)} loading={isLoading}>
|
| 68 |
START ANALYSIS
|
|
@@ -71,30 +70,25 @@ export default function AdvancedAnalysisModal({ opened, onClose, onRunCompare, i
|
|
| 71 |
</Center>
|
| 72 |
)}
|
| 73 |
|
| 74 |
-
{/* --- STAN: WYNIKI (2 KOLUMNY) --- */}
|
| 75 |
{results && !isLoading && (
|
| 76 |
<div className={classes.dualLayout}>
|
| 77 |
|
| 78 |
-
{/* LEWA KOLUMNA */}
|
| 79 |
<div className={classes.modelColumn}>
|
| 80 |
<div className={classes.columnHeader}>{getModelLabel(modelA)}</div>
|
| 81 |
|
| 82 |
<div className={classes.imagesGrid}>
|
| 83 |
-
{/* 1. RGB */}
|
| 84 |
<ImageCard
|
| 85 |
src={results.modelA.rgb}
|
| 86 |
title="Satellite RGB"
|
| 87 |
subtitle=""
|
| 88 |
borderColor="#1971c2"
|
| 89 |
/>
|
| 90 |
-
{/* 2. Raw */}
|
| 91 |
<ImageCard
|
| 92 |
src={results.modelA.raw_segmentation}
|
| 93 |
title="Raw Output"
|
| 94 |
subtitle=""
|
| 95 |
borderColor="#f08c00"
|
| 96 |
/>
|
| 97 |
-
{/* 3. Final */}
|
| 98 |
<ImageCard
|
| 99 |
src={results.modelA.image}
|
| 100 |
title="Output(with Spectral Indices)"
|
|
@@ -104,26 +98,22 @@ export default function AdvancedAnalysisModal({ opened, onClose, onRunCompare, i
|
|
| 104 |
</div>
|
| 105 |
</div>
|
| 106 |
|
| 107 |
-
{/* PRAWA KOLUMNA */}
|
| 108 |
<div className={classes.modelColumn}>
|
| 109 |
<div className={classes.columnHeader}>{getModelLabel(modelB)}</div>
|
| 110 |
|
| 111 |
<div className={classes.imagesGrid}>
|
| 112 |
-
{/* 1. RGB */}
|
| 113 |
<ImageCard
|
| 114 |
src={results.modelB.rgb}
|
| 115 |
title="Satellite RGB"
|
| 116 |
subtitle=""
|
| 117 |
borderColor="#1971c2"
|
| 118 |
/>
|
| 119 |
-
{/* 2. Raw */}
|
| 120 |
<ImageCard
|
| 121 |
src={results.modelB.raw_segmentation}
|
| 122 |
title="Raw Output"
|
| 123 |
subtitle=""
|
| 124 |
borderColor="#f08c00"
|
| 125 |
/>
|
| 126 |
-
{/* 3. Final */}
|
| 127 |
<ImageCard
|
| 128 |
src={results.modelB.image}
|
| 129 |
title="Output(with Spectral Indices)"
|
|
@@ -133,7 +123,6 @@ export default function AdvancedAnalysisModal({ opened, onClose, onRunCompare, i
|
|
| 133 |
</div>
|
| 134 |
</div>
|
| 135 |
|
| 136 |
-
{/* MASKI (NA DOLE) */}
|
| 137 |
<div className={classes.masksSection}>
|
| 138 |
<div className={classes.masksHeader}>Spectral Indices Masks</div>
|
| 139 |
<div className={classes.masksGrid}>
|
|
@@ -155,16 +144,13 @@ export default function AdvancedAnalysisModal({ opened, onClose, onRunCompare, i
|
|
| 155 |
</div>
|
| 156 |
</div>
|
| 157 |
|
| 158 |
-
{/* METRYKI */}
|
| 159 |
{results.metrics && (
|
| 160 |
<div className={classes.metricsSection}>
|
| 161 |
|
| 162 |
-
{/* METRYKI DLA RAW SEGMENTATION */}
|
| 163 |
{results.metrics.raw && (
|
| 164 |
<div className={classes.metricsSubsection}>
|
| 165 |
<div className={classes.metricsHeader}>Metrics - Raw Segmentation (without Spectral Indices)</div>
|
| 166 |
|
| 167 |
-
{/* Główne metryki */}
|
| 168 |
<div className={classes.mainMetricsGrid}>
|
| 169 |
<MetricCard
|
| 170 |
label="Pixel Accuracy"
|
|
@@ -204,7 +190,6 @@ export default function AdvancedAnalysisModal({ opened, onClose, onRunCompare, i
|
|
| 204 |
/>
|
| 205 |
</div>
|
| 206 |
|
| 207 |
-
{/* Metryki dla każdej klasy - RAW */}
|
| 208 |
{results.metrics.raw.class_details && Object.keys(results.metrics.raw.class_details).length > 0 && (
|
| 209 |
<div className={classes.classDetailsSection}>
|
| 210 |
<div className={classes.classDetailsHeader}>Per-Class Metrics</div>
|
|
@@ -234,12 +219,10 @@ export default function AdvancedAnalysisModal({ opened, onClose, onRunCompare, i
|
|
| 234 |
</div>
|
| 235 |
)}
|
| 236 |
|
| 237 |
-
{/* METRYKI DLA CORRECTED SEGMENTATION */}
|
| 238 |
{results.metrics.corrected && (
|
| 239 |
<div className={classes.metricsSubsection}>
|
| 240 |
<div className={classes.metricsHeader}>Metrics - Corrected Segmentation (with Spectral Indices)</div>
|
| 241 |
|
| 242 |
-
{/* Główne metryki */}
|
| 243 |
<div className={classes.mainMetricsGrid}>
|
| 244 |
<MetricCard
|
| 245 |
label="Pixel Accuracy"
|
|
@@ -279,7 +262,6 @@ export default function AdvancedAnalysisModal({ opened, onClose, onRunCompare, i
|
|
| 279 |
/>
|
| 280 |
</div>
|
| 281 |
|
| 282 |
-
{/* Metryki dla każdej klasy - CORRECTED */}
|
| 283 |
{results.metrics.corrected.class_details && Object.keys(results.metrics.corrected.class_details).length > 0 && (
|
| 284 |
<div className={classes.classDetailsSection}>
|
| 285 |
<div className={classes.classDetailsHeader}>Per-Class Metrics</div>
|
|
@@ -319,7 +301,6 @@ export default function AdvancedAnalysisModal({ opened, onClose, onRunCompare, i
|
|
| 319 |
);
|
| 320 |
}
|
| 321 |
|
| 322 |
-
// KOMPONENT KARTY (Czysty, z Twoimi stylami ramki)
|
| 323 |
function ImageCard({ src, title, subtitle, borderColor }) {
|
| 324 |
return (
|
| 325 |
<div className={classes.imageCard}>
|
|
@@ -332,7 +313,6 @@ function ImageCard({ src, title, subtitle, borderColor }) {
|
|
| 332 |
);
|
| 333 |
}
|
| 334 |
|
| 335 |
-
// KOMPONENT KARTY METRYK
|
| 336 |
function MetricCard({ label, value, unit, color, highlight = false }) {
|
| 337 |
return (
|
| 338 |
<div className={classes.metricCard} style={{
|
|
|
|
| 55 |
>
|
| 56 |
<Stack gap="xl" h="100%">
|
| 57 |
|
|
|
|
| 58 |
{!results && (
|
| 59 |
<Center h={500}>
|
| 60 |
<Stack align="center" gap="xl" w="100%" maw={600}>
|
| 61 |
<Text size="lg" fw={500} c="dimmed" lts={1}>SELECT MODELS TO COMPARE</Text>
|
| 62 |
<Group grow w="100%">
|
| 63 |
+
<Select label="Model A" data={modelOptions} allowDeselect={false} value={modelA} onChange={setModelA} disabled={isLoading} />
|
| 64 |
+
<Select label="Model B" data={modelOptions} allowDeselect={false} value={modelB} onChange={setModelB} disabled={isLoading} />
|
| 65 |
</Group>
|
| 66 |
<Button fullWidth size="lg" color="blue" onClick={() => onRunCompare(modelA, modelB)} loading={isLoading}>
|
| 67 |
START ANALYSIS
|
|
|
|
| 70 |
</Center>
|
| 71 |
)}
|
| 72 |
|
|
|
|
| 73 |
{results && !isLoading && (
|
| 74 |
<div className={classes.dualLayout}>
|
| 75 |
|
|
|
|
| 76 |
<div className={classes.modelColumn}>
|
| 77 |
<div className={classes.columnHeader}>{getModelLabel(modelA)}</div>
|
| 78 |
|
| 79 |
<div className={classes.imagesGrid}>
|
|
|
|
| 80 |
<ImageCard
|
| 81 |
src={results.modelA.rgb}
|
| 82 |
title="Satellite RGB"
|
| 83 |
subtitle=""
|
| 84 |
borderColor="#1971c2"
|
| 85 |
/>
|
|
|
|
| 86 |
<ImageCard
|
| 87 |
src={results.modelA.raw_segmentation}
|
| 88 |
title="Raw Output"
|
| 89 |
subtitle=""
|
| 90 |
borderColor="#f08c00"
|
| 91 |
/>
|
|
|
|
| 92 |
<ImageCard
|
| 93 |
src={results.modelA.image}
|
| 94 |
title="Output(with Spectral Indices)"
|
|
|
|
| 98 |
</div>
|
| 99 |
</div>
|
| 100 |
|
|
|
|
| 101 |
<div className={classes.modelColumn}>
|
| 102 |
<div className={classes.columnHeader}>{getModelLabel(modelB)}</div>
|
| 103 |
|
| 104 |
<div className={classes.imagesGrid}>
|
|
|
|
| 105 |
<ImageCard
|
| 106 |
src={results.modelB.rgb}
|
| 107 |
title="Satellite RGB"
|
| 108 |
subtitle=""
|
| 109 |
borderColor="#1971c2"
|
| 110 |
/>
|
|
|
|
| 111 |
<ImageCard
|
| 112 |
src={results.modelB.raw_segmentation}
|
| 113 |
title="Raw Output"
|
| 114 |
subtitle=""
|
| 115 |
borderColor="#f08c00"
|
| 116 |
/>
|
|
|
|
| 117 |
<ImageCard
|
| 118 |
src={results.modelB.image}
|
| 119 |
title="Output(with Spectral Indices)"
|
|
|
|
| 123 |
</div>
|
| 124 |
</div>
|
| 125 |
|
|
|
|
| 126 |
<div className={classes.masksSection}>
|
| 127 |
<div className={classes.masksHeader}>Spectral Indices Masks</div>
|
| 128 |
<div className={classes.masksGrid}>
|
|
|
|
| 144 |
</div>
|
| 145 |
</div>
|
| 146 |
|
|
|
|
| 147 |
{results.metrics && (
|
| 148 |
<div className={classes.metricsSection}>
|
| 149 |
|
|
|
|
| 150 |
{results.metrics.raw && (
|
| 151 |
<div className={classes.metricsSubsection}>
|
| 152 |
<div className={classes.metricsHeader}>Metrics - Raw Segmentation (without Spectral Indices)</div>
|
| 153 |
|
|
|
|
| 154 |
<div className={classes.mainMetricsGrid}>
|
| 155 |
<MetricCard
|
| 156 |
label="Pixel Accuracy"
|
|
|
|
| 190 |
/>
|
| 191 |
</div>
|
| 192 |
|
|
|
|
| 193 |
{results.metrics.raw.class_details && Object.keys(results.metrics.raw.class_details).length > 0 && (
|
| 194 |
<div className={classes.classDetailsSection}>
|
| 195 |
<div className={classes.classDetailsHeader}>Per-Class Metrics</div>
|
|
|
|
| 219 |
</div>
|
| 220 |
)}
|
| 221 |
|
|
|
|
| 222 |
{results.metrics.corrected && (
|
| 223 |
<div className={classes.metricsSubsection}>
|
| 224 |
<div className={classes.metricsHeader}>Metrics - Corrected Segmentation (with Spectral Indices)</div>
|
| 225 |
|
|
|
|
| 226 |
<div className={classes.mainMetricsGrid}>
|
| 227 |
<MetricCard
|
| 228 |
label="Pixel Accuracy"
|
|
|
|
| 262 |
/>
|
| 263 |
</div>
|
| 264 |
|
|
|
|
| 265 |
{results.metrics.corrected.class_details && Object.keys(results.metrics.corrected.class_details).length > 0 && (
|
| 266 |
<div className={classes.classDetailsSection}>
|
| 267 |
<div className={classes.classDetailsHeader}>Per-Class Metrics</div>
|
|
|
|
| 301 |
);
|
| 302 |
}
|
| 303 |
|
|
|
|
| 304 |
function ImageCard({ src, title, subtitle, borderColor }) {
|
| 305 |
return (
|
| 306 |
<div className={classes.imageCard}>
|
|
|
|
| 313 |
);
|
| 314 |
}
|
| 315 |
|
|
|
|
| 316 |
function MetricCard({ label, value, unit, color, highlight = false }) {
|
| 317 |
return (
|
| 318 |
<div className={classes.metricCard} style={{
|
io-app-front/src/components/AdvancedAnalysisModal/AdvancedAnalysisModal.module.css
CHANGED
|
@@ -1,6 +1,4 @@
|
|
| 1 |
-
/* --- GŁÓWNY MODAL --- */
|
| 2 |
.modalContent {
|
| 3 |
-
/* Ciemne, szklane tło */
|
| 4 |
background-color: rgba(15, 15, 18, 0.95) !important;
|
| 5 |
backdrop-filter: blur(40px) saturate(180%) !important;
|
| 6 |
border: 1px solid rgba(255, 255, 255, 0.08) !important;
|
|
@@ -21,17 +19,15 @@
|
|
| 21 |
.modalBody { background: transparent !important; padding: 0 32px 32px 32px !important; }
|
| 22 |
.modalTitle { font-size: 1.25rem; font-weight: 700; letter-spacing: 0.5px; color: white; }
|
| 23 |
|
| 24 |
-
/* --- UKŁAD DWUKOLUMNOWY (GRID) --- */
|
| 25 |
.dualLayout {
|
| 26 |
display: grid;
|
| 27 |
-
grid-template-columns: 1fr 1fr;
|
| 28 |
-
gap: 32px;
|
| 29 |
align-items: start;
|
| 30 |
}
|
| 31 |
|
| 32 |
-
/* Pojedyncza sekcja modelu (lewa lub prawa) */
|
| 33 |
.modelColumn {
|
| 34 |
-
background-color: rgba(255, 255, 255, 0.03);
|
| 35 |
border: 1px solid rgba(255, 255, 255, 0.06);
|
| 36 |
border-radius: 12px;
|
| 37 |
padding: 24px;
|
|
@@ -49,15 +45,12 @@
|
|
| 49 |
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
|
| 50 |
}
|
| 51 |
|
| 52 |
-
/* --- GRID OBRAZÓW (Dokładnie jak w Twoim przykładzie) --- */
|
| 53 |
.imagesGrid {
|
| 54 |
display: grid;
|
| 55 |
-
/* To wymusza minimalną szerokość 280px. */
|
| 56 |
grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
|
| 57 |
gap: 20px;
|
| 58 |
}
|
| 59 |
|
| 60 |
-
/* --- KARTA OBRAZU --- */
|
| 61 |
.imageCard {
|
| 62 |
text-align: center;
|
| 63 |
background: rgba(0, 0, 0, 0.2);
|
|
@@ -78,13 +71,12 @@
|
|
| 78 |
margin-bottom: 10px;
|
| 79 |
}
|
| 80 |
|
| 81 |
-
/* Ramka obrazka */
|
| 82 |
.cardFrame {
|
| 83 |
width: 100%;
|
| 84 |
background-color: #000;
|
| 85 |
border-radius: 8px;
|
| 86 |
border-width: 2px;
|
| 87 |
-
border-style: solid;
|
| 88 |
overflow: hidden;
|
| 89 |
box-shadow: 0 4px 20px rgba(0,0,0,0.4);
|
| 90 |
}
|
|
@@ -102,9 +94,8 @@
|
|
| 102 |
color: rgba(255, 255, 255, 0.5);
|
| 103 |
}
|
| 104 |
|
| 105 |
-
/* --- SEKCJA MASEK (Dół) --- */
|
| 106 |
.masksSection {
|
| 107 |
-
grid-column: 1 / -1;
|
| 108 |
background-color: rgba(255, 255, 255, 0.02);
|
| 109 |
border: 1px solid rgba(255, 255, 255, 0.06);
|
| 110 |
border-radius: 12px;
|
|
@@ -121,7 +112,6 @@
|
|
| 121 |
text-transform: uppercase;
|
| 122 |
}
|
| 123 |
|
| 124 |
-
/* Grid masek (Twoje 250px) */
|
| 125 |
.masksGrid {
|
| 126 |
display: grid;
|
| 127 |
grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
|
|
@@ -129,7 +119,7 @@
|
|
| 129 |
}
|
| 130 |
|
| 131 |
.maskCard {
|
| 132 |
-
border: 1px solid;
|
| 133 |
border-radius: 8px;
|
| 134 |
overflow: hidden;
|
| 135 |
background: rgba(0, 0, 0, 0.3);
|
|
@@ -141,7 +131,7 @@
|
|
| 141 |
padding: 8px 12px;
|
| 142 |
font-size: 0.8rem;
|
| 143 |
font-weight: 700;
|
| 144 |
-
color: white;
|
| 145 |
}
|
| 146 |
|
| 147 |
.maskImg {
|
|
@@ -171,9 +161,8 @@
|
|
| 171 |
display: inline-block;
|
| 172 |
}
|
| 173 |
|
| 174 |
-
/* --- SEKCJA METRYK --- */
|
| 175 |
.metricsSection {
|
| 176 |
-
grid-column: 1 / -1;
|
| 177 |
background-color: rgba(255, 255, 255, 0.02);
|
| 178 |
border: 1px solid rgba(255, 255, 255, 0.06);
|
| 179 |
border-radius: 12px;
|
|
@@ -181,7 +170,6 @@
|
|
| 181 |
margin-top: 20px;
|
| 182 |
}
|
| 183 |
|
| 184 |
-
/* Podsekcja metryk (raw/corrected) */
|
| 185 |
.metricsSubsection {
|
| 186 |
margin-bottom: 32px;
|
| 187 |
}
|
|
@@ -201,7 +189,6 @@
|
|
| 201 |
letter-spacing: 0.5px;
|
| 202 |
}
|
| 203 |
|
| 204 |
-
/* Grid głównych metryk */
|
| 205 |
.mainMetricsGrid {
|
| 206 |
display: grid;
|
| 207 |
grid-template-columns: repeat(auto-fit, minmax(160px, 1fr));
|
|
@@ -249,7 +236,6 @@
|
|
| 249 |
font-weight: 500;
|
| 250 |
}
|
| 251 |
|
| 252 |
-
/* --- SEKCJA METRYK PER-CLASS --- */
|
| 253 |
.classDetailsSection {
|
| 254 |
margin-top: 24px;
|
| 255 |
border-top: 1px solid rgba(255, 255, 255, 0.06);
|
|
|
|
|
|
|
| 1 |
.modalContent {
|
|
|
|
| 2 |
background-color: rgba(15, 15, 18, 0.95) !important;
|
| 3 |
backdrop-filter: blur(40px) saturate(180%) !important;
|
| 4 |
border: 1px solid rgba(255, 255, 255, 0.08) !important;
|
|
|
|
| 19 |
.modalBody { background: transparent !important; padding: 0 32px 32px 32px !important; }
|
| 20 |
.modalTitle { font-size: 1.25rem; font-weight: 700; letter-spacing: 0.5px; color: white; }
|
| 21 |
|
|
|
|
| 22 |
.dualLayout {
|
| 23 |
display: grid;
|
| 24 |
+
grid-template-columns: 1fr 1fr;
|
| 25 |
+
gap: 32px;
|
| 26 |
align-items: start;
|
| 27 |
}
|
| 28 |
|
|
|
|
| 29 |
.modelColumn {
|
| 30 |
+
background-color: rgba(255, 255, 255, 0.03);
|
| 31 |
border: 1px solid rgba(255, 255, 255, 0.06);
|
| 32 |
border-radius: 12px;
|
| 33 |
padding: 24px;
|
|
|
|
| 45 |
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
|
| 46 |
}
|
| 47 |
|
|
|
|
| 48 |
.imagesGrid {
|
| 49 |
display: grid;
|
|
|
|
| 50 |
grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
|
| 51 |
gap: 20px;
|
| 52 |
}
|
| 53 |
|
|
|
|
| 54 |
.imageCard {
|
| 55 |
text-align: center;
|
| 56 |
background: rgba(0, 0, 0, 0.2);
|
|
|
|
| 71 |
margin-bottom: 10px;
|
| 72 |
}
|
| 73 |
|
|
|
|
| 74 |
.cardFrame {
|
| 75 |
width: 100%;
|
| 76 |
background-color: #000;
|
| 77 |
border-radius: 8px;
|
| 78 |
border-width: 2px;
|
| 79 |
+
border-style: solid;
|
| 80 |
overflow: hidden;
|
| 81 |
box-shadow: 0 4px 20px rgba(0,0,0,0.4);
|
| 82 |
}
|
|
|
|
| 94 |
color: rgba(255, 255, 255, 0.5);
|
| 95 |
}
|
| 96 |
|
|
|
|
| 97 |
.masksSection {
|
| 98 |
+
grid-column: 1 / -1;
|
| 99 |
background-color: rgba(255, 255, 255, 0.02);
|
| 100 |
border: 1px solid rgba(255, 255, 255, 0.06);
|
| 101 |
border-radius: 12px;
|
|
|
|
| 112 |
text-transform: uppercase;
|
| 113 |
}
|
| 114 |
|
|
|
|
| 115 |
.masksGrid {
|
| 116 |
display: grid;
|
| 117 |
grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
|
|
|
|
| 119 |
}
|
| 120 |
|
| 121 |
.maskCard {
|
| 122 |
+
border: 1px solid;
|
| 123 |
border-radius: 8px;
|
| 124 |
overflow: hidden;
|
| 125 |
background: rgba(0, 0, 0, 0.3);
|
|
|
|
| 131 |
padding: 8px 12px;
|
| 132 |
font-size: 0.8rem;
|
| 133 |
font-weight: 700;
|
| 134 |
+
color: white;
|
| 135 |
}
|
| 136 |
|
| 137 |
.maskImg {
|
|
|
|
| 161 |
display: inline-block;
|
| 162 |
}
|
| 163 |
|
|
|
|
| 164 |
.metricsSection {
|
| 165 |
+
grid-column: 1 / -1;
|
| 166 |
background-color: rgba(255, 255, 255, 0.02);
|
| 167 |
border: 1px solid rgba(255, 255, 255, 0.06);
|
| 168 |
border-radius: 12px;
|
|
|
|
| 170 |
margin-top: 20px;
|
| 171 |
}
|
| 172 |
|
|
|
|
| 173 |
.metricsSubsection {
|
| 174 |
margin-bottom: 32px;
|
| 175 |
}
|
|
|
|
| 189 |
letter-spacing: 0.5px;
|
| 190 |
}
|
| 191 |
|
|
|
|
| 192 |
.mainMetricsGrid {
|
| 193 |
display: grid;
|
| 194 |
grid-template-columns: repeat(auto-fit, minmax(160px, 1fr));
|
|
|
|
| 236 |
font-weight: 500;
|
| 237 |
}
|
| 238 |
|
|
|
|
| 239 |
.classDetailsSection {
|
| 240 |
margin-top: 24px;
|
| 241 |
border-top: 1px solid rgba(255, 255, 255, 0.06);
|
io-app-front/src/components/AnalysisOptions/AnalysisOptions.jsx
CHANGED
|
@@ -55,11 +55,12 @@ export default function AnalysisOptions({ values, onChange }) {
|
|
| 55 |
|
| 56 |
<Select
|
| 57 |
label="Model Version"
|
|
|
|
| 58 |
data={[
|
| 59 |
-
{ value: 'terramind_v1_large_generate', label: 'Terramind v1 Large' },
|
| 60 |
-
{ value: 'terramind_v1_base_generate', label: 'Terramind v1 Base' },
|
| 61 |
-
{ value: 'terramind_v1_small_generate', label: 'Terramind v1 Small' },
|
| 62 |
{ value: 'terramind_v1_tiny_generate', label: 'Terramind v1 Tiny' },
|
|
|
|
|
|
|
|
|
|
| 63 |
]}
|
| 64 |
value={values.model}
|
| 65 |
onChange={(val) => handleChange('model', val)}
|
|
|
|
| 55 |
|
| 56 |
<Select
|
| 57 |
label="Model Version"
|
| 58 |
+
allowDeselect={false}
|
| 59 |
data={[
|
|
|
|
|
|
|
|
|
|
| 60 |
{ value: 'terramind_v1_tiny_generate', label: 'Terramind v1 Tiny' },
|
| 61 |
+
{ value: 'terramind_v1_small_generate', label: 'Terramind v1 Small' },
|
| 62 |
+
{ value: 'terramind_v1_base_generate', label: 'Terramind v1 Base' },
|
| 63 |
+
{ value: 'terramind_v1_large_generate', label: 'Terramind v1 Large' },
|
| 64 |
]}
|
| 65 |
value={values.model}
|
| 66 |
onChange={(val) => handleChange('model', val)}
|
io-app-front/src/components/AnalysisOptions/AnalysisOptions.module.css
CHANGED
|
@@ -8,7 +8,7 @@
|
|
| 8 |
.inputLabel {
|
| 9 |
font-size: 13px !important;
|
| 10 |
font-weight: 600 !important;
|
| 11 |
-
color: #868e96 !important;
|
| 12 |
margin-bottom: 5px;
|
| 13 |
}
|
| 14 |
|
|
|
|
| 8 |
.inputLabel {
|
| 9 |
font-size: 13px !important;
|
| 10 |
font-weight: 600 !important;
|
| 11 |
+
color: #868e96 !important;
|
| 12 |
margin-bottom: 5px;
|
| 13 |
}
|
| 14 |
|
io-app-front/src/components/AnalysisPanel/AnalysisPanel.jsx
CHANGED
|
@@ -37,7 +37,7 @@ function AnalysisPanel({
|
|
| 37 |
return (
|
| 38 |
<div className={styles.sidebar}>
|
| 39 |
<div className={styles.scrollArea}>
|
| 40 |
-
<Title order={3} c="white" mb="md">
|
| 41 |
|
| 42 |
<SearchBar onLocationSelect={onLocationSelect} />
|
| 43 |
|
|
|
|
| 37 |
return (
|
| 38 |
<div className={styles.sidebar}>
|
| 39 |
<div className={styles.scrollArea}>
|
| 40 |
+
<Title order={3} c="white" mb="md">TerraEye</Title>
|
| 41 |
|
| 42 |
<SearchBar onLocationSelect={onLocationSelect} />
|
| 43 |
|
io-app-front/src/components/LayerOpacitySlider/LayerOpacitySlider.module.css
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
.floatingContainer {
|
| 2 |
position: absolute;
|
| 3 |
bottom: 40px;
|
| 4 |
-
left:
|
| 5 |
transform: translateX(-50%);
|
| 6 |
width: 600px;
|
| 7 |
z-index: 20;
|
|
@@ -20,3 +20,4 @@
|
|
| 20 |
.sliderWidth {
|
| 21 |
width: 100%;
|
| 22 |
}
|
|
|
|
|
|
| 1 |
.floatingContainer {
|
| 2 |
position: absolute;
|
| 3 |
bottom: 40px;
|
| 4 |
+
left: calc(31% + 500px);
|
| 5 |
transform: translateX(-50%);
|
| 6 |
width: 600px;
|
| 7 |
z-index: 20;
|
|
|
|
| 20 |
.sliderWidth {
|
| 21 |
width: 100%;
|
| 22 |
}
|
| 23 |
+
|
io-app-front/src/components/SearchBar/SearchBar.module.css
CHANGED
|
@@ -2,6 +2,7 @@
|
|
| 2 |
background-color: #25262b;
|
| 3 |
color: #fff;
|
| 4 |
border-color: #373a40;
|
|
|
|
| 5 |
}
|
| 6 |
|
| 7 |
.searchInput:focus {
|
|
|
|
| 2 |
background-color: #25262b;
|
| 3 |
color: #fff;
|
| 4 |
border-color: #373a40;
|
| 5 |
+
border-radius: 20px;
|
| 6 |
}
|
| 7 |
|
| 8 |
.searchInput:focus {
|
io-app-front/src/hooks/useMap.js
CHANGED
|
@@ -36,6 +36,14 @@ export const useMap = (mapContainerRef, mapboxAccessToken, selectedLocation, ana
|
|
| 36 |
'bottom-right'
|
| 37 |
);
|
| 38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
return () => {
|
| 40 |
mapRef.current?.remove();
|
| 41 |
markerRef.current?.remove();
|
|
|
|
| 36 |
'bottom-right'
|
| 37 |
);
|
| 38 |
|
| 39 |
+
mapRef.current.addControl(
|
| 40 |
+
new mapboxgl.NavigationControl({
|
| 41 |
+
showZoom: true,
|
| 42 |
+
showCompass: false
|
| 43 |
+
}),
|
| 44 |
+
'bottom-right'
|
| 45 |
+
);
|
| 46 |
+
|
| 47 |
return () => {
|
| 48 |
mapRef.current?.remove();
|
| 49 |
markerRef.current?.remove();
|
io-app-front/src/utils/mapUtils.js
CHANGED
|
@@ -41,7 +41,20 @@ export const CLASS_METADATA = {
|
|
| 41 |
'Tereny podmokłe': { label: 'Flooded vegetation', color: 'rgb(0, 150, 160)' },
|
| 42 |
'Namorzyny': { label: 'Mangroves', color: 'rgb(0, 207, 117)' },
|
| 43 |
'Mchy i porosty': { label: 'Moss & Lichen', color: 'rgb(250, 230, 160)' },
|
| 44 |
-
'Brak danych': { label: 'No Data', color: 'rgb(100, 100, 100)' }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 45 |
};
|
| 46 |
|
| 47 |
export const getMetadata = (className) => {
|
|
|
|
| 41 |
'Tereny podmokłe': { label: 'Flooded vegetation', color: 'rgb(0, 150, 160)' },
|
| 42 |
'Namorzyny': { label: 'Mangroves', color: 'rgb(0, 207, 117)' },
|
| 43 |
'Mchy i porosty': { label: 'Moss & Lichen', color: 'rgb(250, 230, 160)' },
|
| 44 |
+
'Brak danych': { label: 'No Data', color: 'rgb(100, 100, 100)' },
|
| 45 |
+
// English labels returned by backend
|
| 46 |
+
'No data': { label: 'No data', color: 'rgb(100, 100, 100)' },
|
| 47 |
+
'Trees / Forest': { label: 'Trees / Forest', color: 'rgb(0, 100, 0)' },
|
| 48 |
+
'Shrubs': { label: 'Shrubs', color: 'rgb(255, 187, 34)' },
|
| 49 |
+
'Grass / Meadows': { label: 'Grass / Meadows', color: 'rgb(255, 255, 76)' },
|
| 50 |
+
'Cultivated crops': { label: 'Cultivated crops', color: 'rgb(240, 150, 255)' },
|
| 51 |
+
'Buildings': { label: 'Buildings', color: 'rgb(250, 0, 0)' },
|
| 52 |
+
'Bare ground': { label: 'Bare ground', color: 'rgb(180, 180, 180)' },
|
| 53 |
+
'Snow and ice': { label: 'Snow and ice', color: 'rgb(240, 240, 240)' },
|
| 54 |
+
'Water': { label: 'Water', color: 'rgb(0, 100, 200)' },
|
| 55 |
+
'Wetlands': { label: 'Wetlands', color: 'rgb(0, 150, 160)' },
|
| 56 |
+
'Mangroves': { label: 'Mangroves', color: 'rgb(0, 207, 117)' },
|
| 57 |
+
'Lichens and moss': { label: 'Lichens and moss', color: 'rgb(250, 230, 160)' }
|
| 58 |
};
|
| 59 |
|
| 60 |
export const getMetadata = (className) => {
|