JakubBodzioch commited on
Commit
89eaf8a
·
0 Parent(s):

Ultra clean build - no venv on disk

Browse files
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python virtual environment
2
+ io-app-backend/venv/
3
+ io-app-backend/__pycache__/
4
+ *.pyc
5
+
6
+ # Node modules and build files
7
+ io-app-front/node_modules/
8
+ io-app-front/dist/
9
+ io-app-front/build/
10
+
11
+ # OS files
12
+ .DS_Store
13
+ Thumbs.db
Dockerfile ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- STAGE 1: Build Frontend (Vite/React) ---
FROM node:18 AS build-step
WORKDIR /app/frontend

# Copy npm configuration files from the frontend directory
COPY io-app-front/package*.json ./
RUN npm install

# Copy the rest of the frontend source code and build it
COPY io-app-front/ ./
RUN npm run build

# --- STAGE 2: Backend Server (Python + Flask) ---
FROM python:3.9-slim
WORKDIR /app

# Install system dependencies required for geospatial libraries (GDAL/Rasterio)
RUN apt-get update && apt-get install -y \
build-essential \
libgdal-dev \
python3-dev \
&& rm -rf /var/lib/apt/lists/*

# Copy requirements.txt and install Python libraries
COPY io-app-backend/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Set the working directory for the backend
WORKDIR /app/io-app-backend

# Copy the built frontend (Vite's 'dist' folder) to Flask's static directory
COPY --from=build-step /app/frontend/dist /app/io-app-backend/static

# Copy all backend source code
COPY io-app-backend/ .

# Set environment variables for Flask and Hugging Face (default port 7860)
# NOTE(review): app.py invokes app.run(host='127.0.0.1', port=5000) directly,
# which ignores FLASK_RUN_HOST/FLASK_RUN_PORT — the server would not listen on
# the exposed port 7860. Confirm app.py reads these variables (or switch the
# CMD to `flask run`, which honours them).
ENV FLASK_APP=app.py
ENV FLASK_RUN_HOST=0.0.0.0
ENV FLASK_RUN_PORT=7860

EXPOSE 7860

# Launch the application
CMD ["python", "app.py"]
README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: TerraMind Spectral Gaze
3
+ emoji: 😻
4
+ colorFrom: green
5
+ colorTo: red
6
+ sdk: docker
7
+ pinned: false
8
+ license: apache-2.0
9
+ short_description: Interactive web app for comparing TerraMind model versions
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
io-app-backend/app.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, request, jsonify
2
+ from flask_cors import CORS
3
+ import terramindFunctions as tm
4
+ from terramindFunctions import analyze
5
+ from evaluate_system import run_evaluation_with_models
6
+ import traceback
7
+
8
+ app = Flask(__name__)
9
+ # CORS(app, resources={
10
+ # r"/api/*": {
11
+ # "origins": ["http://localhost:5173", "http://127.0.0.1:5173"],
12
+ # "methods": ["POST", "GET", "OPTIONS"],
13
+ # "allow_headers": ["Content-Type", "Authorization"]
14
+ # }
15
+ # })
16
+ CORS(app)
17
+
18
@app.route('/api/analyze', methods=['POST'])
def analyze_endpoint():
    """
    Run a single-model land-cover analysis for one map location.

    Expects JSON: {"location": {"lat", "lng"}, "params": {...optional...}}.
    Returns JSON with statistics and base64 PNG images wrapped as data URIs,
    or {'success': False, 'error': ...} with status 400/404/500.
    """
    try:
        data = request.json
        print(f"📡 Otrzymano żądanie: {data}")

        location = data.get('location', {})
        lat = location.get('lat')
        lng = location.get('lng')

        # Optional tuning parameters with server-side defaults.
        params = data.get('params', {})
        buffer_km = params.get('bufferKm', 5)
        max_cloud_cover = params.get('maxCloudCover', 20)
        days_back = params.get('daysBack', 180)
        model_name = params.get('model', 'terramind_v1_large_generate')

        # Coordinates are mandatory — reject the request without them.
        if lat is None or lng is None:
            return jsonify({
                'success': False,
                'error': 'Brak współrzędnych (lat/lng)'
            }), 400

        print(f"🚀 Uruchamiam analizę dla: [{lat}, {lng}]")
        print(f"⚙️ Parametry: Buffer={buffer_km}km, Chmury<{max_cloud_cover}%, Historia={days_back}dni")

        # Heavy lifting: imagery download + model inference in terramindFunctions.
        result = analyze(
            location_data=[lat, lng],
            buffer_km=buffer_km,
            max_cloud_cover=max_cloud_cover,
            days_back=days_back,
            model_name=model_name,
            show_visualization=False,
            save_files=False
        )

        # None means no usable satellite scene could be downloaded.
        if result is None:
            return jsonify({
                'success': False,
                'error': 'Nie udało się pobrać danych satelitarnych.'
            }), 404

        # Prefix the raw base64 payloads so the frontend can use them directly
        # as <img src="data:image/png;base64,...">.
        response = {
            'success': True,
            'lat': result.get('lat'),
            'lon': result.get('lon'),
            'title': result.get('title'),
            'date': result.get('date'),
            'scene_id': result.get('scene_id'),
            'statistics': result.get('statistics'),

            # Main images (with data-URI prefix).
            'image': f"data:image/png;base64,{result.get('segmentation_base64')}",
            'rgb': f"data:image/png;base64,{result.get('rgb_base64')}",
            'raw_segmentation': f"data:image/png;base64,{result.get('raw_segmentation_base64')}",

            # Spectral-index masks (each with the same prefix).
            'masks': {
                mask_name: f"data:image/png;base64,{mask_base64}"
                for mask_name, mask_base64 in result.get('masks', {}).items()
            }
        }

        print(f"✅ Wysyłam wynik do frontendu")
        print(f" 📅 Data zdjęcia: {result.get('date')}")
        print(f" 🖼️ Obrazów: 3 główne + {len(result.get('masks', {}))} masek")

        return jsonify(response)

    except Exception as e:
        # Catch-all boundary: log the traceback, return the message as JSON.
        print(f"❌ Błąd podczas analizy: {str(e)}")
        traceback.print_exc()
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
94
+
95
@app.route('/api/advanced-analyze', methods=['POST'])
def advanced_analyze_endpoint():
    """
    Comparative analysis endpoint: runs two selected models on the same scene
    and returns per-model images (RGB, raw and corrected segmentations),
    spectral-index masks, and comparison metrics.
    """
    try:
        data = request.json
        print(f"📡 Otrzymano żądanie zaawansowanej analizy: {data}")

        location = data.get('location', {})
        lat = location.get('lat')
        lng = location.get('lng')

        # Model names default to small-vs-large comparison.
        params = data.get('params', {})
        buffer_km = params.get('bufferKm', 5)
        model_a = params.get('modelA', 'terramind_v1_small_generate')
        model_b = params.get('modelB', 'terramind_v1_large_generate')

        if lat is None or lng is None:
            return jsonify({
                'success': False,
                'error': 'Brak współrzędnych (lat/lng)'
            }), 400

        print(f"🚀 Uruchamiam zaawansowaną analizę dla: [{lat}, {lng}]")
        print(f"📊 Porównanie: {model_a} vs {model_b}")

        # Delegates the heavy work: download, two inferences, metric computation.
        eval_result = run_evaluation_with_models(lat, lng, buffer_km, model_a, model_b)

        if 'error' in eval_result:
            return jsonify({
                'success': False,
                'error': eval_result.get('error')
            }), 404

        # Convert the class maps to images (via terramindFunctions helpers).
        print("🖼️ Konwertowanie map na obrazy...")
        import base64
        import io
        from PIL import Image
        import numpy as np

        # Class maps with and without spectral corrections.
        model_a_map = eval_result['maps']['modelA']          # corrected
        model_a_map_raw = eval_result['maps']['modelA_raw']  # uncorrected
        model_b_map = eval_result['maps']['modelB']          # corrected
        model_b_map_raw = eval_result['maps']['modelB_raw']  # uncorrected
        raw_data = eval_result['raw_data']
        input_tensor = eval_result['input_tensor']

        def rgb_to_base64(rgb_array):
            """Encode an RGB numpy array as a base64 PNG string."""
            img = Image.fromarray(rgb_array.astype(np.uint8))
            buf = io.BytesIO()
            img.save(buf, format='PNG')
            buf.seek(0)
            return base64.b64encode(buf.read()).decode('utf-8')

        # True-colour preview built from the shared input tensor.
        rgb_image = tm.create_rgb_image(input_tensor)
        rgb_base64 = rgb_to_base64(rgb_image)

        # Segmentation maps: raw (model output) and final (after corrections).
        model_a_raw_segmentation = tm.create_segmentation_image(model_a_map_raw)
        model_a_segmentation = tm.create_segmentation_image(model_a_map)
        model_b_raw_segmentation = tm.create_segmentation_image(model_b_map_raw)
        model_b_segmentation = tm.create_segmentation_image(model_b_map)

        model_a_raw_seg_base64 = rgb_to_base64(model_a_raw_segmentation)
        model_a_seg_base64 = rgb_to_base64(model_a_segmentation)
        model_b_raw_seg_base64 = rgb_to_base64(model_b_raw_segmentation)
        model_b_seg_base64 = rgb_to_base64(model_b_segmentation)

        # Spectral-index masks, generated the same way as in analyze().
        indices = eval_result['indices']
        masks_dict = {}

        index_masks = tm.generate_index_masks(indices)

        if isinstance(index_masks, dict):
            for mask_name, mask_array in index_masks.items():
                try:
                    # Binary mask -> 0/255 grayscale PNG.
                    mask_binary = mask_array.astype(np.uint8) * 255
                    img_mask = Image.fromarray(mask_binary, mode='L')
                    buf_mask = io.BytesIO()
                    img_mask.save(buf_mask, format='PNG')
                    buf_mask.seek(0)
                    masks_dict[mask_name] = base64.b64encode(buf_mask.read()).decode('utf-8')
                except Exception as e:
                    # Best-effort: skip masks that fail to convert.
                    print(f"⚠️ Błąd konwersji maski {mask_name}: {e}")

        # NOTE(review): model_a.split('_')[2] assumes names shaped like
        # 'terramind_v1_<size>_generate'; any other name raises IndexError
        # (caught by the outer handler as a 500) — confirm the allowed names.
        response = {
            'success': True,
            'date': eval_result.get('date'),

            # Images per model.
            'modelA': {
                'name': model_a.split('_')[2].upper(),
                'rgb': f"data:image/png;base64,{rgb_base64}",
                'raw_segmentation': f"data:image/png;base64,{model_a_raw_seg_base64}",
                'image': f"data:image/png;base64,{model_a_seg_base64}",
                'masks': {
                    mask_name: f"data:image/png;base64,{mask_base64}"
                    for mask_name, mask_base64 in masks_dict.items()
                }
            },
            'modelB': {
                'name': model_b.split('_')[2].upper(),
                'rgb': f"data:image/png;base64,{rgb_base64}",
                'raw_segmentation': f"data:image/png;base64,{model_b_raw_seg_base64}",
                'image': f"data:image/png;base64,{model_b_seg_base64}",
                'masks': {
                    mask_name: f"data:image/png;base64,{mask_base64}"
                    for mask_name, mask_base64 in masks_dict.items()
                }
            },

            # Comparison metrics computed in run_evaluation_with_models().
            'metrics': eval_result.get('metrics', {})
        }

        print(f"✅ Wysyłam zaawansowaną analizę do frontendu")
        print(f" 📅 Data zdjęcia: {eval_result.get('date')}")
        print(f" 📊 Metryki: {list(eval_result.get('metrics', {}).keys())}")

        return jsonify(response)

    except Exception as e:
        print(f"❌ Błąd podczas zaawansowanej analizy: {str(e)}")
        traceback.print_exc()
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
238
+
239
@app.route('/api/health', methods=['GET'])
def health_check():
    """Lightweight liveness probe polled by the frontend."""
    payload = {'status': 'ok', 'message': 'Backend działa poprawnie'}
    return jsonify(payload)
242
+
243
+
244
+
245
if __name__ == '__main__':
    # Fix: the server previously hard-coded host='127.0.0.1', port=5000, which
    # silently ignored the FLASK_RUN_HOST/FLASK_RUN_PORT variables set in the
    # Dockerfile (0.0.0.0:7860) and made the container unreachable on the
    # exposed port. Read them from the environment, keeping the old values as
    # defaults so bare local runs behave exactly as before.
    import os

    print("\n" + "="*60)
    print("🚀 Uruchamianie serwera Flask...")
    print("="*60)
    print("📍 Endpointy:")
    print(" POST /api/analyze - Analiza obrazu satelitarnego")
    print(" POST /api/advanced-analyze - Zaawansowana analiza z metrykami")
    print(" GET /api/health - Sprawdzenie stanu serwera")
    print("="*60 + "\n")

    host = os.environ.get('FLASK_RUN_HOST', '127.0.0.1')
    port = int(os.environ.get('FLASK_RUN_PORT', '5000'))
    app.run(debug=True, host=host, port=port)
io-app-backend/evaluate_system.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import terramindFunctions as tm
3
+ from terratorch import FULL_MODEL_REGISTRY
4
+ from metrics import calculate_precision,calculate_recall,calculate_accuracy, calculate_miou, calculate_final_score, calculate_fw_iou, calculate_dice_score
5
+
6
+ # Konfiguracja
7
+ DEVICE = tm.device
8
+
9
def load_model(model_name):
    """
    Build an eval-mode TerraMind model from the terratorch registry.

    Falls back to 'terramind_v1_large_generate' when the requested model
    cannot be built, and returns None when the fallback fails as well.

    Improvements over the original: the duplicated registry-build boilerplate
    is factored into a helper, and the fallback is skipped when the requested
    model already *is* the fallback (retrying an identical build is futile).
    """
    def _build(name):
        # Shared build configuration: S2 L2A input -> LULC segmentation output.
        model = FULL_MODEL_REGISTRY.build(
            name,
            modalities=["S2L2A"],
            output_modalities=["LULC"],
            pretrained=True,
            standardize=True,
        ).to(DEVICE)
        model.eval()
        return model

    fallback_name = "terramind_v1_large_generate"

    print(f"⏳ Ładowanie modelu: {model_name}...")
    try:
        return _build(model_name)
    except Exception as e:
        print(f"⚠️ Błąd ładowania modelu {model_name}: {e}")
        if model_name == fallback_name:
            # The failed model is the fallback itself — nothing left to try.
            return None
        print(f" Fallback: próbuję terramind_v1_large_generate...")
        try:
            return _build(fallback_name)
        except Exception as e2:
            print(f"❌ Błąd ładowania fallback modelu: {e2}")
            return None
38
+
39
def load_teacher_model():
    """Load the large TerraMind model used as the reference ("teacher")."""
    print(f"⏳ Ładowanie Nauczyciela...")
    teacher_name = "terramind_v1_large_generate"
    return load_model(teacher_name)
43
+
44
def process_with_model(model, input_tensor, indices):
    """
    Shared inference pipeline for any model (student or teacher):
    run inference, then decode the raw output into a class map.

    `indices` is currently unused (the spectral-correction step is disabled)
    but kept so the call signature stays stable for both callers.
    """
    model_output = tm.run_inference(model, input_tensor)
    decoded_map = tm.decode_output(model_output)
    return decoded_map
62
+
63
def run_evaluation(lat, lon, buffer_km=5):
    """
    Compare the default ("student") model against the large ("teacher") model
    on one satellite scene, treating the teacher output as the reference.

    Returns a dict with keys: status, metrics, maps, raw_data, indices, date —
    or {"error": ...} when the download or the teacher load fails.
    """
    print(f"🔍 ROZPOCZYNAM EWALUACJĘ DLA: {lat}, {lon}")

    # 1. Download the imagery once and reuse it for both models.
    dl_result = tm.download_sentinel2(lat, lon, buffer_km, max_cloud_cover=10, days_back=180)

    if dl_result is None:
        return {"error": "Nie udało się pobrać zdjęć satelitarnych"}

    raw_data, date, scene_id = dl_result

    # 2. Shared inputs: tensor + spectral indices (computed once so both
    #    models see exactly the same data).
    input_tensor = tm.prepare_input(raw_data)
    indices = tm.calculate_spectral_indices(input_tensor)

    # Student: the model already cached inside terramindFunctions.
    print("🤖 Przetwarzanie: Student...")
    student_model = tm.get_model()
    student_map = process_with_model(student_model, input_tensor, indices)

    # Teacher: freshly built large model acting as pseudo ground truth.
    print("👨‍🏫 Przetwarzanie: Nauczyciel...")
    teacher_model = load_teacher_model()

    if teacher_model is None:
        return {"error": "Błąd modelu Nauczyciela"}

    # Same helper as for the student — guarantees an identical pipeline.
    teacher_map = process_with_model(teacher_model, input_tensor, indices)

    # Free GPU memory held by the teacher before computing metrics.
    del teacher_model
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    # Metrics: student vs teacher (teacher map is the "target").
    print("📊 Liczenie metryk...")

    acc = calculate_accuracy(student_map, teacher_map)
    miou, iou_details = calculate_miou(student_map, teacher_map)
    fw_iou = calculate_fw_iou(student_map, teacher_map)
    dice = calculate_dice_score(student_map, teacher_map)

    mean_precision, precision_details = calculate_precision(student_map, teacher_map)
    mean_recall, recall_details = calculate_recall(student_map, teacher_map)

    # Merge per-class details into the structure the frontend expects:
    # { "<class>": { iou, precision, recall }, ... }
    combined_details = {}

    # IoU is always computed, so its keys define the class list.
    for class_name in iou_details.keys():
        combined_details[class_name] = {
            "iou": iou_details.get(class_name, 0.0),
            "precision": precision_details.get(class_name, 0.0),
            "recall": recall_details.get(class_name, 0.0)
        }

    # Final score: plain average of the four headline metrics.
    final_score = (acc + miou + fw_iou + dice) / 4.0

    return {
        "status": "success",
        "metrics": {
            "accuracy": acc,
            "miou": miou,
            "fw_iou": fw_iou,
            "dice": dice,
            "mean_precision": mean_precision,
            "mean_recall": mean_recall,
            "final_score": final_score,
            "class_details": combined_details
        },
        "maps": {
            "student": student_map,
            "teacher": teacher_map
        },
        "raw_data": raw_data,
        "indices": indices,
        "date": date
    }
154
+
155
def run_evaluation_with_models(lat, lon, buffer_km=5, model_a_name=None, model_b_name=None):
    """
    Compare two named models on the same satellite scene.

    Parameters:
    - lat, lon: coordinates of the area of interest
    - buffer_km: half-size of the downloaded area in kilometres
    - model_a_name / model_b_name: registry names (defaults: small vs large)

    Returns a dict with keys: status, metrics, maps (corrected + raw class
    maps for both models), raw_data, input_tensor, indices, date — or
    {"error": ...} on failure.

    Fix vs. original: `final_score` was computed but never included in the
    returned metrics (unlike run_evaluation); it is now exposed.
    """
    if model_a_name is None:
        model_a_name = 'terramind_v1_small_generate'
    if model_b_name is None:
        model_b_name = 'terramind_v1_large_generate'

    print(f"🔍 PORÓWNANIE MODELI DLA: {lat}, {lon}")
    print(f" Model A: {model_a_name}")
    print(f" Model B: {model_b_name}")

    # 1. Download the imagery once and share it between both models.
    dl_result = tm.download_sentinel2(lat, lon, buffer_km, max_cloud_cover=10, days_back=180)

    if dl_result is None:
        return {"error": "Nie udało się pobrać zdjęć satelitarnych"}

    raw_data, date, scene_id = dl_result

    # 2. Shared inputs: tensor + spectral indices (computed once so both
    #    models see exactly the same data).
    input_tensor = tm.prepare_input(raw_data)
    indices = tm.calculate_spectral_indices(input_tensor)

    # Model A: inference -> decode -> spectral corrections; free it right after.
    print(f"🤖 Przetwarzanie: {model_a_name}...")
    model_a = load_model(model_a_name)
    if model_a is None:
        return {"error": f"Błąd ładowania modelu {model_a_name}"}

    raw_output_a = tm.run_inference(model_a, input_tensor)
    map_a_raw = tm.decode_output(raw_output_a)
    map_a, _ = tm.apply_hybrid_corrections(map_a_raw, indices)
    del model_a

    # Model B: identical pipeline, guaranteeing a fair comparison.
    print(f"🤖 Przetwarzanie: {model_b_name}...")
    model_b = load_model(model_b_name)
    if model_b is None:
        return {"error": f"Błąd ładowania modelu {model_b_name}"}

    raw_output_b = tm.run_inference(model_b, input_tensor)
    map_b_raw = tm.decode_output(raw_output_b)
    map_b, _ = tm.apply_hybrid_corrections(map_b_raw, indices)
    del model_b

    # Release cached GPU memory before the (CPU-side) metric computation.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    # Metrics: map B acts as the reference ("target") for map A.
    print("📊 Liczenie metryk...")

    acc = calculate_accuracy(map_a, map_b)
    miou, iou_details = calculate_miou(map_a, map_b)
    fw_iou = calculate_fw_iou(map_a, map_b)
    dice = calculate_dice_score(map_a, map_b)

    mean_precision, precision_details = calculate_precision(map_a, map_b)
    mean_recall, recall_details = calculate_recall(map_a, map_b)

    # Merge per-class details into { "<class>": { iou, precision, recall } }.
    # IoU is always computed, so its keys define the class list.
    combined_details = {}
    for class_name in iou_details.keys():
        combined_details[class_name] = {
            "iou": iou_details.get(class_name, 0.0),
            "precision": precision_details.get(class_name, 0.0),
            "recall": recall_details.get(class_name, 0.0)
        }

    # Final score: plain average of the four headline metrics.
    final_score = (acc + miou + fw_iou + dice) / 4.0

    return {
        "status": "success",
        "metrics": {
            "accuracy": acc,
            "miou": miou,
            "fw_iou": fw_iou,
            "dice": dice,
            "mean_precision": mean_precision,
            "mean_recall": mean_recall,
            # Previously computed but dropped — now returned like run_evaluation.
            "final_score": final_score,
            "class_details": combined_details
        },
        "maps": {
            "modelA": map_a,
            "modelB": map_b,
            "modelA_raw": map_a_raw,
            "modelB_raw": map_b_raw
        },
        "raw_data": raw_data,
        "input_tensor": input_tensor,
        "indices": indices,
        "date": date
    }
io-app-backend/metrics.py ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ # 👇 Importujemy słownik nazw z Twojego pliku, żeby metryki "rozumiały" klasy
3
+ from terramindFunctions import ESA_CLASSES
4
+
5
+
6
+ import numpy as np
7
+ from terramindFunctions import ESA_CLASSES
8
+
9
def calculate_precision(pred_map, target_map):
    """
    Per-class precision: TP / (TP + FP) — "of what the model predicted,
    how much is correct?"

    Iterates over the classes present in the target, skipping class 0
    ("no data"). Returns (mean_precision_percent, {class_name: percent}).
    """
    per_class = []
    details = {}

    for cls in np.unique(target_map):
        if cls == 0:
            continue

        predicted = pred_map == cls
        actual = target_map == cls

        tp = np.logical_and(predicted, actual).sum()
        fp = np.logical_and(predicted, ~actual).sum()  # predicted yes, truth no
        denom = tp + fp

        # Nothing predicted for this class -> precision conservatively 0.
        value = tp / denom if denom > 0 else 0.0
        per_class.append(value)
        details[ESA_CLASSES.get(cls, f"Klasa {cls}")] = value * 100.0

    mean = np.mean(per_class) * 100.0 if per_class else 0.0
    return mean, details
40
+
41
+
42
def calculate_recall(pred_map, target_map):
    """
    Per-class recall: TP / (TP + FN) — "how many of the true pixels did
    the model find?"

    Iterates over the classes present in the target, skipping class 0
    ("no data"). Returns (mean_recall_percent, {class_name: percent}).
    """
    per_class = []
    details = {}

    for cls in np.unique(target_map):
        if cls == 0:
            continue

        predicted = pred_map == cls
        actual = target_map == cls

        tp = np.logical_and(predicted, actual).sum()
        fn = np.logical_and(~predicted, actual).sum()  # predicted no, truth yes
        denom = tp + fn

        # Cannot really be 0 (cls comes from the target) — kept defensively.
        value = tp / denom if denom > 0 else 0.0
        per_class.append(value)
        details[ESA_CLASSES.get(cls, f"Klasa {cls}")] = value * 100.0

    mean = np.mean(per_class) * 100.0 if per_class else 0.0
    return mean, details
73
+
74
def calculate_dice_score(pred_map, target_map):
    """
    Mean Dice coefficient (pixel-level F1), averaged over the classes
    present in the target; class 0 ("no data") is skipped. For identical
    inputs Dice is typically higher than IoU. Returns a percentage.
    """
    scores = []

    for cls in np.unique(target_map):
        if cls == 0:
            continue

        pred_hits = pred_map == cls
        target_hits = target_map == cls

        overlap = np.logical_and(pred_hits, target_hits).sum()
        denom = pred_hits.sum() + target_hits.sum()

        # Both regions empty counts as a perfect match (guards divide-by-zero).
        scores.append(1.0 if denom == 0 else (2.0 * overlap) / denom)

    return np.mean(scores) * 100.0 if scores else 0.0
110
+
111
+
112
def calculate_accuracy(pred_map, target_map):
    """
    Pixel accuracy: percentage of pixels where prediction equals the target,
    counting only pixels whose target class is not 0 ("no data").
    Returns 0.0 when the target contains no valid pixels.
    """
    pred = np.asarray(pred_map).ravel()
    target = np.asarray(target_map).ravel()

    valid = target != 0
    total = np.sum(valid)
    if total == 0:
        return 0.0

    matches = np.sum((pred == target) & valid)
    return (matches / total) * 100.0
129
+
130
def calculate_miou(pred_map, target_map, verbose=False):
    """
    Mean IoU over the classes present in the target (class 0 skipped).

    Returns (miou_percent, {class_name: iou_percent}); optionally prints a
    per-class breakdown when verbose=True. Returns (0.0, {}) when no class
    produces a non-empty union.
    """
    per_class = []
    report = {}

    for cls in np.unique(target_map):
        if cls == 0:
            continue

        predicted = pred_map == cls
        actual = target_map == cls

        union = np.logical_or(predicted, actual).sum()
        if union > 0:
            iou = np.logical_and(predicted, actual).sum() / union
            per_class.append(iou)
            report[ESA_CLASSES.get(cls, f"Klasa {cls}")] = iou * 100.0

    if not per_class:
        return 0.0, {}

    miou = np.mean(per_class) * 100.0

    if verbose:
        print("\n--- Szczegóły IoU dla klas ---")
        for name, score in report.items():
            print(f"{name:<20}: {score:.2f}%")

    return miou, report
171
+
172
def calculate_final_score(accuracy, miou, fwiou):
    """Aggregate score: unweighted mean of pixel accuracy, mIoU and fwIoU."""
    components = (accuracy, miou, fwiou)
    return sum(components) / 3.0
180
+
181
def print_report(accuracy, miou, final_score=None, class_details=None, model_name="Model"):
    """Print a formatted evaluation report (per-class IoU table plus totals)."""
    heavy_rule = "=" * 40
    light_rule = "-" * 40

    print(heavy_rule)
    print(f" 📊 RAPORT DLA: {model_name}")
    print(heavy_rule)

    # Optional per-class breakdown.
    if class_details:
        print(f"{'KLASA':<25} | {'IoU':<10}")
        print(light_rule)
        for name, score in class_details.items():
            print(f"{name:<25} | {score:.2f}%")
        print(light_rule)

    print(f"Pixel Accuracy (Ogólna): {accuracy:.2f}%")
    print(f"mIoU (Średnia z klas): {miou:.2f}%")

    # Footer only when an aggregate score was supplied.
    if final_score is not None:
        print(light_rule)
        print(f"🏆 FINAL SCORE: {final_score:.2f} / 100")
        print(heavy_rule + "\n")
203
+
204
+
205
+
206
+
207
+ # ... (reszta pliku bez zmian) ...
208
+
209
def calculate_fw_iou(pred_map, target_map):
    """
    Frequency-weighted IoU: each class's IoU is weighted by how often the
    class occurs in the target, so large regions dominate the score.
    Class 0 ("no data") is excluded from both weights and score.
    Returns a percentage; 0.0 when the target has no valid pixels.
    """
    labels = [c for c in np.unique(target_map) if c != 0]

    # Total number of valid (non-zero) target pixels — the weight denominator.
    total = sum(int(np.sum(target_map == c)) for c in labels)
    if total == 0:
        return 0.0

    weighted_sum = 0.0
    for c in labels:
        pred_hits = pred_map == c
        target_hits = target_map == c

        union = np.logical_or(pred_hits, target_hits).sum()
        iou = (np.logical_and(pred_hits, target_hits).sum() / union) if union > 0 else 0.0

        # Weight = class frequency in the target.
        weighted_sum += (np.sum(target_hits) / total) * iou

    return weighted_sum * 100.0
io-app-backend/plotting_utils.py ADDED
@@ -0,0 +1,429 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import torch
3
+ import numpy as np
4
+ import textwrap
5
+ import matplotlib.pyplot as plt
6
+ from matplotlib.colors import hex2color, LinearSegmentedColormap
7
+
8
# Plotting utils
# Colour-blind-friendly palette for the 10 LULC classes (index -> colour).
COLORBLIND_HEX = ["#000000", "#3171AD", "#469C76", '#83CA70', "#EAE159", "#C07CB8", "#C19368", "#6FB2E4", "#F1F1F1",
                  "#C66526"]
# Fix: the original comprehension variable shadowed the builtin `hex`.
COLORBLIND_RGB = [hex2color(code) for code in COLORBLIND_HEX]
lulc_cmap = LinearSegmentedColormap.from_list('lulc', COLORBLIND_RGB, N=10)
13
+
14
+
15
def rgb_smooth_quantiles(array, tolerance=0.02, scaling=0.5, default=2000):
    """Soft-compress the brightness extremes of a reflectance array.

    Very dark and very bright values are linearly compressed towards
    quantile-based limits, then the whole array is rescaled by a second,
    tighter pair of quantiles.

    Args:
        array: numpy array of raw reflectance values (the docstring in the
            caller passes an (H, W, 3) RGB slice — TODO confirm intended layout).
        tolerance: quantile used for the first low/high limits.
        scaling: compression factor applied outside the limits.
        default: reference brightness; also the floor of the high limit.

    Returns:
        Float array rescaled to roughly 0-1.  NOTE(review): values can fall
        outside [0, 1]; callers clip after multiplying by 255.
    """

    # Get scaling thresholds for smoothing the brightness.
    limit_low, median, limit_high = np.quantile(array, q=[tolerance, 0.5, 1. - tolerance])
    limit_high = limit_high.clip(default)  # compress only pixels above `default`
    limit_low = limit_low.clip(0, 1000)  # compress only pixels below 1000
    limit_low = np.where(median > default / 2, limit_low, 0)  # only darken images that are not dark already

    # Smooth very dark and bright values using linear scaling.
    array = np.where(array >= limit_low, array, limit_low + (array - limit_low) * scaling)
    array = np.where(array <= limit_high, array, limit_high + (array - limit_high) * scaling)

    # Update scaling params using a 10th of the tolerance for the max value.
    # Note: `median` is intentionally reused from the pre-compression pass.
    limit_low, limit_high = np.quantile(array, q=[tolerance/10, 1. - tolerance/10])
    limit_high = limit_high.clip(default, 20000)  # compress only pixels above `default`
    limit_low = limit_low.clip(0, 500)  # compress only pixels below 500
    limit_low = np.where(median > default / 2, limit_low, 0)  # only darken images that are not dark already

    # Rescale to roughly 0-1 (not 0-255; the caller does the 255 scaling).
    array = (array - limit_low) / (limit_high - limit_low)

    return array
41
+
42
+
43
def s2_to_rgb(data, smooth_quantiles=False):
    """Extract an (H, W, 3) uint8 RGB composite from Sentinel-2 data.

    Accepts torch tensors or numpy arrays, with or without a leading
    batch dimension, in channel-first or channel-last layout.  The RGB
    bands are taken from indices 3/2/1.  Brightness is either
    quantile-smoothed or a fixed reflectance/2000 scaling.
    """
    if isinstance(data, torch.Tensor):
        data = data.clone().cpu().numpy()
    if data.ndim == 4:
        data = data[0]  # drop batch dim

    if data.shape[0] > 13:
        # A first axis longer than 13 cannot be the band axis -> channel-last.
        rgb = data[:, :, [3, 2, 1]]
    else:
        # Channel-first: pick bands, then move channels last.
        rgb = data[[3, 2, 1]].transpose((1, 2, 0))

    rgb = rgb_smooth_quantiles(rgb) if smooth_quantiles else rgb / 2000

    return (rgb * 255).round().clip(0, 255).astype(np.uint8)
68
+
69
+
70
def s1_to_rgb(data):
    """Build a false-colour uint8 RGB composite from Sentinel-1 VV/VH in dB."""
    if isinstance(data, torch.Tensor):
        data = data.clone().cpu().numpy()
    if data.ndim == 4:
        data = data[0]  # drop batch dim

    vv, vh = data[0], data[1]
    channels = [
        (vv + 30) / 40,               # R: VV rescaled from [-30, +10] dB
        (vh + 40) / 40,               # G: VH rescaled from [-40, 0] dB
        vv / vh.clip(-40, -1) / 1.5,  # B: VV / VH ratio
    ]

    rgb = np.dstack(channels)
    return (rgb * 255).round().clip(0, 255).astype(np.uint8)
87
+
88
+
89
def s1_to_power(data):
    """Convert Sentinel-1 backscatter from dB to linear power, scaled by 10000."""
    # power = 10^(dB / 10); works on both numpy arrays and torch tensors.
    return 10 ** (data / 10) * 10000
93
+
94
+
95
def s1_power_to_rgb(data):
    """Build a false-colour uint8 RGB composite from Sentinel-1 linear power."""
    if isinstance(data, torch.Tensor):
        data = data.clone().cpu().numpy()
    if data.ndim == 4:
        data = data[0]  # drop batch dim

    vv, vh = data[0], data[1]
    # Fixed empirical scalings per channel; B is the VV/VH ratio.
    rgb = np.dstack([vv / 500, vh / 2200, vv / vh / 2])
    return (rgb * 255).round().clip(0, 255).astype(np.uint8)
112
+
113
+
114
def dem_to_rgb(data, cmap='BrBG_r', buffer=5):
    """Colour a DEM with `cmap`, min-max scaled with a +/-`buffer` margin.

    The margin keeps flat terrain away from the colormap extremes.
    Returns an (H, W, 3) uint8 array.
    """
    if isinstance(data, torch.Tensor):
        data = data.clone().cpu().numpy()
    while len(data.shape) > 2:
        data = data[0]  # drop batch/channel dims

    # Pad the value range on both sides before normalising to 0-1.
    low = data.min() - buffer
    high = data.max() + buffer
    normed = (data - low) / (high - low + 1e-6)

    rgb = plt.get_cmap(cmap)(normed)[:, :, :3]
    return (rgb * 255).round().clip(0, 255).astype(np.uint8)
131
+
132
+
133
def ndvi_to_rgb(data, cmap='RdYlGn'):
    """Colour an NDVI map (values in [-1, 1]) with `cmap`; returns uint8 RGB."""
    if isinstance(data, torch.Tensor):
        data = data.clone().cpu().numpy()
    while len(data.shape) > 2:
        data = data[0]  # drop batch/channel dims

    scaled = (data + 1) / 2  # map [-1, 1] -> [0, 1] for the colormap

    rgb = plt.get_cmap(cmap)(scaled)[:, :, :3]
    return (rgb * 255).round().clip(0, 255).astype(np.uint8)
147
+
148
+
149
def lulc_to_rgb(data, cmap=lulc_cmap, num_classes=10):
    """Colour a LULC class map (or per-class logits) with the LULC palette.

    A leading axis of size `num_classes` is argmax-decoded; any other
    extra leading axes are treated as batch dims and dropped.
    """
    while len(data.shape) > 2:
        if data.shape[0] == num_classes:
            data = data.argmax(axis=0)  # logits -> class indices
        else:
            data = data[0]  # drop batch dim

    rgb = cmap(data)[:, :, :3]
    return (rgb * 255).round().clip(0, 255).astype(np.uint8)
160
+
161
+
162
def coords_to_text(data):
    """Format a (lon, lat) pair as text; fall back to `str` otherwise.

    Args:
        data: torch tensor or numpy array; an optional batch dim is dropped.

    Returns:
        'lon=X.XX, lat=Y.YY' for an exactly-2-element vector, otherwise
        `str(data)`.
    """
    if isinstance(data, torch.Tensor):
        data = data.clone().cpu().numpy()
    if len(data.shape) > 1:
        data = data[0]  # drop batch dim
    if data.shape[0] != 2:
        # Fix: the original only rejected >2 elements, so a 1-element
        # vector crashed with IndexError when reading data[1].
        return str(data)
    return f'lon={data[0]:.2f}, lat={data[1]:.2f}'
174
+
175
+
176
def plot_s2(data, ax=None, smooth_quantiles=False, *args, **kwargs):
    """Render a Sentinel-2 RGB composite on `ax`, or show a new figure."""
    image = s2_to_rgb(data, smooth_quantiles=smooth_quantiles)

    target = plt if ax is None else ax
    target.imshow(image)
    target.axis('off')
    if ax is None:
        plt.show()
186
+
187
+
188
def plot_s1(data, ax=None, power=False, *args, **kwargs):
    """Render a Sentinel-1 false-colour composite (dB, or linear power if `power`)."""
    if power:
        image = s1_power_to_rgb(s1_to_power(data))
    else:
        image = s1_to_rgb(data)

    target = plt if ax is None else ax
    target.imshow(image)
    target.axis('off')
    if ax is None:
        plt.show()
202
+
203
+
204
def plot_dem(data, ax=None, *args, **kwargs):
    """Render a DEM with the 'BrBG_r' colormap, scaled with a 5-unit margin."""
    if isinstance(data, torch.Tensor):
        data = data.clone().cpu().numpy()
    while len(data.shape) > 2:
        data = data[0]  # drop batch/channel dims

    # Pad the range by 5 on each side so flat areas keep mid-range colours.
    low = data.min() - 5
    high = data.max() + 5
    scaled = (data - low) / (high - low + 1e-6)
    scaled = (scaled * 255).round().clip(0, 255).astype(np.uint8)

    target = plt if ax is None else ax
    target.imshow(scaled, vmin=0, vmax=255, cmap='BrBG_r')
    target.axis('off')
    if ax is None:
        plt.show()
227
+
228
+
229
def plot_lulc(data, ax=None, num_classes=10, *args, **kwargs):
    """Render a land-cover class map with the colour-blind-safe palette."""
    if isinstance(data, torch.Tensor):
        data = data.clone().cpu().numpy()
    while len(data.shape) > 2:
        if data.shape[0] == num_classes:
            data = data.argmax(axis=0)  # logits -> class indices
        else:
            data = data[0]  # drop batch dim

    target = plt if ax is None else ax
    target.imshow(data, vmin=0, vmax=num_classes - 1, cmap=lulc_cmap, interpolation='nearest')
    target.axis('off')
    if ax is None:
        plt.show()
247
+
248
+
249
def plot_ndvi(data, ax=None, *args, **kwargs):
    """Render an NDVI map on the fixed [-1, +1] 'RdYlGn' scale."""
    if isinstance(data, torch.Tensor):
        data = data.clone().cpu().numpy()
    while len(data.shape) > 2:
        data = data[0]  # drop batch/channel dims

    target = plt if ax is None else ax
    target.imshow(data, vmin=-1, vmax=+1, cmap='RdYlGn')
    target.axis('off')
    if ax is None:
        plt.show()
264
+
265
+
266
def wrap_text(text, ax, font_size):
    """Word-wrap `text` to roughly fit inside the axes `ax`.

    Uses a crude character-width heuristic (0.6 * font size, in pixels)
    against the axes' window extent; lines beyond the height budget are
    dropped and '...' is appended to the last kept line.
    """
    extent = ax.get_window_extent()

    # Heuristic budget: characters per line and total line count.
    char_width = font_size * 0.6  # approximate glyph width in pixels
    max_chars_per_line = int(extent.width / char_width * 0.75)
    max_lines = int(extent.height / font_size * 0.5)

    lines = textwrap.wrap(text, width=max_chars_per_line)
    if len(lines) > max_lines:
        lines = lines[:max_lines]
        lines[-1] += '...'

    return '\n'.join(lines)
284
+
285
+
286
def plot_text(data, ax=None, *args, **kwargs):
    """Display free text (or a coordinate array) centred in an axes.

    Strings are shown as-is; tensors/arrays are formatted through
    `coords_to_text`.  Raises ValueError for any other input type.
    """
    if isinstance(data, str):
        text = data
    elif isinstance(data, (torch.Tensor, np.ndarray)):
        # Presumably a lon/lat pair — coords_to_text falls back to str().
        text = coords_to_text(data)
    else:
        # Fix: the original raised a bare ValueError() with no message.
        raise ValueError(f"plot_text expects str, Tensor or ndarray, got {type(data).__name__}")

    # Long captions get a smaller font so more of the text fits.
    font_size = 14 if len(text) > 150 else 20

    if ax is None:
        fig, ax = plt.subplots()
        created_figure = True
    else:
        created_figure = False

    wrapped_text = wrap_text(text, ax, font_size)
    ax.text(0.5, 0.5, wrapped_text, fontsize=font_size, ha='center', va='center', wrap=True)
    ax.set_xticks([])
    ax.set_yticks([])
    if created_figure:
        plt.show()
309
+
310
+
311
def plot_modality(modality, data, ax=None, **kwargs):
    """Dispatch plotting by modality name (case-insensitive substring match).

    Unknown modality names are silently ignored.
    """
    key = modality.lower()
    if 's2' in key:
        plot_s2(data, ax=ax, **kwargs)
    elif 's1' in key:
        plot_s1(data, ax=ax, **kwargs)
    elif 'dem' in key:
        plot_dem(data, ax=ax, **kwargs)
    elif 'ndvi' in key:
        plot_ndvi(data, ax=ax, **kwargs)
    elif 'lulc' in key:
        plot_lulc(data, ax=ax, **kwargs)
    elif any(tag in key for tag in ('coords', 'caption', 'text')):
        plot_text(data, ax=ax, **kwargs)
324
+
325
+
326
+
327
+ # Metryki v0.2 (patrz terramind_generation.ipynb na komórke z "print(f"⏳ Ładowanie modelu: {model_name}...")")
328
+
329
def calculate_lulc_score(pred_tensor, target_tensor, num_classes=10):
    """Score a predicted LULC map against a reference map.

    The reference ("teacher") map serves as a proxy ground truth.
    Returns a dict with pixel accuracy, mean IoU over the classes that
    occur in either map, and a combined final score
    (0.3 * accuracy + 0.7 * mIoU), all as percentages.
    """

    def _to_label_map(t):
        # Accept torch tensors (detached to CPU) with optional batch and
        # per-class-logit dimensions; return a 2-D numpy label map.
        if isinstance(t, torch.Tensor):
            t = t.detach().cpu()
        if t.ndim == 4:
            t = t[0]  # drop batch dim
        if t.ndim == 3:
            return torch.argmax(t, dim=0).numpy()  # logits -> class indices
        return t.numpy()

    pred_mask = _to_label_map(pred_tensor)
    target_mask = _to_label_map(target_tensor)

    accuracy = (pred_mask == target_mask).sum() / pred_mask.size * 100.0

    # Per-class IoU; classes absent from both maps are skipped so they
    # do not drag the mean down.
    ious = []
    for cls in range(num_classes):
        pred_cls = pred_mask == cls
        target_cls = target_mask == cls

        union = np.logical_or(pred_cls, target_cls).sum()
        if union > 0:
            ious.append(np.logical_and(pred_cls, target_cls).sum() / union)

    miou = np.mean(ious) * 100.0 if ious else 0.0
    final_score = 0.3 * accuracy + 0.7 * miou

    return {
        "pixel_accuracy": accuracy,
        "miou": miou,
        "final_score": final_score
    }
380
+
381
+
382
def prepare_masks_for_score(*masks):
    """Prepare one or more label maps for `calculate_lulc_score`.

    Args:
        *masks: numpy arrays or torch tensors representing label maps
            (H, W) or per-class logits (C, H, W), optionally batched.

    Returns:
        (tensors, num_classes):
            tensors: list of `torch.LongTensor` with labels remapped to
                contiguous indices 0..N-1
            num_classes: number of unique labels across all inputs

    Logits are argmax-decoded, the union of labels over all inputs is
    computed, and each map is remapped to 0..N-1.

    Raises:
        ValueError: for inputs that are neither tensors nor arrays.
    """
    np_masks = []
    for m in masks:
        if isinstance(m, torch.Tensor):
            m = m.detach().cpu()
            if m.ndim == 4:
                m = m[0]  # drop batch dim
            if m.ndim == 3 and m.shape[0] > 1:
                m = m.argmax(axis=0).numpy()  # logits -> class indices
            else:
                # NOTE(review): a (1, H, W) tensor stays 3-D here — confirm
                # whether callers ever pass single-channel logits.
                m = m.numpy()
        elif isinstance(m, np.ndarray):
            while m.ndim > 2:
                m = m[0]  # drop batch dims
        else:
            raise ValueError("Unsupported mask type")

        np_masks.append(np.asarray(m, dtype=np.int64))

    # Union of labels across all masks; np.unique returns them sorted.
    union = np.unique(np.concatenate([m.ravel() for m in np_masks]))

    # Vectorized remap: searchsorted on the sorted union replaces the
    # original per-element np.vectorize lookup (a Python-level loop).
    remapped_tensors = [
        torch.from_numpy(np.searchsorted(union, arr).astype(np.int64))
        for arr in np_masks
    ]

    return remapped_tensors, len(union)
io-app-backend/requirements.txt ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py==2.3.1
2
+ aenum==3.1.16
3
+ affine==2.4.0
4
+ aiohappyeyeballs==2.6.1
5
+ aiohttp==3.13.3
6
+ aiosignal==1.4.0
7
+ albucore==0.0.24
8
+ albumentations==2.0.8
9
+ annotated-types==0.7.0
10
+ antlr4-python3-runtime==4.9.3
11
+ anyio==4.12.1
12
+ attrs==25.4.0
13
+ blinker==1.9.0
14
+ cachetools==6.2.4
15
+ certifi==2026.1.4
16
+ charset-normalizer==3.4.4
17
+ click==8.3.1
18
+ cligj==0.7.2
19
+ cloudpickle==3.1.2
20
+ contourpy==1.3.3
21
+ cycler==0.12.1
22
+ dask==2025.12.0
23
+ diffusers==0.36.0
24
+ docstring_parser==0.17.0
25
+ earthengine-api==1.7.4
26
+ einops==0.8.1
27
+ filelock==3.20.2
28
+ Flask==3.1.2
29
+ flask-cors==6.0.2
30
+ fonttools==4.61.1
31
+ frozenlist==1.8.0
32
+ fsspec==2025.12.0
33
+ geopandas==1.1.2
34
+ gitdb==4.0.12
35
+ GitPython==3.1.46
36
+ google-api-core==2.28.1
37
+ google-api-python-client==2.187.0
38
+ google-auth==2.47.0
39
+ google-auth-httplib2==0.3.0
40
+ google-cloud-core==2.5.0
41
+ google-cloud-storage==3.7.0
42
+ google-crc32c==1.8.0
43
+ google-resumable-media==2.8.0
44
+ googleapis-common-protos==1.72.0
45
+ grpcio==1.76.0
46
+ h11==0.16.0
47
+ h5py==3.15.1
48
+ hf-xet==1.2.0
49
+ httpcore==1.0.9
50
+ httplib2==0.31.0
51
+ httpx==0.28.1
52
+ huggingface_hub==1.2.4
53
+ hydra-core==1.3.2
54
+ idna==3.11
55
+ ImageIO==2.37.2
56
+ importlib_metadata==8.7.1
57
+ importlib_resources==6.5.2
58
+ itsdangerous==2.2.0
59
+ Jinja2==3.1.6
60
+ joblib==1.5.3
61
+ jsonargparse==4.45.0
62
+ jsonschema==4.26.0
63
+ jsonschema-specifications==2025.9.1
64
+ kiwisolver==1.4.9
65
+ kornia==0.8.2
66
+ kornia_rs==0.1.10
67
+ lazy_loader==0.4
68
+ lightly==1.5.22
69
+ lightly-utils==0.0.2
70
+ lightning==2.6.0
71
+ lightning-utilities==0.15.2
72
+ locket==1.0.0
73
+ Markdown==3.10
74
+ MarkupSafe==3.0.3
75
+ matplotlib==3.10.8
76
+ mpmath==1.3.0
77
+ multidict==6.7.0
78
+ networkx==3.6.1
79
+ numpy==2.2.6
80
+ odc-geo==0.5.0
81
+ odc-loader==0.6.0
82
+ odc-stac==0.5.0
83
+ omegaconf==2.3.0
84
+ opencv-python-headless==4.12.0.88
85
+ packaging==25.0
86
+ pandas==2.3.3
87
+ partd==1.4.2
88
+ pillow==12.1.0
89
+ planetary-computer==1.0.0
90
+ platformdirs==4.5.1
91
+ propcache==0.4.1
92
+ proto-plus==1.27.0
93
+ protobuf==6.33.2
94
+ pyasn1==0.6.1
95
+ pyasn1_modules==0.4.2
96
+ pycocotools==2.0.11
97
+ pydantic==2.12.5
98
+ pydantic_core==2.41.5
99
+ pyogrio==0.12.1
100
+ pyparsing==3.3.1
101
+ pyproj==3.7.2
102
+ pystac==1.14.2
103
+ pystac-client==0.9.0
104
+ python-box==7.3.2
105
+ python-dateutil==2.9.0.post0
106
+ python-dotenv==1.2.1
107
+ pytorch-lightning==2.6.0
108
+ pytz==2025.2
109
+ PyYAML==6.0.3
110
+ rasterio==1.5.0
111
+ referencing==0.37.0
112
+ regex==2025.11.3
113
+ requests==2.32.5
114
+ rioxarray==0.20.0
115
+ rpds-py==0.30.0
116
+ rsa==4.9.1
117
+ safetensors==0.7.0
118
+ scikit-image==0.26.0
119
+ scikit-learn==1.8.0
120
+ scipy==1.16.3
121
+ segmentation_models_pytorch==0.5.0
122
+ sentry-sdk==2.49.0
123
+ setuptools==80.9.0
124
+ shapely==2.1.2
125
+ shellingham==1.5.4
126
+ simsimd==6.5.12
127
+ six==1.17.0
128
+ smmap==5.0.2
129
+ stringzilla==4.6.0
130
+ sympy==1.14.0
131
+ tensorboard==2.20.0
132
+ tensorboard-data-server==0.7.2
133
+ terratorch==1.2.1
134
+ threadpoolctl==3.6.0
135
+ tifffile==2025.12.20
136
+ timm==1.0.24
137
+ toolz==1.1.0
138
+ torch==2.9.1
139
+ torchgeo==0.8.0
140
+ torchmetrics==1.8.2
141
+ torchvision==0.24.1
142
+ tqdm==4.67.1
143
+ typer-slim==0.21.1
144
+ typeshed_client==2.8.2
145
+ typing-inspection==0.4.2
146
+ typing_extensions==4.15.0
147
+ tzdata==2025.3
148
+ uritemplate==4.2.0
149
+ urllib3==2.6.3
150
+ wandb==0.23.1
151
+ Werkzeug==3.1.5
152
+ xarray==2025.12.0
153
+ yarl==1.22.0
154
+ zipp==3.23.0
io-app-backend/terramindFunctions.py ADDED
@@ -0,0 +1,648 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ import numpy as np
4
+ import matplotlib.pyplot as plt
5
+ import matplotlib.patches as mpatches
6
+ from datetime import datetime, timedelta
7
+ import torchvision.transforms as T
8
+ import terratorch
9
+ import base64
10
+ from io import BytesIO
11
+ from PIL import Image
12
+ import gc
13
+
14
# =========================================
# ⚙️ Default configuration
# =========================================

# Download defaults: area radius (km), maximum scene cloud cover (%) and
# how far back in time to search for imagery (days).
DEFAULT_BUFFER_KM = 5
DEFAULT_MAX_CLOUD_COVER = 10
DEFAULT_DAYS_BACK = 180

TARGET_SIZE = 224              # model input resolution (pixels per side)
TIMESTEPS = 50                 # generation timesteps passed to the model
BRIGHTNESS_BOOST = 2.5         # RGB preview brightness multiplier
NORMALIZATION_MODE = "offset"  # "offset" -> prepare_input() subtracts 1000

# Spectral-index thresholds used by the hybrid corrections.
NDWI_THRESHOLD = 0.1
MNDWI_THRESHOLD = 0.1
NDVI_THRESHOLD = 0.3
NDBI_THRESHOLD = 0.0
BSI_THRESHOLD = 0.1

# Feature toggles for apply_hybrid_corrections().
USE_WATER_CORRECTION = True
USE_VEGETATION_CORRECTION = True
USE_BUILDING_CORRECTION = True
USE_BARE_SOIL_CORRECTION = True
USE_SNOW_CORRECTION = False

SAVE_RESULTS = True
OUTPUT_FOLDER = "./wyniki"

# =========================================
# ESA WorldCover classes
# =========================================

# ESA WorldCover class code -> Polish display name (user-facing strings).
ESA_CLASSES = {
    0: "Brak danych",
    10: "Drzewa / Las",
    20: "Zarośla",
    30: "Trawa / Łąki",
    40: "Uprawy rolne",
    50: "Zabudowa",
    60: "Goły grunt",
    70: "Śnieg i lód",
    80: "Woda",
    90: "Tereny podmokłe",
    95: "Namorzyny",
    100: "Mchy i porosty"
}

# ESA WorldCover class code -> RGB colour used for rendering.
ESA_COLORS = {
    0: [0, 0, 0],
    10: [0, 100, 0],
    20: [255, 187, 34],
    30: [255, 255, 76],
    40: [240, 150, 255],
    50: [250, 0, 0],
    60: [180, 180, 180],
    70: [240, 240, 240],
    80: [0, 100, 200],
    90: [0, 150, 160],
    95: [0, 207, 117],
    100: [250, 230, 160]
}

# Model output channel index -> ESA WorldCover class code.
INDEX_TO_ESA = {
    0: 0, 1: 10, 2: 20, 3: 30, 4: 40, 5: 50,
    6: 60, 7: 70, 8: 80, 9: 90, 10: 95, 11: 100
}

# Pick GPU when available; printed once at import time.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"🖥️ Urządzenie: {device}")

# =========================================
# 🚀 Global model cache (managed by get_model)
# =========================================

_CURRENT_MODEL = None       # last model instance built by get_model()
_CURRENT_MODEL_NAME = None  # registry name of the cached model
91
+
92
def get_model(model_name):
    """Build (or reuse) a TerraMind model for S2L2A -> LULC generation.

    The model is cached in module globals: requesting the same
    `model_name` again returns the cached instance, while a different
    name frees the previous model (including CUDA memory) before
    building the new one from terratorch's FULL_MODEL_REGISTRY.

    Raises whatever the registry build raises, after logging it.
    """
    global _CURRENT_MODEL, _CURRENT_MODEL_NAME
    # Cache hit: the requested model is already loaded.
    if _CURRENT_MODEL is not None and _CURRENT_MODEL_NAME == model_name:
        return _CURRENT_MODEL
    # A different model is loaded: release it before building the new one.
    if _CURRENT_MODEL is not None:
        del _CURRENT_MODEL
        torch.cuda.empty_cache()
        gc.collect()
        print("🧹 Wyczyszczono pamięć po poprzednim modelu.")
    from terratorch import FULL_MODEL_REGISTRY
    print("⏳ Ładowanie modelu (tylko pierwszy raz)...")
    try:
        model = FULL_MODEL_REGISTRY.build(
            model_name,  # model requested by the caller
            modalities=["S2L2A"],
            output_modalities=["LULC"],
            pretrained=True,
            standardize=True,
        ).to(device)

        model.eval()

        # Update the global cache.
        _CURRENT_MODEL = model
        _CURRENT_MODEL_NAME = model_name

        print(f"✅ Model {model_name} gotowy do pracy.")
        return _CURRENT_MODEL

    except Exception as e:
        print(f"❌ Błąd podczas ładowania modelu {model_name}: {e}")
        # Propagate so the caller can report or fall back.
        raise e
126
+
127
+ # =========================================
128
+ # FUNKCJE POMOCNICZE
129
+ # =========================================
130
+
131
def get_coordinates_from_name(place_name):
    """Geocode a place name to coordinates via Nominatim (geopy).

    Returns:
        (latitude, longitude) tuple, or None when the place cannot be
        resolved or the geocoding service/library is unavailable.
    """
    try:
        from geopy.geocoders import Nominatim
        geolocator = Nominatim(user_agent="terramind_fast")
        location = geolocator.geocode(place_name)
        if location:
            print(f"📍 {location.address}")
            return location.latitude, location.longitude
        return None
    except Exception:
        # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        # Best-effort lookup: missing geopy or network failures yield None.
        return None
142
+
143
+
144
def download_sentinel2(lat, lon, buffer_km, max_cloud_cover, days_back):
    """Download the least-cloudy recent Sentinel-2 L2A scene around a point.

    Queries Microsoft Planetary Computer for scenes within `days_back`
    days with cloud cover below `max_cloud_cover` (%), covering a square
    of radius `buffer_km` around (lat, lon), and loads 12 bands at 10 m
    in Web-Mercator (EPSG:3857).

    Returns:
        (stacked, date, scene_id) where `stacked` is a (12, H, W) numpy
        array, or None when no scene matches.
        NOTE(review): failure returns None while success returns a
        3-tuple — callers must handle both shapes.
    """
    import planetary_computer
    import pystac_client
    import odc.stac
    import math
    import numpy as np
    from datetime import datetime, timedelta
    from pyproj import Transformer

    print(f"🛰️ Pobieranie danych dla: {lat:.4f}, {lon:.4f} (Promień: {buffer_km}km)")

    # 1. Mercator scale factor at this latitude: corrects map distortion
    # (around Poland, 1 real metre is roughly 1.6 metres in EPSG:3857).
    scale_factor = 1.0 / math.cos(math.radians(lat))

    # 2. Coordinate transformers between WGS84 degrees and Web-Mercator metres.
    to_3857 = Transformer.from_crs("EPSG:4326", "EPSG:3857", always_xy=True)
    to_4326 = Transformer.from_crs("EPSG:3857", "EPSG:4326", always_xy=True)

    # 3. Centre point in Mercator metres.
    center_x, center_y = to_3857.transform(lon, lat)

    # 4. Half-side of the square in Mercator metres, scale-corrected.
    # `buffer_km` is treated as the radius (distance from the centre).
    half_side_mercator = (buffer_km * 1000) * scale_factor

    min_x, min_y = center_x - half_side_mercator, center_y - half_side_mercator
    max_x, max_y = center_x + half_side_mercator, center_y + half_side_mercator

    # 5. Convert back to degrees for the STAC API (which requires WGS84 bbox).
    west, south = to_4326.transform(min_x, min_y)
    east, north = to_4326.transform(max_x, max_y)
    bbox_geo = [west, south, east, north]

    # --- Scene search ---
    end_date = datetime.now()
    start_date = end_date - timedelta(days=days_back)
    date_range = f"{start_date.strftime('%Y-%m-%d')}/{end_date.strftime('%Y-%m-%d')}"

    catalog = pystac_client.Client.open(
        "https://planetarycomputer.microsoft.com/api/stac/v1",
        modifier=planetary_computer.sign_inplace
    )

    search = catalog.search(
        collections=["sentinel-2-l2a"],
        bbox=bbox_geo,
        datetime=date_range,
        query={"eo:cloud_cover": {"lt": max_cloud_cover}}
    )

    items = list(search.items())
    if not items:
        print("❌ Brak danych spełniających kryteria")
        return None

    # Pick the scene with the lowest reported cloud cover.
    best_item = sorted(items, key=lambda x: x.properties.get('eo:cloud_cover', 100))[0]

    # 6. Load pixels: bbox_geo (degrees) selects the area, crs="EPSG:3857"
    # forces the web-map projection used by the frontend.
    data = odc.stac.load(
        [best_item],
        bands=["B01", "B02", "B03", "B04", "B05", "B06",
               "B07", "B08", "B8A", "B09", "B11", "B12"],
        bbox=bbox_geo,
        crs="EPSG:3857",
        resolution=10
    )

    # Stack the per-band DataArrays into a single (12, H, W) numpy array.
    stacked = np.stack([data[b].values[0] for b in data.data_vars], axis=0)

    print(f"✅ Pobrano obraz o rozmiarze: {stacked.shape}")
    return stacked, best_item.datetime.strftime('%Y-%m-%d'), best_item.id
220
+
221
+ # def download_sentinel2(lat, lon, buffer_km, max_cloud_cover, days_back):
222
+ # """Pobiera dane satelitarne uwzględniając parametry przekazane z backendu."""
223
+ # import planetary_computer
224
+ # import pystac_client
225
+ # import odc.stac
226
+ #
227
+ # print(f"🛰️ Pobieranie danych dla: {lat:.4f}, {lon:.4f} (Buffer: {buffer_km}km, Chmury: <{max_cloud_cover}%)")
228
+ #
229
+ # delta = buffer_km * 0.01
230
+ # bbox = [lon - delta, lat - delta, lon + delta, lat + delta]
231
+ #
232
+ # end_date = datetime.now()
233
+ # start_date = end_date - timedelta(days=days_back)
234
+ # date_range = f"{start_date.strftime('%Y-%m-%d')}/{end_date.strftime('%Y-%m-%d')}"
235
+ #
236
+ # catalog = pystac_client.Client.open(
237
+ # "https://planetarycomputer.microsoft.com/api/stac/v1",
238
+ # modifier=planetary_computer.sign_inplace
239
+ # )
240
+ #
241
+ # search = catalog.search(
242
+ # collections=["sentinel-2-l2a"],
243
+ # bbox=bbox,
244
+ # datetime=date_range,
245
+ # query={"eo:cloud_cover": {"lt": max_cloud_cover}}
246
+ # )
247
+ #
248
+ # items = list(search.items())
249
+ # if not items:
250
+ # print("❌ Brak danych spełniających kryteria")
251
+ # return None
252
+ #
253
+ # items_sorted = sorted(items, key=lambda x: x.properties.get('eo:cloud_cover', 100))
254
+ # best_item = items_sorted[0]
255
+ # date = best_item.datetime.strftime('%Y-%m-%d')
256
+ # cloud = best_item.properties.get('eo:cloud_cover', 0)
257
+ #
258
+ # print(f"📅 Znaleziono zdjęcie: {date} (chmury: {cloud:.1f}%)")
259
+ # print(f" 📌 ID sceny: {best_item.id}")
260
+ # print(f" 📦 BBOX żądany: {bbox}")
261
+ #
262
+ # bands = ["B01", "B02", "B03", "B04", "B05", "B06",
263
+ # "B07", "B08", "B8A", "B09", "B11", "B12"]
264
+ #
265
+ # data = odc.stac.load([best_item], bands=bands, bbox=bbox, resolution=10)
266
+ # stacked = np.stack([data[b].values[0] for b in bands], axis=0)
267
+ #
268
+ # print(f" 📐 Rozmiar danych: {stacked.shape}")
269
+ # return stacked, date, best_item.id
270
+
271
def prepare_input(data_12ch):
    """Turn a (12, H, W) numpy band stack into a (1, 12, 224, 224) float tensor.

    NaNs are zeroed, the "offset" normalization mode subtracts 1000
    (presumably the Sentinel-2 L2A radiometric offset — TODO confirm),
    negatives are clamped to 0, and the image is centre-cropped to a
    square then resized to TARGET_SIZE with antialiasing.
    """
    tensor = torch.from_numpy(data_12ch.astype(np.float32))
    tensor = torch.nan_to_num(tensor, nan=0.0)

    if NORMALIZATION_MODE == "offset":
        tensor = tensor - 1000.0

    tensor = torch.clamp(tensor, min=0)

    # Crop to the largest centred square before resizing, so the aspect
    # ratio is not distorted.
    h, w = tensor.shape[1], tensor.shape[2]
    min_dim = min(h, w)

    transform = T.Compose([
        T.CenterCrop(min_dim),
        T.Resize((TARGET_SIZE, TARGET_SIZE), antialias=True)
    ])

    # Add the batch dimension expected by the model.
    return transform(tensor).unsqueeze(0)
289
+
290
def run_inference(model, input_tensor):
    """Run the model on an S2L2A tensor and return its raw LULC output.

    `timesteps` is the module-level TIMESTEPS constant passed to the
    generative model (semantics defined by terratorch — see its docs).
    The output is detached so no autograd graph is retained.
    """
    print(f"🔄 Uruchamianie modelu AI...")
    with torch.no_grad():
        output = model(
            {"S2L2A": input_tensor.to(device)},
            verbose=False,
            timesteps=TIMESTEPS
        )
    return output["LULC"].detach()
299
+
300
def decode_output(lulc_tensor):
    """Convert model LULC output to an (H, W) map of ESA WorldCover codes.

    Per-class logits of shape (B, C, H, W) with C > 1 are argmax-decoded
    and, when the indices fit the 0..11 range, remapped through
    INDEX_TO_ESA; otherwise the raw indices (or the already-decoded
    single-channel map) are returned as plain ints.
    """
    if lulc_tensor.ndim == 4 and lulc_tensor.shape[1] > 1:
        class_indices = lulc_tensor.argmax(dim=1)[0].cpu().numpy()
        if class_indices.max() <= 11:
            # Vectorized table lookup instead of np.vectorize, which runs
            # a Python-level function call per pixel.
            lut = np.zeros(12, dtype=np.int64)
            for idx, esa_code in INDEX_TO_ESA.items():
                lut[idx] = esa_code
            class_map = lut[class_indices]
        else:
            class_map = class_indices
    else:
        class_map = lulc_tensor[0, 0].cpu().numpy().astype(int)
    return class_map
310
+
311
def calculate_spectral_indices(input_tensor):
    """Compute spectral indices from a (1, 12, H, W) Sentinel-2 tensor.

    Band indices follow the download order: 1=blue, 2=green, 3=red,
    7=NIR, 10=SWIR1, 11=SWIR2.  Values are divided by 10000 before the
    ratios are formed; `eps` guards against zero denominators.

    Returns a dict of numpy arrays: ndwi, mndwi, awei, ndvi, evi,
    ndbi, bsi.
    """
    def band(i):
        return input_tensor[0, i].cpu().numpy() / 10000.0

    blue, green, red = band(1), band(2), band(3)
    nir, swir1, swir2 = band(7), band(10), band(11)

    eps = 1e-8  # avoid division by zero
    return {
        'ndwi': (green - nir) / (green + nir + eps),
        'mndwi': (green - swir1) / (green + swir1 + eps),
        'awei': 4 * (green - swir1) - (0.25 * nir + 2.75 * swir2),
        'ndvi': (nir - red) / (nir + red + eps),
        'evi': 2.5 * (nir - red) / (nir + 6 * red - 7.5 * blue + 1 + eps),
        'ndbi': (swir1 - nir) / (swir1 + nir + eps),
        'bsi': ((swir1 + red) - (nir + blue)) / ((swir1 + red) + (nir + blue) + eps),
    }
330
+
331
+ # 🆕 Funkcja generująca maski dla każdego wskaźnika
332
def generate_index_masks(indices):
    """Build binary detection masks from the spectral indices.

    Each entry is a boolean numpy array flagging pixels that pass the
    corresponding threshold (module-level *_THRESHOLD constants).
    """
    ndvi = indices['ndvi']
    return {
        # Water detectors (three alternative indices).
        'water_ndwi': indices['ndwi'] > NDWI_THRESHOLD,
        'water_mndwi': indices['mndwi'] > MNDWI_THRESHOLD,
        'water_awei': indices['awei'] > 0,
        # Vegetation detectors.
        'vegetation_ndvi': ndvi > NDVI_THRESHOLD,
        'vegetation_evi': indices['evi'] > 0.3,
        # Built-up: high NDBI while vegetation is low.
        'buildings_ndbi': (indices['ndbi'] > NDBI_THRESHOLD) & (ndvi < 0.2),
        # Bare soil: high BSI while vegetation is very low.
        'baresoil_bsi': (indices['bsi'] > BSI_THRESHOLD) & (ndvi < 0.1),
    }
361
+
362
def apply_hybrid_corrections(class_map, indices):
    """Apply rule-based spectral-index corrections on top of the model map.

    Rules run in priority order (water > forest > buildings > bare soil);
    each later rule checks `hybrid_map` so it cannot overwrite pixels
    claimed by an earlier rule.  Class codes are ESA WorldCover values
    (80=water, 90=wetland, 10=trees, 50=built-up, 60=bare).

    Returns:
        (hybrid_map, correction_layers) — the corrected map and a dict
        of boolean layers marking the pixels each rule changed.
    """
    hybrid_map = class_map.copy()
    correction_layers = {}

    if USE_WATER_CORRECTION:
        # Any of the three water indices may trigger; existing water/wetland
        # pixels are excluded so the layer records only actual changes.
        water_mask = ((indices['ndwi'] > NDWI_THRESHOLD) | (indices['mndwi'] > MNDWI_THRESHOLD) | (indices['awei'] > 0))
        already_water = (class_map == 80) | (class_map == 90)
        correction_layers['water'] = water_mask & ~already_water
        hybrid_map[correction_layers['water']] = 80

    if USE_VEGETATION_CORRECTION:
        # Both NDVI and EVI must agree before relabelling as trees.
        strong_vegetation = (indices['ndvi'] > 0.5) & (indices['evi'] > 0.3)
        not_water = (hybrid_map != 80) & (hybrid_map != 90)
        correction_layers['vegetation'] = strong_vegetation & not_water & (hybrid_map != 10)
        hybrid_map[correction_layers['vegetation']] = 10

    if USE_BUILDING_CORRECTION:
        # Built-up requires high NDBI with low vegetation and no water signal.
        building_mask = ((indices['ndbi'] > NDBI_THRESHOLD) & (indices['ndvi'] < 0.2) & (indices['ndwi'] < 0))
        can_be_building = (hybrid_map != 80) & (hybrid_map != 10)
        correction_layers['buildings'] = building_mask & can_be_building & (hybrid_map != 50)
        hybrid_map[correction_layers['buildings']] = 50

    if USE_BARE_SOIL_CORRECTION:
        # Bare soil must also not look like built-up (NDBI < 0.1).
        bare_mask = ((indices['bsi'] > BSI_THRESHOLD) & (indices['ndvi'] < 0.1) & (indices['ndwi'] < 0) & (indices['ndbi'] < 0.1))
        can_be_bare = (hybrid_map != 80) & (hybrid_map != 10) & (hybrid_map != 50)
        correction_layers['baresoil'] = bare_mask & can_be_bare & (hybrid_map != 60)
        hybrid_map[correction_layers['baresoil']] = 60

    return hybrid_map, correction_layers
392
+
393
+ # =========================================
394
+ # 🆕 FUNKCJE WIZUALIZACJI MASEK
395
+ # =========================================
396
+
397
def create_rgb_image(input_tensor, brightness=BRIGHTNESS_BOOST):
    """Render a brightened true-colour RGB preview from the input tensor.

    Channels 3/2/1 of the (batch, channel, H, W) tensor are taken as R/G/B
    (assumes Sentinel-2 band ordering — TODO confirm against prepare_input),
    rescaled from reflectance units to [0, 1], brightness-boosted, and
    returned as an (H, W, 3) uint8 array.
    """
    bands = [input_tensor[0, idx].cpu().numpy() for idx in (3, 2, 1)]
    rgb = np.stack(bands, axis=-1)

    # Undo the processing-baseline reflectance offset when configured.
    if NORMALIZATION_MODE == "offset":
        rgb = rgb + 1000.0

    scaled = np.clip(rgb / 10000.0 * brightness, 0, 1)
    return (scaled * 255).astype(np.uint8)
413
+
414
def create_segmentation_image(class_map):
    """Colour-code a 2D class-id map using the ESA_COLORS palette.

    Pixels whose class id has no palette entry remain black.
    """
    height, width = class_map.shape
    image = np.zeros((height, width, 3), dtype=np.uint8)
    for class_id, color in ESA_COLORS.items():
        image[class_map == class_id] = color
    return image
424
+
425
+ # def create_segmentation_image(class_map, alpha=180):
426
+ # """
427
+ # Tworzy kolorową mapę segmentacji z kanałem alfa.
428
+ # alpha: 0 (całkowicie przezroczysty) do 255 (nieprzezroczysty)
429
+ # """
430
+ # h, w = class_map.shape
431
+ # # Tworzymy tablicę RGBA (4 kanały)
432
+ # rgba = np.zeros((h, w, 4), dtype=np.uint8)
433
+ #
434
+ # for class_id, color in ESA_COLORS.items():
435
+ # mask = class_map == class_id
436
+ # rgba[mask, :3] = color # Kopiujemy R, G, B
437
+ # rgba[mask, 3] = alpha # Ustawiamy przezroczystość dla tej klasy
438
+ #
439
+ # return rgba
440
+
441
def create_mask_visualization(mask, color=[255, 0, 0]):
    """Render a binary mask as an RGB image.

    Args:
        mask: 2D boolean numpy array (True = detected pixel).
        color: [R, G, B] colour used for True pixels.

    Returns:
        (H, W, 3) uint8 array: mask pixels painted in `color`, everything
        else a light-gray (240, 240, 240) background for contrast.

    Note: the previous docstring claimed a transparent background, but the
    output is plain opaque RGB; the old white `np.ones * 255` fill was also
    dead work since every pixel was immediately overwritten.
    """
    # Initialize directly with the gray background, then paint the mask.
    rgb = np.full((*mask.shape, 3), 240, dtype=np.uint8)
    rgb[mask] = color
    return rgb
459
+
460
+ # def create_mask_visualization(mask, color=[255, 0, 0], alpha=180):
461
+ # """
462
+ # Tworzy wizualizację maski binarnej na przezroczystym tle.
463
+ # """
464
+ # h, w = mask.shape
465
+ # # 4 kanały: R, G, B, A
466
+ # rgba = np.zeros((h, w, 4), dtype=np.uint8)
467
+ #
468
+ # # Piksele należące do maski otrzymują kolor i wybraną przezroczystość
469
+ # rgba[mask, :3] = color
470
+ # rgba[mask, 3] = alpha
471
+ #
472
+ # # Piksele poza maską (~mask) mają już 0 w kanale alfa dzięki np.zeros,
473
+ # # więc są całkowicie przezroczyste.
474
+ #
475
+ # return rgba
476
+
477
def calculate_class_percentages(class_map):
    """Summarise pixel counts and percentage share per ESA class.

    Only classes actually present in class_map appear in the result.
    Values are converted to builtin int/float: previously `round()` on a
    numpy scalar could leak np.float64 into the payload, which the JSON
    serialization of the frontend response cannot handle.

    Args:
        class_map: 2D numpy array of class ids.

    Returns:
        dict: class_id -> {'name': str, 'count': int,
        'percentage': float rounded to two decimals}.
    """
    total_pixels = class_map.size
    percentages = {}

    for class_id, class_name in ESA_CLASSES.items():
        count = int(np.sum(class_map == class_id))
        if count > 0:
            percentages[class_id] = {
                'name': class_name,
                'count': count,
                'percentage': round(count / total_pixels * 100, 2),
            }

    return percentages
492
+
493
def image_to_base64(image_array):
    """Encode a numpy image array as a base64 PNG string for the frontend."""
    buffer = BytesIO()
    Image.fromarray(image_array).save(buffer, format='PNG')
    return base64.b64encode(buffer.getvalue()).decode('utf-8')
499
+
500
+ # =========================================
501
+ # GŁÓWNA FUNKCJA ANALITYCZNA
502
+ # =========================================
503
+
504
def analyze(location_data, buffer_km=DEFAULT_BUFFER_KM, max_cloud_cover=DEFAULT_MAX_CLOUD_COVER,
            days_back=DEFAULT_DAYS_BACK, show_visualization=False, save_files=False, model_name="terramind_v1_large_generate"):
    """
    Main analysis entry point called by the Flask backend.

    Args:
        location_data: either a (lat, lon) pair (list/tuple) or a place-name
            string geocoded via get_coordinates_from_name.
        buffer_km: radius of the analysed area around the point, in km.
        max_cloud_cover: maximum accepted scene cloud cover, in percent.
        days_back: how many days back to search for a Sentinel-2 scene.
        show_visualization: not used in this function body — NOTE(review):
            presumably consumed elsewhere or vestigial; confirm before removal.
        save_files: not used in this function body — same note as above.
        model_name: TerraMind model identifier passed to get_model.

    Returns:
        dict with base64-encoded images (RGB, raw model output, index masks,
        final segmentation), per-class statistics and scene metadata, or
        None when the location cannot be resolved or no scene is found.
    """

    lat, lon = None, None
    title = "Unknown"

    # 1. Resolve the location input (coordinates vs. place name).
    if isinstance(location_data, list) or isinstance(location_data, tuple):
        lat, lon = location_data
        title = f"{lat:.4f}N, {lon:.4f}E"
    elif isinstance(location_data, str):
        coords = get_coordinates_from_name(location_data)
        if not coords:
            print("❌ Nie znaleziono współrzędnych dla podanej nazwy.")
            return None
        lat, lon = coords
        title = location_data
    else:
        print("❌ Nieprawidłowy format lokalizacji")
        return None

    print(f"\n{'='*60}")
    print(f"🌍 ROZPOCZĘCIE ANALIZY: {title}")
    print(f"{'='*60}")
    print(f"📍 Współrzędne: {lat:.6f}, {lon:.6f}")
    print(f"📏 Promień: {buffer_km} km")
    print(f"☁️ Max zachmurzenie: {max_cloud_cover}%")
    print(f"📅 Dni wstecz: {days_back}")
    print(f"{'='*60}\n")

    # 2. Download Sentinel-2 data; bail out when no scene matched the filters.
    result = download_sentinel2(lat, lon, buffer_km, max_cloud_cover, days_back)
    if result is None:
        return None
    data, date, scene_id = result

    # 3. Preprocess and run model inference.
    input_tensor = prepare_input(data)
    print(f"🔢 Rozmiar tensora wejściowego: {input_tensor.shape}")

    model = get_model(model_name)
    lulc_output = run_inference(model, input_tensor)

    # 4. Decode the RAW model output (no spectral corrections yet).
    class_map_raw = decode_output(lulc_output)

    # 5. Compute spectral indices from the input bands.
    indices = calculate_spectral_indices(input_tensor)

    # 6. Binary detection masks per index (for frontend display).
    index_masks = generate_index_masks(indices)

    # 7. Rule-based corrections produce the final class map.
    class_map_final, correction_layers = apply_hybrid_corrections(class_map_raw, indices)

    # =========================================
    # Render all images for the frontend.
    # =========================================

    print("🎨 Generowanie wizualizacji...")

    # 1. True-colour satellite preview.
    rgb_image = create_rgb_image(input_tensor)

    # 2. Raw TerraMind segmentation (before corrections).
    raw_segmentation = create_segmentation_image(class_map_raw)

    # 3. Final segmentation (after corrections).
    final_segmentation = create_segmentation_image(class_map_final)

    # 4. One coloured visualisation per index mask.
    mask_images = {}
    mask_colors = {
        'water_ndwi': [0, 150, 255],      # Blue
        'water_mndwi': [0, 100, 200],     # Dark blue
        'water_awei': [100, 200, 255],    # Light blue
        'vegetation_ndvi': [0, 150, 0],   # Green
        'vegetation_evi': [50, 200, 50],  # Light green
        'buildings_ndbi': [255, 0, 0],    # Red
        'baresoil_bsi': [180, 140, 100],  # Brown
    }

    for mask_name, mask in index_masks.items():
        color = mask_colors.get(mask_name, [128, 128, 128])
        mask_images[mask_name] = create_mask_visualization(mask, color)

    # 5. Per-class pixel statistics of the final map.
    statistics = calculate_class_percentages(class_map_final)

    # =========================================
    # Assemble the response payload.
    # =========================================

    frontend_result = {
        'success': True,
        'lat': lat,
        'lon': lon,
        'title': title,
        'date': date,
        'scene_id': scene_id,
        'statistics': statistics,

        # Main images
        'rgb_base64': image_to_base64(rgb_image),
        'raw_segmentation_base64': image_to_base64(raw_segmentation),
        'segmentation_base64': image_to_base64(final_segmentation),

        # Index masks
        'masks': {
            mask_name: image_to_base64(mask_img)
            for mask_name, mask_img in mask_images.items()
        },

        # For frontend compatibility
        'class_map': class_map_final.tolist()
    }

    print("✅ Analiza zakończona sukcesem!")

    return frontend_result
630
+
631
if __name__ == "__main__":
    # Manual smoke test: run a full analysis for a fixed location and print
    # the size of every generated artefact (requires network/model access).
    print("\n" + "="*70)
    print("🧪 TRYB TESTOWY - terramindFunctions.py")
    print("="*70)

    result = analyze([50.0540, 19.9352], buffer_km=3, max_cloud_cover=10, days_back=60)

    if result:
        print("\n" + "="*70)
        print("📦 WYGENEROWANE OBRAZY:")
        print("="*70)
        print(f" ✅ RGB: {len(result['rgb_base64'])} chars")
        print(f" ✅ Surowy TerraMind: {len(result['raw_segmentation_base64'])} chars")
        print(f" ✅ Finalna segmentacja: {len(result['segmentation_base64'])} chars")
        print(f" ✅ Maski wskaźników: {len(result['masks'])} sztuk")
        for mask_name in result['masks'].keys():
            print(f" - {mask_name}")
        print("="*70)
io-app-front/.env ADDED
@@ -0,0 +1 @@
 
 
1
+ # SECURITY(review): this Mapbox token is committed to version control — rotate it and keep the real value in an untracked .env.local.
+ VITE_MAPBOX_ACCESS_TOKEN = pk.eyJ1IjoiamFucGllY2hvdGEiLCJhIjoiY21rd2o3eGhkMDBoaDNlcXltdXRrcHl0MyJ9.C93gECPA4gRjY2-IJJrGOA
io-app-front/.gitignore ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Logs
2
+ logs
3
+ *.log
4
+ npm-debug.log*
5
+ yarn-debug.log*
6
+ yarn-error.log*
7
+ pnpm-debug.log*
8
+ lerna-debug.log*
9
+
10
+ node_modules
11
+ dist
12
+ dist-ssr
13
+ *.local
14
+
15
+ # Editor directories and files
16
+ .vscode/*
17
+ !.vscode/extensions.json
18
+ .idea
19
+ .DS_Store
20
+ *.suo
21
+ *.ntvs*
22
+ *.njsproj
23
+ *.sln
24
+ *.sw?
io-app-front/README.md ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # React + TypeScript + Vite
2
+
3
+ This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
4
+
5
+ Currently, two official plugins are available:
6
+
7
+ - [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) (or [oxc](https://oxc.rs) when used in [rolldown-vite](https://vite.dev/guide/rolldown)) for Fast Refresh
8
+ - [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
9
+
10
+ ## React Compiler
11
+
12
+ The React Compiler is not enabled on this template because of its impact on dev & build performances. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
13
+
14
+ ## Expanding the ESLint configuration
15
+
16
+ If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
17
+
18
+ ```js
19
+ export default defineConfig([
20
+ globalIgnores(['dist']),
21
+ {
22
+ files: ['**/*.{ts,tsx}'],
23
+ extends: [
24
+ // Other configs...
25
+
26
+ // Remove tseslint.configs.recommended and replace with this
27
+ tseslint.configs.recommendedTypeChecked,
28
+ // Alternatively, use this for stricter rules
29
+ tseslint.configs.strictTypeChecked,
30
+ // Optionally, add this for stylistic rules
31
+ tseslint.configs.stylisticTypeChecked,
32
+
33
+ // Other configs...
34
+ ],
35
+ languageOptions: {
36
+ parserOptions: {
37
+ project: ['./tsconfig.node.json', './tsconfig.app.json'],
38
+ tsconfigRootDir: import.meta.dirname,
39
+ },
40
+ // other options...
41
+ },
42
+ },
43
+ ])
44
+ ```
45
+
46
+ You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules:
47
+
48
+ ```js
49
+ // eslint.config.js
50
+ import reactX from 'eslint-plugin-react-x'
51
+ import reactDom from 'eslint-plugin-react-dom'
52
+
53
+ export default defineConfig([
54
+ globalIgnores(['dist']),
55
+ {
56
+ files: ['**/*.{ts,tsx}'],
57
+ extends: [
58
+ // Other configs...
59
+ // Enable lint rules for React
60
+ reactX.configs['recommended-typescript'],
61
+ // Enable lint rules for React DOM
62
+ reactDom.configs.recommended,
63
+ ],
64
+ languageOptions: {
65
+ parserOptions: {
66
+ project: ['./tsconfig.node.json', './tsconfig.app.json'],
67
+ tsconfigRootDir: import.meta.dirname,
68
+ },
69
+ // other options...
70
+ },
71
+ },
72
+ ])
73
+ ```
io-app-front/eslint.config.js ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import js from '@eslint/js'
2
+ import globals from 'globals'
3
+ import reactHooks from 'eslint-plugin-react-hooks'
4
+ import reactRefresh from 'eslint-plugin-react-refresh'
5
+ import tseslint from 'typescript-eslint'
6
+ import { defineConfig, globalIgnores } from 'eslint/config'
7
+
8
// Flat ESLint config: base JS + TypeScript presets plus React hooks/refresh
// rules, applied to all TS/TSX sources; build output is ignored.
export default defineConfig([
  globalIgnores(['dist']),
  {
    files: ['**/*.{ts,tsx}'],
    extends: [
      js.configs.recommended,
      tseslint.configs.recommended,
      reactHooks.configs.flat.recommended,
      reactRefresh.configs.vite,
    ],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
    },
  },
])
io-app-front/index.html ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!doctype html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <link rel="icon" type="image/svg+xml" href="/vite.svg" />
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
7
+ <title>io-app-front</title>
8
+ </head>
9
+ <body>
10
+ <div id="root"></div>
11
+ <script type="module" src="/src/main.jsx"></script>
12
+ </body>
13
+ </html>
io-app-front/package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
io-app-front/package.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "io-app-front",
3
+ "private": true,
4
+ "version": "0.0.0",
5
+ "type": "module",
6
+ "scripts": {
7
+ "dev": "vite",
8
+ "build": "tsc -b && vite build",
9
+ "lint": "eslint .",
10
+ "preview": "vite preview"
11
+ },
12
+ "dependencies": {
13
+ "@emotion/react": "^11.14.0",
14
+ "@mantine/core": "^8.3.13",
15
+ "@mantine/hooks": "^8.3.13",
16
+ "@tabler/icons-react": "^3.36.1",
17
+ "axios": "^1.13.2",
18
+ "dotenv": "^17.2.3",
19
+ "leaflet": "^1.9.4",
20
+ "mapbox-gl": "^3.18.1",
21
+ "react": "^19.2.0",
22
+ "react-dom": "^19.2.0",
23
+ "react-leaflet": "^5.0.0"
24
+ },
25
+ "devDependencies": {
26
+ "@eslint/js": "^9.39.1",
27
+ "@types/node": "^24.10.1",
28
+ "@types/react": "^19.2.5",
29
+ "@types/react-dom": "^19.2.3",
30
+ "@vitejs/plugin-react": "^5.1.1",
31
+ "eslint": "^9.39.1",
32
+ "eslint-plugin-react-hooks": "^7.0.1",
33
+ "eslint-plugin-react-refresh": "^0.4.24",
34
+ "globals": "^16.5.0",
35
+ "typescript": "~5.9.3",
36
+ "typescript-eslint": "^8.46.4",
37
+ "vite": "^7.2.4"
38
+ }
39
+ }
io-app-front/public/vite.svg ADDED
io-app-front/src/App.css ADDED
@@ -0,0 +1 @@
 
 
1
+
io-app-front/src/App.jsx ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from "react";
2
+ import Sidebar from './components/Sidebar'
3
+ import MapView from './components/MapView'
4
+ import { MantineProvider } from '@mantine/core'
5
+ import '@mantine/core/styles.css'
6
+
7
/**
 * Root component: wires the sidebar (search / analysis controls) to the map
 * view and owns the shared state — the currently selected location, the
 * latest analysis result, and the layer-display configuration.
 */
function App() {
  const [selectedLocation, setSelectedLocation] = useState(null);
  const [analysisResult, setAnalysisResult] = useState(null);
  const [layersConfig, setLayersConfig] = useState({
    firstLayer: 'rgb', // Default: satellite imagery
    secondLayer: 'image', // Default: LULC classification overlay
    opacity: 0.5
  });

  // Sidebar finished an analysis: store the result and clear the marker.
  const handleAnalysisComplete = (data) => {
    console.log("✅ App: Odebrano wyniki z Sidebaru", data);
    setAnalysisResult(data);
    setSelectedLocation(null);
  };

  // Map click: remember the coordinates and drop any stale result.
  const handleMapClick = (coords) => {
    setSelectedLocation(coords);
    setAnalysisResult(null);
    console.log('lng: ', coords.lng);
    console.log('lat: ', coords.lat);
  }

  // A search-box selection behaves like a map click.
  const handleLocationSelect = (locationData) => {
    console.log('🔍 Wybrano z wyszukiwarki:', locationData);
    setSelectedLocation(locationData);
    setAnalysisResult(null);
  };

  return (
    <MantineProvider forceColorScheme='dark'>
      <Sidebar
        selectedLocation={selectedLocation}
        onAnalysisComplete={handleAnalysisComplete}
        onLocationSelect={handleLocationSelect}
        layersConfig={layersConfig}
        onLayersChange={setLayersConfig}
        analysisResult={analysisResult}
      />
      <MapView
        onMapClick={handleMapClick}
        analysisResult={analysisResult}
        selectedLocation={selectedLocation}
        layersConfig={layersConfig}
      />
    </MantineProvider>
  )
}

export default App
io-app-front/src/assets/react.svg ADDED
io-app-front/src/components/CompareModal.jsx ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Modal, Select, Button, Group, Stack, Text, Center, Loader } from '@mantine/core';
2
+ import { useState } from 'react';
3
+ import classes from './CompareModal.module.css';
4
+
5
+ const MASK_INFO = {
6
+ water_ndwi: { name: "NDWI - Water", description: "Open water bodies", color: "#1971c2", formula: "(G - NIR)/(G + NIR)" },
7
+ water_mndwi: { name: "MNDWI - Urban Water", description: "Water in urban areas", color: "#1864ab", formula: "(G - SWIR)/(G + SWIR)" },
8
+ water_awei: { name: "AWEI - Automated", description: "Shadow suppression", color: "#0b7285", formula: "4(G-SWIR)-(0.25NIR+2.75SWIR2)" },
9
+ vegetation_ndvi: { name: "NDVI - Vegetation", description: "Plant health", color: "#2f9e44", formula: "(NIR - R)/(NIR + R)" },
10
+ vegetation_evi: { name: "EVI - Enhanced Veg", description: "Dense canopy", color: "#5c940d", formula: "2.5(NIR-R)/(NIR+6R-7.5B+1)" },
11
+ buildings_ndbi: { name: "NDBI - Built-up", description: "Urban structures", color: "#c92a2a", formula: "(SWIR - NIR)/(SWIR + NIR)" },
12
+ baresoil_bsi: { name: "BSI - Bare Soil", description: "Soil detection", color: "#d9480f", formula: "((SWIR+R)-(NIR+B))/((SWIR+R)+(NIR+B))" }
13
+ };
14
+
15
/**
 * Full-screen modal that compares two TerraMind model variants side by side.
 *
 * Props:
 * - opened/onClose: Mantine modal visibility control.
 * - onRunCompare(modelA, modelB): parent callback that starts the comparison.
 * - isLoading: true while the comparison request is in flight.
 * - results: null before a run; afterwards { modelA, modelB, metrics } with
 *   base64 image fields (rgb, raw_segmentation, image, masks) per model —
 *   NOTE(review): shape inferred from usage below; confirm against the API.
 */
export default function CompareModal({ opened, onClose, onRunCompare, isLoading, results }) {
  const [modelA, setModelA] = useState('terramind_v1_small_generate');
  const [modelB, setModelB] = useState('terramind_v1_large_generate');

  const modelOptions = [
    { value: 'terramind_v1_tiny_generate', label: 'Terramind v1 Tiny' },
    { value: 'terramind_v1_small_generate', label: 'Terramind v1 Small' },
    { value: 'terramind_v1_large_generate', label: 'Terramind v1 Large' },
  ];

  // Derive a short display label ("TINY Model") from the model identifier.
  const getModelLabel = (model) => {
    const parts = model.split('_');
    return parts[2] ? parts[2].toUpperCase() + " Model" : model;
  };

  return (
    <Modal
      opened={opened}
      onClose={onClose}
      title="ADVANCED ANALYSIS"
      size="100%"
      centered
      classNames={{ content: classes.modalContent, header: classes.modalHeader, body: classes.modalBody }}
      overlayProps={{ backgroundOpacity: 0.85, blur: 12 }}
    >
      <Stack gap="xl" h="100%">

        {/* --- STATE: MODEL SELECTION (no results yet) --- */}
        {!results && (
          <Center h={500}>
            <Stack align="center" gap="xl" w="100%" maw={600}>
              <Text size="lg" fw={500} c="dimmed" lts={1}>SELECT MODELS TO COMPARE</Text>
              <Group grow w="100%">
                <Select label="Model A" data={modelOptions} value={modelA} onChange={setModelA} disabled={isLoading} />
                <Select label="Model B" data={modelOptions} value={modelB} onChange={setModelB} disabled={isLoading} />
              </Group>
              <Button fullWidth size="lg" color="blue" onClick={() => onRunCompare(modelA, modelB)} loading={isLoading}>
                START ANALYSIS
              </Button>
            </Stack>
          </Center>
        )}

        {/* --- STATE: RESULTS (two columns) --- */}
        {results && !isLoading && (
          <div className={classes.dualLayout}>

            {/* LEFT COLUMN */}
            <div className={classes.modelColumn}>
              <div className={classes.columnHeader}>{getModelLabel(modelA)}</div>

              <div className={classes.imagesGrid}>
                {/* 1. RGB */}
                <ImageCard
                  src={results.modelA.rgb}
                  title="Satellite RGB"
                  subtitle="Sentinel-2 Source"
                  borderColor="#1971c2"
                />
                {/* 2. Raw */}
                <ImageCard
                  src={results.modelA.raw_segmentation}
                  title="Raw Output"
                  subtitle="Raw Model"
                  borderColor="#f08c00"
                />
                {/* 3. Final */}
                <ImageCard
                  src={results.modelA.image}
                  title="Final Segmentation"
                  subtitle="Model Prediction"
                  borderColor="#2f9e44"
                />
              </div>
            </div>

            {/* RIGHT COLUMN */}
            <div className={classes.modelColumn}>
              <div className={classes.columnHeader}>{getModelLabel(modelB)}</div>

              <div className={classes.imagesGrid}>
                {/* 1. RGB */}
                <ImageCard
                  src={results.modelB.rgb}
                  title="Satellite RGB"
                  subtitle="Sentinel-2 Source"
                  borderColor="#1971c2"
                />
                {/* 2. Raw */}
                <ImageCard
                  src={results.modelB.raw_segmentation}
                  title="Raw Output"
                  subtitle="Reference Model"
                  borderColor="#f08c00"
                />
                {/* 3. Final */}
                <ImageCard
                  src={results.modelB.image}
                  title="Final Segmentation"
                  subtitle="Reference Output"
                  borderColor="#2f9e44"
                />
              </div>
            </div>

            {/* MASKS (bottom; model A's masks only) */}
            <div className={classes.masksSection}>
              <div className={classes.masksHeader}>Spectral Indices Masks</div>
              <div className={classes.masksGrid}>
                {Object.entries(results.modelA.masks).map(([key, src]) => {
                  const info = MASK_INFO[key] || { name: key, color: '#444', formula: 'N/A' };
                  return (
                    <div key={key} className={classes.maskCard} style={{ borderColor: info.color }}>
                      <div className={classes.maskTitleBar} style={{ backgroundColor: info.color }}>
                        {info.name}
                      </div>
                      <img src={src} className={classes.maskImg} alt={info.name} />
                      <div className={classes.maskInfo}>
                        <div className={classes.maskDesc}>{info.description}</div>
                        <div className={classes.maskFormula}>{info.formula}</div>
                      </div>
                    </div>
                  );
                })}
              </div>
            </div>

            {/* METRICS */}
            {results.metrics && (
              <div className={classes.metricsSection}>
                <div className={classes.metricsHeader}>📊 Analysis Metrics</div>

                {/* Headline metrics */}
                <div className={classes.mainMetricsGrid}>
                  <MetricCard
                    label="Pixel Accuracy"
                    value={results.metrics.accuracy?.toFixed(2)}
                    unit="%"
                    color="#1971c2"
                  />
                  <MetricCard
                    label="mIoU"
                    value={results.metrics.miou?.toFixed(2)}
                    unit="%"
                    color="#2f9e44"
                  />
                  <MetricCard
                    label="Frequency Weighted IoU"
                    value={results.metrics.fw_iou?.toFixed(2)}
                    unit="%"
                    color="#f08c00"
                  />
                  <MetricCard
                    label="Dice Score"
                    value={results.metrics.dice?.toFixed(2)}
                    unit="%"
                    color="#c92a2a"
                  />
                  <MetricCard
                    label="Mean Precision"
                    value={results.metrics.mean_precision?.toFixed(2)}
                    unit="%"
                    color="#7950f2"
                  />
                  <MetricCard
                    label="Mean Recall"
                    value={results.metrics.mean_recall?.toFixed(2)}
                    unit="%"
                    color="#15aabf"
                  />
                </div>

                {/* Per-class metrics */}
                {results.metrics.class_details && Object.keys(results.metrics.class_details).length > 0 && (
                  <div className={classes.classDetailsSection}>
                    <div className={classes.classDetailsHeader}>Per-Class Metrics</div>
                    <div className={classes.classDetailsGrid}>
                      {Object.entries(results.metrics.class_details).map(([className, metrics]) => (
                        <div key={className} className={classes.classMetricCard}>
                          <div className={classes.classMetricName}>{className}</div>
                          <div className={classes.classMetricValues}>
                            <div className={classes.classMetricRow}>
                              <span>IoU:</span>
                              <span className={classes.metricValue}>{metrics.iou?.toFixed(2)}%</span>
                            </div>
                            <div className={classes.classMetricRow}>
                              <span>Precision:</span>
                              <span className={classes.metricValue}>{metrics.precision?.toFixed(2)}%</span>
                            </div>
                            <div className={classes.classMetricRow}>
                              <span>Recall:</span>
                              <span className={classes.metricValue}>{metrics.recall?.toFixed(2)}%</span>
                            </div>
                          </div>
                        </div>
                      ))}
                    </div>
                  </div>
                )}
              </div>
            )}

          </div>
        )}
      </Stack>
    </Modal>
  );
}
223
+
224
+ // KOMPONENT KARTY (Czysty, z Twoimi stylami ramki)
225
+ function ImageCard({ src, title, subtitle, borderColor }) {
226
+ return (
227
+ <div className={classes.imageCard}>
228
+ <div className={classes.cardTitle} style={{ color: borderColor }}>{title}</div>
229
+ <div className={classes.cardFrame} style={{ borderColor: borderColor }}>
230
+ <img src={src} className={classes.cardImg} alt={title} />
231
+ </div>
232
+ <div className={classes.cardSubtitle}>{subtitle}</div>
233
+ </div>
234
+ );
235
+ }
236
+
237
+ // KOMPONENT KARTY METRYK
238
+ function MetricCard({ label, value, unit, color, highlight = false }) {
239
+ return (
240
+ <div className={classes.metricCard} style={{
241
+ borderColor: color,
242
+ backgroundColor: highlight ? 'rgba(27, 137, 23, 0.1)' : 'rgba(0, 0, 0, 0.2)'
243
+ }}>
244
+ <div className={classes.metricLabel}>{label}</div>
245
+ <div className={classes.metricValueContainer} style={{ color: color }}>
246
+ <span className={classes.metricValueText}>{value}</span>
247
+ <span className={classes.metricUnit}>{unit}</span>
248
+ </div>
249
+ </div>
250
+ );
251
+ }
io-app-front/src/components/CompareModal.module.css ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* --- GŁÓWNY MODAL --- */
2
+ .modalContent {
3
+ /* Ciemne, szklane tło */
4
+ background-color: rgba(15, 15, 18, 0.95) !important;
5
+ backdrop-filter: blur(40px) saturate(180%) !important;
6
+ border: 1px solid rgba(255, 255, 255, 0.08) !important;
7
+ border-radius: 16px !important;
8
+ color: white !important;
9
+ box-shadow: 0 24px 80px rgba(0, 0, 0, 0.8) !important;
10
+ }
11
+
12
+ .modalHeader { background: transparent !important; padding: 24px 32px !important; }
13
+ .modalBody { background: transparent !important; padding: 0 32px 32px 32px !important; }
14
+ .modalTitle { font-size: 1.25rem; font-weight: 700; letter-spacing: 0.5px; color: white; }
15
+
16
+ /* --- UKŁAD DWUKOLUMNOWY (GRID) --- */
17
+ .dualLayout {
18
+ display: grid;
19
+ grid-template-columns: 1fr 1fr; /* Dwie równe kolumny */
20
+ gap: 32px; /* Odstęp między Modelem A i B */
21
+ align-items: start;
22
+ }
23
+
24
+ /* Pojedyncza sekcja modelu (lewa lub prawa) */
25
+ .modelColumn {
26
+ background-color: rgba(255, 255, 255, 0.03); /* Bardzo subtelne tło kolumny */
27
+ border: 1px solid rgba(255, 255, 255, 0.06);
28
+ border-radius: 12px;
29
+ padding: 24px;
30
+ height: 100%;
31
+ }
32
+
33
+ .columnHeader {
34
+ text-align: center;
35
+ font-size: 1.1rem;
36
+ font-weight: 700;
37
+ text-transform: uppercase;
38
+ color: rgba(255, 255, 255, 0.9);
39
+ margin-bottom: 24px;
40
+ padding-bottom: 12px;
41
+ border-bottom: 1px solid rgba(255, 255, 255, 0.1);
42
+ }
43
+
44
+ /* --- GRID OBRAZÓW (Dokładnie jak w Twoim przykładzie) --- */
45
+ .imagesGrid {
46
+ display: grid;
47
+ /* To wymusza minimalną szerokość 280px. */
48
+ grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
49
+ gap: 20px;
50
+ }
51
+
52
+ /* --- KARTA OBRAZU --- */
53
+ .imageCard {
54
+ text-align: center;
55
+ background: rgba(0, 0, 0, 0.2);
56
+ padding: 12px;
57
+ border-radius: 10px;
58
+ transition: transform 0.2s ease;
59
+ }
60
+
61
+ .imageCard:hover {
62
+ background: rgba(255, 255, 255, 0.03);
63
+ }
64
+
65
+ .cardTitle {
66
+ font-size: 0.85rem;
67
+ font-weight: 600;
68
+ text-transform: uppercase;
69
+ letter-spacing: 0.5px;
70
+ margin-bottom: 10px;
71
+ }
72
+
73
+ /* Ramka obrazka */
74
+ .cardFrame {
75
+ width: 100%;
76
+ aspect-ratio: 1 / 1; /* Idealny kwadrat */
77
+ background-color: #000;
78
+ border-radius: 8px;
79
+ border-width: 2px;
80
+ border-style: solid; /* Kolor z propsa */
81
+ overflow: hidden;
82
+ box-shadow: 0 4px 20px rgba(0,0,0,0.4);
83
+ }
84
+
85
+ .cardImg {
86
+ width: 100%;
87
+ height: 100%;
88
+ object-fit: contain;
89
+ display: block;
90
+ }
91
+
92
+ .cardSubtitle {
93
+ margin-top: 8px;
94
+ font-size: 0.75rem;
95
+ color: rgba(255, 255, 255, 0.5);
96
+ }
97
+
98
+ /* --- SEKCJA MASEK (Dół) --- */
99
+ .masksSection {
100
+ grid-column: 1 / -1; /* Rozciągnij na obie kolumny */
101
+ background-color: rgba(255, 255, 255, 0.02);
102
+ border: 1px solid rgba(255, 255, 255, 0.06);
103
+ border-radius: 12px;
104
+ padding: 24px;
105
+ margin-top: 10px;
106
+ }
107
+
108
+ .masksHeader {
109
+ text-align: center;
110
+ font-size: 1rem;
111
+ font-weight: 600;
112
+ color: rgba(255, 255, 255, 0.8);
113
+ margin-bottom: 20px;
114
+ text-transform: uppercase;
115
+ }
116
+
117
+ /* Grid masek (Twoje 250px) */
118
+ .masksGrid {
119
+ display: grid;
120
+ grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
121
+ gap: 16px;
122
+ }
123
+
124
+ .maskCard {
125
+ border: 1px solid; /* Kolor z propsa */
126
+ border-radius: 8px;
127
+ overflow: hidden;
128
+ background: rgba(0, 0, 0, 0.3);
129
+ display: flex;
130
+ flex-direction: column;
131
+ }
132
+
133
+ .maskTitleBar {
134
+ padding: 8px 12px;
135
+ font-size: 0.8rem;
136
+ font-weight: 700;
137
+ color: white; /* Tło z propsa */
138
+ }
139
+
140
+ .maskImg {
141
+ width: 100%;
142
+ aspect-ratio: 1 / 1;
143
+ object-fit: contain;
144
+ background: #000;
145
+ }
146
+
147
+ .maskInfo {
148
+ padding: 10px;
149
+ }
150
+
151
+ .maskDesc {
152
+ font-size: 0.75rem;
153
+ color: rgba(255, 255, 255, 0.6);
154
+ margin-bottom: 6px;
155
+ }
156
+
157
+ .maskFormula {
158
+ font-family: monospace;
159
+ font-size: 0.7rem;
160
+ color: #4dabf7;
161
+ background: rgba(255, 255, 255, 0.05);
162
+ padding: 4px;
163
+ border-radius: 4px;
164
+ display: inline-block;
165
+ }
166
+
167
+ /* --- SEKCJA METRYK --- */
168
+ .metricsSection {
169
+ grid-column: 1 / -1; /* Rozciągnij na obie kolumny */
170
+ background-color: rgba(255, 255, 255, 0.02);
171
+ border: 1px solid rgba(255, 255, 255, 0.06);
172
+ border-radius: 12px;
173
+ padding: 24px;
174
+ margin-top: 20px;
175
+ }
176
+
177
+ .metricsHeader {
178
+ text-align: center;
179
+ font-size: 1rem;
180
+ font-weight: 600;
181
+ color: rgba(255, 255, 255, 0.8);
182
+ margin-bottom: 20px;
183
+ text-transform: uppercase;
184
+ letter-spacing: 0.5px;
185
+ }
186
+
187
+ /* Grid głównych metryk */
188
+ .mainMetricsGrid {
189
+ display: grid;
190
+ grid-template-columns: repeat(auto-fit, minmax(160px, 1fr));
191
+ gap: 12px;
192
+ margin-bottom: 24px;
193
+ }
194
+
195
+ .metricCard {
196
+ border: 1px solid;
197
+ border-radius: 8px;
198
+ padding: 16px;
199
+ background: rgba(0, 0, 0, 0.2);
200
+ text-align: center;
201
+ transition: all 0.2s ease;
202
+ }
203
+
204
+ .metricCard:hover {
205
+ background: rgba(255, 255, 255, 0.05);
206
+ transform: translateY(-2px);
207
+ }
208
+
209
+ .metricLabel {
210
+ font-size: 0.75rem;
211
+ color: rgba(255, 255, 255, 0.6);
212
+ text-transform: uppercase;
213
+ letter-spacing: 0.3px;
214
+ margin-bottom: 8px;
215
+ font-weight: 500;
216
+ }
217
+
218
+ .metricValueContainer {
219
+ display: flex;
220
+ justify-content: center;
221
+ align-items: baseline;
222
+ gap: 4px;
223
+ }
224
+
225
+ .metricValueText {
226
+ font-size: 1.5rem;
227
+ font-weight: 700;
228
+ }
229
+
230
+ .metricUnit {
231
+ font-size: 0.8rem;
232
+ font-weight: 500;
233
+ }
234
+
235
+ /* --- SEKCJA METRYK PER-CLASS --- */
236
+ .classDetailsSection {
237
+ margin-top: 24px;
238
+ border-top: 1px solid rgba(255, 255, 255, 0.06);
239
+ padding-top: 20px;
240
+ }
241
+
242
+ .classDetailsHeader {
243
+ font-size: 0.85rem;
244
+ font-weight: 600;
245
+ color: rgba(255, 255, 255, 0.7);
246
+ text-transform: uppercase;
247
+ letter-spacing: 0.3px;
248
+ margin-bottom: 16px;
249
+ }
250
+
251
+ .classDetailsGrid {
252
+ display: grid;
253
+ grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
254
+ gap: 12px;
255
+ }
256
+
257
+ .classMetricCard {
258
+ background: rgba(255, 255, 255, 0.04);
259
+ border: 1px solid rgba(255, 255, 255, 0.06);
260
+ border-radius: 8px;
261
+ padding: 12px;
262
+ }
263
+
264
+ .classMetricName {
265
+ font-size: 0.8rem;
266
+ font-weight: 700;
267
+ color: rgba(255, 255, 255, 0.8);
268
+ margin-bottom: 8px;
269
+ text-transform: uppercase;
270
+ }
271
+
272
+ .classMetricValues {
273
+ display: flex;
274
+ flex-direction: column;
275
+ gap: 4px;
276
+ }
277
+
278
+ .classMetricRow {
279
+ display: flex;
280
+ justify-content: space-between;
281
+ font-size: 0.75rem;
282
+ color: rgba(255, 255, 255, 0.6);
283
+ }
284
+
285
+ .metricValue {
286
+ color: rgba(255, 255, 255, 0.9);
287
+ font-weight: 600;
288
+ }
io-app-front/src/components/Legend.jsx ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Paper, Text, Stack, Group } from '@mantine/core';
2
+
3
// 1. Color mapping and Translation helper
// Maps the Polish land-cover class names coming from the backend statistics
// to an English display label and the RGB swatch color used in the legend.
// NOTE(review): the keys are runtime lookup keys (getMetadata indexes by the
// backend's class name), so they must stay byte-for-byte identical to what
// the backend emits — do not translate them.
const CLASS_METADATA = {
    'Drzewa / Las': { label: 'Trees / Forest', color: 'rgb(0, 100, 0)' },
    'Zarośla': { label: 'Shrubland', color: 'rgb(255, 187, 34)' },
    'Trawa / Łąki': { label: 'Grassland', color: 'rgb(255, 255, 76)' },
    'Uprawy rolne': { label: 'Crops', color: 'rgb(240, 150, 255)' },
    'Zabudowa': { label: 'Built area', color: 'rgb(250, 0, 0)' },
    'Goły grunt': { label: 'Bare Ground', color: 'rgb(180, 180, 180)' },
    'Śnieg i lód': { label: 'Snow & Ice', color: 'rgb(240, 240, 240)' },
    'Woda': { label: 'Water', color: 'rgb(0, 100, 200)' },
    'Tereny podmokłe': { label: 'Flooded vegetation', color: 'rgb(0, 150, 160)' },
    'Namorzyny': { label: 'Mangroves', color: 'rgb(0, 207, 117)' },
    'Mchy i porosty': { label: 'Moss & Lichen', color: 'rgb(250, 230, 160)' },
    'Brak danych': { label: 'No Data', color: 'rgb(100, 100, 100)' }
};
18
+
19
// Look up display metadata for a backend class name; unknown classes fall
// back to the raw name with a neutral gray swatch so the legend never breaks.
function getMetadata(className) {
    const entry = CLASS_METADATA[className];
    if (entry) {
        return entry;
    }
    return { label: className, color: '#666' };
}
22
+
23
+ // 2. Row Component
24
+ function LegendItem({ label, percentage, color }) {
25
+ return (
26
+ <Group justify="space-between" align="center" wrap="nowrap" style={{ width: '100%' }}>
27
+ <div style={{ display: 'flex', alignItems: 'center', gap: '8px' }}>
28
+ <span style={{
29
+ width: 14,
30
+ height: 14,
31
+ background: color,
32
+ borderRadius: '3px',
33
+ border: '1px solid rgba(255,255,255,0.3)',
34
+ flexShrink: 0
35
+ }} />
36
+ <Text size="xs" c="gray.3" style={{ lineHeight: 1 }}>{label}</Text>
37
+ </div>
38
+
39
+ <Text size="xs" fw={700} c="white">
40
+ {percentage.toFixed(1)}%
41
+ </Text>
42
+ </Group>
43
+ );
44
+ }
45
+
46
+ // 3. Main Component
47
+ export default function Legend({ analysisResult }) {
48
+ if (!analysisResult || !analysisResult.statistics) return null;
49
+
50
+ const stats = Object.values(analysisResult.statistics)
51
+ .filter(item => item.percentage > 0)
52
+ .sort((a, b) => b.percentage - a.percentage);
53
+
54
+ if (stats.length === 0) return null;
55
+
56
+ return (
57
+ <div style={styles.container}>
58
+ <Paper style={styles.glassPanel}>
59
+
60
+ <Text size="xs" fw={700} c="dimmed" tt="uppercase" mb="sm" style={{ letterSpacing: '1px' }}>
61
+ Legend
62
+ </Text>
63
+
64
+ <Stack gap={8}>
65
+ {stats.map((item) => {
66
+ const metadata = getMetadata(item.name);
67
+ return (
68
+ <LegendItem
69
+ key={item.name}
70
+ label={metadata.label}
71
+ percentage={item.percentage}
72
+ color={metadata.color}
73
+ />
74
+ );
75
+ })}
76
+ </Stack>
77
+
78
+ </Paper>
79
+ </div>
80
+ );
81
+ }
82
+
83
// Inline styles for the legend overlay: fixed top-right placement plus the
// app's shared frosted-glass panel look.
const containerStyle = {
    position: 'absolute',
    top: '20px',
    right: '20px',
    zIndex: 20,
    width: '240px',
};

const panelStyle = {
    backgroundColor: 'rgba(10, 10, 12, 0.8)',
    backdropFilter: 'blur(40px)',
    WebkitBackdropFilter: 'blur(40px)',
    border: '1px solid rgba(255, 255, 255, 0.08)',
    borderRadius: '16px',
    boxShadow: '0 12px 48px rgba(0, 0, 0, 0.6)',
    padding: '16px 20px',
};

const styles = {
    container: containerStyle,
    glassPanel: panelStyle,
};
io-app-front/src/components/MapView.jsx ADDED
@@ -0,0 +1,743 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // import { useEffect, useRef, useState } from 'react'
2
+ // import maplibregl from 'maplibre-gl'
3
+ // import mapboxgl from 'mapbox-gl'
4
+ // import 'maplibre-gl/dist/maplibre-gl.css'
5
+ // import 'mapbox-gl/dist/mapbox-gl.css'
6
+ // import classes from './MapView.module.css'
7
+ // import { Slider, Paper, Text, Group, Stack } from '@mantine/core';
8
+ //
9
+ // const SATELLITE_STYLE = {
10
+ // version: 8,
11
+ // sources: {
12
+ // 'satellite-tiles': {
13
+ // type: 'raster',
14
+ // tiles: [
15
+ // 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}'
16
+ // ],
17
+ // tileSize: 256,
18
+ // attribution: 'Esri, Maxar, Earthstar Geographics'
19
+ // }
20
+ // },
21
+ // layers: [
22
+ // {
23
+ // id: 'satellite-layer',
24
+ // type: 'raster',
25
+ // source: 'satellite-tiles',
26
+ // }
27
+ // ]
28
+ // };
29
+ //
30
+ // const SATELLITE_STYLE_MAPBOX = 'mapbox://styles/mapbox/satellite-streets-v12';
31
+ //
32
+ // function MapView({ onMapClick, analysisResult }) {
33
+ // const mapRef = useRef(null);
34
+ // const mapContainerRef = useRef(null);
35
+ // const markerRef = useRef(null);
36
+ // const [opacity, setOpacity] = useState(0.8);
37
+ //
38
+ // useEffect(() => {
39
+ // mapRef.current = new maplibregl.Map({
40
+ // container: mapContainerRef.current,
41
+ // style: SATELLITE_STYLE,
42
+ // center: [20.0, 50.0],
43
+ // zoom: 4,
44
+ // maxZoom: 16
45
+ // });
46
+ //
47
+ // mapRef.current.on('click', (e) => {
48
+ // const coords = e.lngLat;
49
+ //
50
+ // onMapClick(coords);
51
+ //
52
+ // if (markerRef.current) {
53
+ // markerRef.current.remove();
54
+ // }
55
+ //
56
+ // markerRef.current = new maplibregl.Marker({
57
+ // color: 'red',
58
+ // }).setLngLat(coords).addTo(mapRef.current);
59
+ // });
60
+ //
61
+ // return () => {
62
+ // if (markerRef.current) {
63
+ // markerRef.current.remove();
64
+ // }
65
+ //
66
+ // mapRef.current.remove();
67
+ // }
68
+ // }, []);
69
+ //
70
+ // useEffect(() => {
71
+ // if (!mapRef.current || !analysisResult) return;
72
+ //
73
+ // const { rgb, image, lat, lon } = analysisResult; // 'image' to obraz LULC
74
+ // const radiusKm = analysisResult.radius_km || 5;
75
+ //
76
+ // if (!rgb || !image) return;
77
+ //
78
+ // // Wspólne koordynaty dla obu obrazów
79
+ // const coordinates = calculateBounds(lat, lon, radiusKm);
80
+ //
81
+ // // Funkcja pomocnicza do czyszczenia i dodawania warstwy obrazu
82
+ // const updateImageLayer = (id, url, initialOpacity) => {
83
+ // const sourceId = `source-${id}`;
84
+ // const layerId = `layer-${id}`;
85
+ //
86
+ // if (mapRef.current.getSource(sourceId)) {
87
+ // if (mapRef.current.getLayer(layerId)) mapRef.current.removeLayer(layerId);
88
+ // mapRef.current.removeSource(sourceId);
89
+ // }
90
+ //
91
+ // mapRef.current.addSource(sourceId, {
92
+ // type: 'image',
93
+ // url: url,
94
+ // coordinates: coordinates
95
+ // });
96
+ //
97
+ // mapRef.current.addLayer({
98
+ // id: layerId,
99
+ // type: 'raster',
100
+ // source: sourceId,
101
+ // paint: {
102
+ // 'raster-opacity': initialOpacity,
103
+ // 'raster-fade-duration': 0
104
+ // }
105
+ // });
106
+ // };
107
+ //
108
+ // // A. Dodajemy RGB na spód (100% widoczności)
109
+ // updateImageLayer('rgb', rgb, 1);
110
+ //
111
+ // // B. Dodajemy LULC na wierzch (używamy stanu opacity)
112
+ // updateImageLayer('lulc', image, opacity);
113
+ //
114
+ // // C. Centrowanie mapy
115
+ // mapRef.current.flyTo({
116
+ // center: [lon, lat],
117
+ // zoom: 13,
118
+ // essential: true
119
+ // });
120
+ //
121
+ // }, [analysisResult]);
122
+ //
123
+ // useEffect(() => {
124
+ // if (!mapRef.current || !mapRef.current.getLayer('layer-lulc')) return;
125
+ //
126
+ // mapRef.current.setPaintProperty('layer-lulc', 'raster-opacity', opacity);
127
+ // }, [opacity]);
128
+ //
129
+ // return (
130
+ // <>
131
+ // <div className={ classes.mapContainer } ref={ mapContainerRef } />
132
+ // {analysisResult && (
133
+ // <div style={styles.bottomPanel}>
134
+ // <div style={styles.sliderContainer}>
135
+ // <span style={styles.label}>Satelita</span>
136
+ //
137
+ // <input
138
+ // type="range"
139
+ // min="0"
140
+ // max="1"
141
+ // step="0.01"
142
+ // value={opacity}
143
+ // onChange={(e) => setOpacity(parseFloat(e.target.value))}
144
+ // style={styles.slider}
145
+ // />
146
+ //
147
+ // <span style={styles.label}>Analiza ({Math.round(opacity * 100)}%)</span>
148
+ // </div>
149
+ // </div>
150
+ // )}
151
+ // </>
152
+ //
153
+ // )
154
+ // }
155
+ //
156
+ // function calculateBounds(centerLat, centerLng, radiusKm) {
157
+ // const R = 6378137; // Promień Ziemi (Sphere Mercator)
158
+ //
159
+ // // 1. Obliczamy ten sam Scale Factor co w Pythonie
160
+ // const scaleFactor = 1.0 / Math.cos(centerLat * Math.PI / 180);
161
+ //
162
+ // // 2. Przeliczamy promień na metry Mercatora
163
+ // const mercatorExtent = (radiusKm * 1000) * scaleFactor;
164
+ //
165
+ // // 3. Konwersja środka na metry X/Y
166
+ // const x = centerLng * (Math.PI / 180) * R;
167
+ // const latRad = centerLat * (Math.PI / 180);
168
+ // const y = Math.log(Math.tan((Math.PI / 4) + (latRad / 2))) * R;
169
+ //
170
+ // // 4. Wyznaczamy rogi w metrach Mercatora
171
+ // const minX = x - mercatorExtent;
172
+ // const maxX = x + mercatorExtent;
173
+ // const minY = y - mercatorExtent;
174
+ // const maxY = y + mercatorExtent;
175
+ //
176
+ // // 5. Powrót na stopnie
177
+ // const toLng = (mercX) => (mercX / R) * (180 / Math.PI);
178
+ // const toLat = (mercY) => {
179
+ // const rad = (2 * Math.atan(Math.exp(mercY / R))) - (Math.PI / 2);
180
+ // return rad * (180 / Math.PI);
181
+ // };
182
+ //
183
+ // return [
184
+ // [toLng(minX), toLat(maxY)], // NW
185
+ // [toLng(maxX), toLat(maxY)], // NE
186
+ // [toLng(maxX), toLat(minY)], // SE
187
+ // [toLng(minX), toLat(minY)] // SW
188
+ // ];
189
+ // }
190
+ //
191
+ // // --- STYLE DLA SUWAKA ---
192
+ // const styles = {
193
+ // bottomPanel: {
194
+ // position: 'absolute',
195
+ // bottom: '30px',
196
+ // left: '50%',
197
+ // transform: 'translateX(-50%)',
198
+ // width: '320px',
199
+ // backgroundColor: 'rgba(255, 255, 255, 0.9)',
200
+ // padding: '15px 20px',
201
+ // borderRadius: '12px',
202
+ // zIndex: 10,
203
+ // backdropFilter: 'blur(5px)',
204
+ // boxShadow: '0 4px 20px rgba(0,0,0,0.2)',
205
+ // border: '1px solid rgba(255,255,255,0.5)'
206
+ // },
207
+ // sliderContainer: {
208
+ // display: 'flex',
209
+ // alignItems: 'center',
210
+ // justifyContent: 'space-between',
211
+ // gap: '10px'
212
+ // },
213
+ // slider: {
214
+ // flexGrow: 1,
215
+ // cursor: 'pointer',
216
+ // accentColor: '#007AFF'
217
+ // },
218
+ // label: {
219
+ // fontWeight: '600',
220
+ // fontSize: '13px',
221
+ // color: '#333',
222
+ // minWidth: '50px',
223
+ // textAlign: 'center'
224
+ // }
225
+ // };
226
+ //
227
+ // export default MapView
228
+ //
229
+
230
+ // import { useEffect, useRef, useState } from 'react';
231
+ // import maplibregl from 'maplibre-gl';
232
+ // import mapboxgl from 'mapbox-gl'
233
+ // import 'maplibre-gl/dist/maplibre-gl.css';
234
+ // import classes from './MapView.module.css';
235
+ // import 'mapbox-gl/dist/mapbox-gl.css'
236
+ // import { Slider, Paper, Text, Group, Stack } from '@mantine/core';
237
+ // import Legend from './Legend'
238
+ //
239
+ // const SATELLITE_STYLE = {
240
+ // version: 8,
241
+ // sources: {
242
+ // 'satellite-tiles': {
243
+ // type: 'raster',
244
+ // tiles: [
245
+ // 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}'
246
+ // ],
247
+ // tileSize: 256,
248
+ // attribution: 'Esri, Maxar, Earthstar Geographics'
249
+ // }
250
+ // },
251
+ // layers: [
252
+ // {
253
+ // id: 'satellite-layer',
254
+ // type: 'raster',
255
+ // source: 'satellite-tiles',
256
+ // }
257
+ // ]
258
+ // };
259
+ //
260
+ // SECURITY NOTE (review): a real Mapbox access token is committed below (even though commented out) — it lives in git history and should be revoked/rotated; load it from VITE_MAPBOX_ACCESS_TOKEN instead.
+ // const ACCESS_TOKEN = mapboxgl.accessToken = 'pk.eyJ1IjoiamFucGllY2hvdGEiLCJhIjoiY21rZzUyeW50MDJmdzNjc2MyYWUwZmcwMyJ9.G3jH0_FyGPoxtUxe0ZcfJA';
261
+ //
262
+ // const SATELLITE_STYLE_MAPBOX = 'mapbox://styles/mapbox/satellite-streets-v12';
263
+ //
264
+ // function MapView({ onMapClick, analysisResult, selectedLocation }) {
265
+ // const mapRef = useRef(null);
266
+ // const mapContainerRef = useRef(null);
267
+ // const markerRef = useRef(null);
268
+ //
269
+ // const [opacity, setOpacity] = useState(0.5);
270
+ //
271
+ // const handleFocusAnalysis = () => {
272
+ // if (mapRef.current && analysisResult) {
273
+ // mapRef.current.flyTo({
274
+ // center: [analysisResult.lon, analysisResult.lat],
275
+ // zoom: 12,
276
+ // duration: 2000,
277
+ // essential: true
278
+ // });
279
+ // }
280
+ // };
281
+ //
282
+ // useEffect(() => {
283
+ // if (mapRef.current) return;
284
+ //
285
+ // mapRef.current = new mapboxgl.Map({
286
+ // container: mapContainerRef.current,
287
+ // style: SATELLITE_STYLE_MAPBOX,
288
+ // center: [20.0, 50.0],
289
+ // zoom: 4,
290
+ // projection: 'globe'
291
+ // });
292
+ //
293
+ // mapRef.current.addControl(new mapboxgl.NavigationControl(), 'bottom-right');
294
+ //
295
+ // mapRef.current.on('style.load', () => {
296
+ // // Pobieramy wszystkie warstwy obecne w stylu
297
+ // const layers = mapRef.current.getStyle().layers;
298
+ //
299
+ // layers.forEach((layer) => {
300
+ // // Sprawdzamy, czy nazwa warstwy sugeruje, że to droga/ulica
301
+ // const isRoad = layer.id.includes('road') ||
302
+ // layer.id.includes('bridge') ||
303
+ // layer.id.includes('tunnel') ||
304
+ // layer.id.includes('highway');
305
+ //
306
+ // if (isRoad) {
307
+ // mapRef.current.setLayoutProperty(layer.id, 'visibility', 'none');
308
+ // }
309
+ // });
310
+ // });
311
+ //
312
+ // mapRef.current.on('click', (e) => {
313
+ // const coords = e.lngLat;
314
+ // onMapClick(coords);
315
+ // });
316
+ // }, []);
317
+ //
318
+ // useEffect(() => {
319
+ // if (!mapRef.current) return;
320
+ //
321
+ // // Sytuacja A: Brak lokalizacji (np. wyczyszczono stan) -> usuń pinezkę
322
+ // if (!selectedLocation) {
323
+ // if (markerRef.current) {
324
+ // markerRef.current.remove();
325
+ // markerRef.current = null;
326
+ // }
327
+ // return;
328
+ // }
329
+ //
330
+ // // --- LOGIKA RUCHU KAMERY (zawsze lecimy do punktu) ---
331
+ // mapRef.current.flyTo({
332
+ // center: [selectedLocation.lng, selectedLocation.lat],
333
+ // zoom: 13,
334
+ // essential: true
335
+ // });
336
+ //
337
+ // // --- LOGIKA PINEZKI (MARKERA) ---
338
+ // // Jeśli lokalizacja pochodzi z wyszukiwarki (ma flagę isSearch), NIE stawiamy pinezki.
339
+ // // Pinezkę stawiamy tylko przy kliknięciu myszką (wtedy isSearch jest undefined/false).
340
+ //
341
+ // if (selectedLocation.isSearch) {
342
+ // // Jeśli była jakaś stara pinezka, usuwamy ją, żeby nie myliła
343
+ // if (markerRef.current) {
344
+ // markerRef.current.remove();
345
+ // markerRef.current = null;
346
+ // }
347
+ // } else {
348
+ // // To jest kliknięcie ręczne -> stawiamy/przesuwamy pinezkę
349
+ // if (markerRef.current) {
350
+ // markerRef.current.setLngLat([selectedLocation.lng, selectedLocation.lat]);
351
+ // } else {
352
+ // markerRef.current = new mapboxgl.Marker({ color: 'red' })
353
+ // .setLngLat([selectedLocation.lng, selectedLocation.lat])
354
+ // .addTo(mapRef.current);
355
+ // }
356
+ // }
357
+ //
358
+ // }, [selectedLocation]);
359
+ //
360
+ // useEffect(() => {
361
+ // if (!mapRef.current || !analysisResult) return;
362
+ //
363
+ // const { rgb, image, lat, lon } = analysisResult;
364
+ // const radiusKm = analysisResult.radius_km || 5;
365
+ //
366
+ // if (!rgb || !image) return;
367
+ //
368
+ // if (markerRef.current) {
369
+ // markerRef.current.remove();
370
+ // markerRef.current = null;
371
+ // }
372
+ //
373
+ // const coordinates = calculateBounds(lat, lon, radiusKm);
374
+ //
375
+ // const updateImageLayer = (id, url, initialOpacity) => {
376
+ // const sourceId = `source-${id}`;
377
+ // const layerId = `layer-${id}`;
378
+ //
379
+ // if (mapRef.current.getSource(sourceId)) {
380
+ // if (mapRef.current.getLayer(layerId)) mapRef.current.removeLayer(layerId);
381
+ // mapRef.current.removeSource(sourceId);
382
+ // }
383
+ //
384
+ // mapRef.current.addSource(sourceId, {
385
+ // type: 'image',
386
+ // url: url,
387
+ // coordinates: coordinates
388
+ // });
389
+ //
390
+ // mapRef.current.addLayer({
391
+ // id: layerId,
392
+ // type: 'raster',
393
+ // source: sourceId,
394
+ // paint: {
395
+ // 'raster-opacity': initialOpacity,
396
+ // 'raster-fade-duration': 0,
397
+ // }
398
+ // });
399
+ // };
400
+ //
401
+ // updateImageLayer('rgb', rgb, 1);
402
+ // updateImageLayer('lulc', image, opacity);
403
+ //
404
+ // mapRef.current.flyTo({
405
+ // center: [lon, lat],
406
+ // zoom: 11,
407
+ // essential: true
408
+ // });
409
+ //
410
+ // }, [analysisResult]);
411
+ //
412
+ // useEffect(() => {
413
+ // if (!mapRef.current || !mapRef.current.getLayer('layer-lulc')) return;
414
+ // mapRef.current.setPaintProperty('layer-lulc', 'raster-opacity', opacity);
415
+ // }, [opacity]);
416
+ //
417
+ // return (
418
+ // <>
419
+ // <div className={classes.mapContainer} ref={mapContainerRef} />
420
+ //
421
+ // {/* --- LEGENDA (PRAWY GÓRNY RÓG) --- */}
422
+ // {/* Wyświetlamy tylko, gdy mamy wyniki analizy */}
423
+ // {analysisResult && (
424
+ // <Legend analysisResult={analysisResult} />
425
+ // )}
426
+ //
427
+ // {/* --- PANEL SUWAKA (DÓŁ) --- */}
428
+ // {analysisResult && (
429
+ // <div style={styles.floatingContainer}>
430
+ // <Paper style={styles.glassPanel}>
431
+ // <Stack gap={6}>
432
+ // {/* ... (Twój kod slidera bez zmian) ... */}
433
+ // <Group justify="space-between" align="center">
434
+ // <Text size="xs" fw={700} c="dimmed" tt="uppercase" style={{ letterSpacing: '1px' }}>
435
+ // Przezroczystość
436
+ // </Text>
437
+ // <Text size="md" c="blue.4" fw={700}>
438
+ // {Math.round(opacity * 100)}%
439
+ // </Text>
440
+ // </Group>
441
+ //
442
+ // <Slider
443
+ // // ... propsy slidera ...
444
+ // value={opacity * 100}
445
+ // onChange={(val) => setOpacity(val / 100)}
446
+ // min={0}
447
+ // max={100}
448
+ // label={null}
449
+ // color="blue"
450
+ // size="xl"
451
+ // thumbSize={24}
452
+ // style={{ width: '100%' }}
453
+ // styles={{ track: { backgroundColor: 'rgba(255,255,255,0.1)' } }}
454
+ // />
455
+ //
456
+ // <Group justify="space-between">
457
+ // <Text size="10px" c="dimmed">Satelita</Text>
458
+ // <Text size="10px" c="dimmed">Analiza</Text>
459
+ // </Group>
460
+ //
461
+ // </Stack>
462
+ // </Paper>
463
+ // </div>
464
+ // )}
465
+ // </>
466
+ // );
467
+ // }
468
+ //
469
+ // function calculateBounds(centerLat, centerLng, radiusKm) {
470
+ // const R = 6378137;
471
+ // const scaleFactor = 1.0 / Math.cos(centerLat * Math.PI / 180);
472
+ // const mercatorExtent = (radiusKm * 1000) * scaleFactor;
473
+ // const x = centerLng * (Math.PI / 180) * R;
474
+ // const latRad = centerLat * (Math.PI / 180);
475
+ // const y = Math.log(Math.tan((Math.PI / 4) + (latRad / 2))) * R;
476
+ //
477
+ // const minX = x - mercatorExtent;
478
+ // const maxX = x + mercatorExtent;
479
+ // const minY = y - mercatorExtent;
480
+ // const maxY = y + mercatorExtent;
481
+ //
482
+ // const toLng = (mercX) => (mercX / R) * (180 / Math.PI);
483
+ // const toLat = (mercY) => {
484
+ // const rad = (2 * Math.atan(Math.exp(mercY / R))) - (Math.PI / 2);
485
+ // return rad * (180 / Math.PI);
486
+ // };
487
+ //
488
+ // return [
489
+ // [toLng(minX), toLat(maxY)],
490
+ // [toLng(maxX), toLat(maxY)],
491
+ // [toLng(maxX), toLat(minY)],
492
+ // [toLng(minX), toLat(minY)]
493
+ // ];
494
+ // }
495
+ //
496
+ // const styles = {
497
+ // floatingContainer: {
498
+ // position: 'absolute',
499
+ // bottom: '40px',
500
+ // left: '50%',
501
+ // transform: 'translateX(-50%)',
502
+ // width: '600px',
503
+ // zIndex: 20,
504
+ // },
505
+ // glassPanel: {
506
+ // backgroundColor: 'rgba(10, 10, 12, 0.8)',
507
+ // backdropFilter: 'blur(40px)',
508
+ // WebkitBackdropFilter: 'blur(40px)',
509
+ // border: '1px solid rgba(255, 255, 255, 0.08)',
510
+ // borderRadius: '16px',
511
+ // boxShadow: '0 12px 48px rgba(0, 0, 0, 0.6)',
512
+ // padding: '16px 24px',
513
+ // }
514
+ // };
515
+ //
516
+ // export default MapView;
517
+
518
+ import { useEffect, useRef, useState } from 'react';
519
+ import mapboxgl from 'mapbox-gl';
520
+ import 'mapbox-gl/dist/mapbox-gl.css';
521
+ import classes from './MapView.module.css';
522
+ import { Slider, Paper, Text, Group, Stack } from '@mantine/core';
523
+ import Legend from './Legend';
524
+
525
// Mapbox base style: satellite imagery with street/label overlays.
const SATELLITE_STYLE_MAPBOX = 'mapbox://styles/mapbox/satellite-streets-v12';
526
+
527
// Resolve an image data URL out of an analysis result by a config path.
// Supported paths: a top-level key (e.g. 'rgb', 'image') or a one-level
// mask reference 'masks.<key>'.
// Returns null when the result, the path, or the addressed image is missing.
// FIX: the original returned `undefined` when `result.masks` existed but the
// mask key did not (and for missing top-level keys), while returning `null`
// in every other missing case — callers now get a consistent `null`.
const getImageByPath = (result, path) => {
    if (!result || !path) return null;
    if (path.startsWith('masks.')) {
        const maskKey = path.split('.')[1];
        return result.masks?.[maskKey] ?? null;
    }
    return result[path] ?? null;
};
535
+
536
+ function MapView({ onMapClick, analysisResult, selectedLocation, layersConfig }) {
537
+ const mapRef = useRef(null);
538
+ const mapContainerRef = useRef(null);
539
+ const markerRef = useRef(null);
540
+ const [localOpacity, setLocalOpacity] = useState(0.8);
541
+
542
+ // 1. INICJALIZACJA MAPY
543
+ useEffect(() => {
544
+ if (mapRef.current) return;
545
+
546
+ // Ustaw token dostępu Mapbox
547
+ mapboxgl.accessToken = import.meta.env.VITE_MAPBOX_ACCESS_TOKEN;
548
+
549
+ mapRef.current = new mapboxgl.Map({
550
+ container: mapContainerRef.current,
551
+ style: SATELLITE_STYLE_MAPBOX,
552
+ center: [20.0, 50.0],
553
+ zoom: 4,
554
+ projection: 'globe'
555
+ });
556
+
557
+ mapRef.current.addControl(new mapboxgl.NavigationControl(), 'bottom-right');
558
+
559
+ // Usunięto pętlę ukrywającą drogi, aby zachować kontekst na wierzchu
560
+
561
+ mapRef.current.on('click', (e) => {
562
+ onMapClick(e.lngLat);
563
+ });
564
+ }, []);
565
+
566
+ // 2. OBSŁUGA PINEZKI I RUCHU KAMERY
567
+ useEffect(() => {
568
+ if (!mapRef.current) return;
569
+ if (!selectedLocation) {
570
+ if (markerRef.current) {
571
+ markerRef.current.remove();
572
+ markerRef.current = null;
573
+ }
574
+ return;
575
+ }
576
+
577
+ mapRef.current.flyTo({
578
+ center: [selectedLocation.lng, selectedLocation.lat],
579
+ zoom: 13,
580
+ essential: true
581
+ });
582
+
583
+ if (selectedLocation.isSearch) {
584
+ if (markerRef.current) {
585
+ markerRef.current.remove();
586
+ markerRef.current = null;
587
+ }
588
+ } else {
589
+ if (markerRef.current) {
590
+ markerRef.current.setLngLat([selectedLocation.lng, selectedLocation.lat]);
591
+ } else {
592
+ markerRef.current = new mapboxgl.Marker({ color: 'red' })
593
+ .setLngLat([selectedLocation.lng, selectedLocation.lat])
594
+ .addTo(mapRef.current);
595
+ }
596
+ }
597
+ }, [selectedLocation]);
598
+
599
+ // 3. OBSŁUGA DWÓCH WARSTW ANALIZY
600
+ useEffect(() => {
601
+ if (!mapRef.current || !analysisResult || !layersConfig) return;
602
+
603
+ const radiusKm = analysisResult.radius_km || 5;
604
+ const coordinates = calculateBounds(analysisResult.lat, analysisResult.lon, radiusKm);
605
+
606
+ const updateLayer = (id, layerKey, currentOpacity) => {
607
+ const sourceId = `source-${id}`;
608
+ const layerId = `layer-${id}`;
609
+ const imageData = getImageByPath(analysisResult, layersConfig[layerKey]);
610
+
611
+ if (mapRef.current.getSource(sourceId)) {
612
+ if (mapRef.current.getLayer(layerId)) mapRef.current.removeLayer(layerId);
613
+ mapRef.current.removeSource(sourceId);
614
+ }
615
+
616
+ if (imageData) {
617
+ mapRef.current.addSource(sourceId, {
618
+ type: 'image',
619
+ url: imageData,
620
+ coordinates: coordinates
621
+ });
622
+
623
+ // --- DYNAMIZACJA WARSTW (NAZWY NA WIERZCHU) ---
624
+ // Szukamy ID pierwszej warstwy z napisami (typ 'symbol')
625
+ const layers = mapRef.current.getStyle().layers;
626
+ let firstLabelId;
627
+ for (const layer of layers) {
628
+ if (layer.type === 'symbol') {
629
+ firstLabelId = layer.id;
630
+ break;
631
+ }
632
+ }
633
+
634
+ mapRef.current.addLayer({
635
+ id: layerId,
636
+ type: 'raster',
637
+ source: sourceId,
638
+ paint: {
639
+ 'raster-opacity': currentOpacity,
640
+ 'raster-fade-duration': 800,
641
+ }
642
+ }, firstLabelId); // Wstawienie POD etykiety
643
+ }
644
+ };
645
+
646
+ updateLayer('first', 'firstLayer', 1);
647
+ updateLayer('second', 'secondLayer', localOpacity);
648
+
649
+ }, [analysisResult, layersConfig.firstLayer, layersConfig.secondLayer]);
650
+
651
+ // 4. AKTUALIZACJA PRZEZROCZYSTOŚCI
652
+ useEffect(() => {
653
+ if (mapRef.current?.getLayer('layer-second')) {
654
+ mapRef.current.setPaintProperty('layer-second', 'raster-opacity', localOpacity);
655
+ }
656
+ }, [localOpacity]);
657
+
658
+ return (
659
+ <>
660
+ <div className={classes.mapContainer} ref={mapContainerRef} />
661
+ {analysisResult && <Legend analysisResult={analysisResult} />}
662
+ {analysisResult && (
663
+ <div style={styles.floatingContainer}>
664
+ <Paper style={styles.glassPanel}>
665
+ <Stack gap={6}>
666
+ <Group justify="space-between" align="center">
667
+ <Text size="xs" fw={700} c="dimmed" tt="uppercase" style={{ letterSpacing: '1px' }}>
668
+ Overlay Opacity
669
+ </Text>
670
+ <Text size="md" c="blue.4" fw={700}>
671
+ {Math.round(localOpacity * 100)}%
672
+ </Text>
673
+ </Group>
674
+
675
+ <Slider
676
+ value={localOpacity * 100}
677
+ onChange={(val) => setLocalOpacity(val / 100)}
678
+ min={0}
679
+ max={100}
680
+ label={null}
681
+ color="blue"
682
+ size="xl"
683
+ thumbSize={24}
684
+ style={{ width: '100%' }}
685
+ styles={{ track: { backgroundColor: 'rgba(255,255,255,0.1)' } }}
686
+ />
687
+
688
+ <Group justify="space-between">
689
+ <Text size="10px" c="dimmed">RGB Context</Text>
690
+ <Text size="10px" c="dimmed">Classification Layer</Text>
691
+ </Group>
692
+ </Stack>
693
+ </Paper>
694
+ </div>
695
+ )}
696
+ </>
697
+ );
698
+ }
699
+
700
// Convert a (lat, lng, radiusKm) circle into the four corners of the square
// Web-Mercator bounding box around it, in the order Mapbox image sources
// expect: NW, NE, SE, SW (each corner is [lng, lat] in degrees).
function calculateBounds(centerLat, centerLng, radiusKm) {
    const EARTH_RADIUS = 6378137; // spherical-Mercator Earth radius, metres
    const DEG2RAD = Math.PI / 180;
    const RAD2DEG = 180 / Math.PI;

    // Mercator stretches distances by 1/cos(lat); mirror the backend scaling
    // so the overlay extent matches the generated image exactly.
    const scale = 1.0 / Math.cos(centerLat * DEG2RAD);
    const halfSizeMeters = (radiusKm * 1000) * scale;

    // Project the centre into Mercator metres.
    const centerX = centerLng * DEG2RAD * EARTH_RADIUS;
    const centerY = Math.log(Math.tan(Math.PI / 4 + (centerLat * DEG2RAD) / 2)) * EARTH_RADIUS;

    // Inverse projection back to degrees.
    const unprojectLng = (mx) => (mx / EARTH_RADIUS) * RAD2DEG;
    const unprojectLat = (my) => (2 * Math.atan(Math.exp(my / EARTH_RADIUS)) - Math.PI / 2) * RAD2DEG;

    const west = unprojectLng(centerX - halfSizeMeters);
    const east = unprojectLng(centerX + halfSizeMeters);
    const north = unprojectLat(centerY + halfSizeMeters);
    const south = unprojectLat(centerY - halfSizeMeters);

    return [
        [west, north], [east, north],
        [east, south], [west, south]
    ];
}
722
+
723
// Inline styles for the bottom opacity panel: centered floating container
// plus the app's shared frosted-glass panel look.
const floatingContainerStyle = {
    position: 'absolute',
    bottom: '40px',
    left: '50%',
    transform: 'translateX(-50%)',
    width: '600px',
    zIndex: 20,
};

const glassPanelStyle = {
    backgroundColor: 'rgba(10, 10, 12, 0.8)',
    backdropFilter: 'blur(40px)',
    WebkitBackdropFilter: 'blur(40px)',
    border: '1px solid rgba(255, 255, 255, 0.08)',
    borderRadius: '16px',
    boxShadow: '0 12px 48px rgba(0, 0, 0, 0.6)',
    padding: '16px 24px',
};

const styles = {
    floatingContainer: floatingContainerStyle,
    glassPanel: glassPanelStyle,
};
742
+
743
+ export default MapView;
io-app-front/src/components/MapView.module.css ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
/* Full-size wrapper the Mapbox GL canvas mounts into; z-index 1 keeps it
   below the floating panels (legend, opacity slider) which use z-index 20. */
.mapContainer {
  height: 100%;
  width: 100%;
  z-index: 1;
}
6
+
7
+
8
+
io-app-front/src/components/SearchBar.jsx ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // import { useState, useEffect } from 'react'
2
+ // import { useDebouncedValue } from '@mantine/hooks'
3
+ // import axios from 'axios'
4
+ // import { Autocomplete } from '@mantine/core'
5
+ // import classes from './SearchBar.module.css'
6
+ //
7
+ // function SearchBar({ onLocationSelect }) {
8
+ // const [inputValue, setInputValue] = useState('');
9
+ // const [suggestions, setSuggestions] = useState([]);
10
+ //
11
+ // const [debouncedValue] = useDebouncedValue(inputValue, 400);
12
+ //
13
+ // useEffect(() => {
14
+ // if (!debouncedValue || debouncedValue.length < 3) {
15
+ // setSuggestions([]);
16
+ // return;
17
+ // }
18
+ //
19
+ // const fetchSuggestions = async () => {
20
+ // try {
21
+ // const response = await axios.get('https://photon.komoot.io/api/', {
22
+ // params: {
23
+ // q: debouncedValue,
24
+ // limit: 5,
25
+ // lang: 'pl'
26
+ // }
27
+ // });
28
+ //
29
+ // const features = response.data.features;
30
+ //
31
+ // const formattedData = features.map((f) => {
32
+ // const { name, street, city, country } = f.properties;
33
+ // const label = [name, street, city, country].filter(Boolean).join(', ');
34
+ //
35
+ // return {
36
+ // value: label,
37
+ // label: label,
38
+ // coords: f.geometry.coordinates,
39
+ // }
40
+ // });
41
+ //
42
+ // setSuggestions(formattedData);
43
+ // } catch (error) {
44
+ // console.error(error);
45
+ // }
46
+ // }
47
+ //
48
+ // fetchSuggestions().catch(console.error);
49
+ // }, [debouncedValue]);
50
+ //
51
+ // const handleOptionSubmit = (val) => {
52
+ // const selected = suggestions.find((opt) => opt.value === val);
53
+ // if (selected) {
54
+ // onLocationSelect({
55
+ // lng: selected.coords[0],
56
+ // lat: selected.coords[1],
57
+ // address: selected.value
58
+ // });
59
+ // }
60
+ // };
61
+ //
62
+ // return (
63
+ // <Autocomplete
64
+ // placeholder="Search location"
65
+ // value={ inputValue }
66
+ // onChange={setInputValue}
67
+ // data={suggestions}
68
+ // onOptionSubmit={handleOptionSubmit}
69
+ // />
70
+ // )
71
+ // }
72
+ //
73
+ // export default SearchBar
74
+
75
+ import { useState, useEffect } from 'react';
76
+ import { Autocomplete, Loader } from '@mantine/core';
77
+ import { useDebouncedValue } from '@mantine/hooks';
78
+ import { IconSearch } from '@tabler/icons-react'; // Odkomentuj jeśli masz ikony
79
+
80
/**
 * Debounced city search box backed by the Open-Meteo geocoding API.
 * Fires onLocationSelect({value, lat, lng, rawName, isSearch: true}) when
 * the user picks a suggestion; isSearch tells MapView not to drop a marker.
 */
function SearchBar({ onLocationSelect }) {
    const [value, setValue] = useState('');
    const [loading, setLoading] = useState(false);
    const [data, setData] = useState([]);

    // 400 ms debounce so we don't hit the API on every keystroke.
    const [debounced] = useDebouncedValue(value, 400);

    useEffect(() => {
        // 1. Guard: require at least 3 non-whitespace characters.
        if (!debounced || debounced.trim().length < 3) {
            setData([]);
            setLoading(false);
            return;
        }

        // Abort the in-flight request if the query changes or we unmount.
        const controller = new AbortController();

        const fetchLocations = async () => {
            setLoading(true);
            console.log(`📡 Szukam w Open-Meteo: "${debounced}"...`);

            try {
                // API switched to Open-Meteo geocoding (no key required).
                const query = encodeURIComponent(debounced);
                const url = `https://geocoding-api.open-meteo.com/v1/search?name=${query}&count=5&language=en&format=json`;

                const response = await fetch(url, { signal: controller.signal });

                if (!response.ok) {
                    throw new Error(`HTTP Error! status: ${response.status}`);
                }

                const jsonData = await response.json();

                const results = jsonData.results || [];

                // Use a Map to keep only unique display labels (the option value).
                const uniqueMap = new Map();

                results.forEach((item) => {
                    const label = [item.name, item.admin1, item.country]
                        .filter(Boolean)
                        .join(', ');

                    // First occurrence of a label wins; the label is the key,
                    // the value is the Autocomplete option object.
                    if (!uniqueMap.has(label)) {
                        uniqueMap.set(label, {
                            value: label,
                            lat: item.latitude,
                            lng: item.longitude,
                            rawName: item.name
                        });
                    }
                });

                const formattedData = Array.from(uniqueMap.values());

                setData(formattedData);

            } catch (error) {
                // Aborts are expected when the query changes mid-flight.
                if (error.name !== 'AbortError') {
                    console.error("❌ Błąd wyszukiwania:", error);
                }
            } finally {
                setLoading(false);
            }
        };

        fetchLocations();

        return () => {
            controller.abort();
        };
    }, [debounced]);

    // Resolve the submitted option text back to its coords and notify the parent.
    const handleSelect = (val) => {
        const selected = data.find((item) => item.value === val);
        if (selected && onLocationSelect) {
            console.log("✅ Wybrano:", selected);

            // Pass the object flagged with isSearch: true
            onLocationSelect({
                ...selected,
                isSearch: true // <--- key flag: search results fly the camera without a pin
            });

            setValue(selected.value);
        }
    };

    return (
        <Autocomplete
            value={value}
            onChange={setValue}
            onOptionSubmit={handleSelect}
            data={data}
            placeholder="Search city..."
            rightSection={loading ? <Loader size="xs" /> : null}
            leftSection={<IconSearch size={16} />}

            comboboxProps={{ zIndex: 10000, withinPortal: true }}

            styles={{
                input: {
                    backgroundColor: '#25262b',
                    color: '#fff',
                    borderColor: '#373A40',
                },
                dropdown: {
                    backgroundColor: '#25262b',
                    borderColor: '#373A40',
                    color: '#fff'
                },
                option: {
                    color: '#C1C2C5',
                    '&:hover': { backgroundColor: '#2C2E33' }
                }
            }}
        />
    );
}
203
+
204
+ export default SearchBar;
io-app-front/src/components/SearchBar.module.css ADDED
File without changes
io-app-front/src/components/SearchFilters.jsx ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // import React, { useState } from 'react';
2
+ //
3
+ // export default function SearchFilters({
4
+ // cloudCover, // Traktujemy to jako wartość początkową (Initial Value)
5
+ // setCloudCover, // Funkcja wywoływana przy zmianie (Callback)
6
+ // radius, // Wartość początkowa
7
+ // setRadius // Callback
8
+ // }) {
9
+ //
10
+ // // 1. Tworzymy LOKALNY stan, inicjowany wartościami od rodzica
11
+ // const [localCloud, setLocalCloud] = useState(cloudCover);
12
+ // const [localRadius, setLocalRadius] = useState(radius);
13
+ //
14
+ // // 2. Handlery obsługujące zmianę
15
+ // const handleCloudChange = (e) => {
16
+ // const val = parseInt(e.target.value);
17
+ // setLocalCloud(val); // Aktualizujemy wygląd suwaka i liczbę natychmiast
18
+ // setCloudCover(val); // Wysyłamy informację do rodzica (Sidebar/App)
19
+ // };
20
+ //
21
+ // const handleRadiusChange = (e) => {
22
+ // const val = parseInt(e.target.value);
23
+ // setLocalRadius(val);
24
+ // setRadius(val);
25
+ // };
26
+ //
27
+ // return (
28
+ // <div style={styles.panel}>
29
+ //
30
+ // {/* --- SEKCJA 1: MAX ZACHMURZENIE --- */}
31
+ // <div style={styles.section}>
32
+ // <div style={styles.header}>
33
+ // <span>☁️ Max Zachmurzenie</span>
34
+ // {/* Wyświetlamy LOKALNĄ wartość */}
35
+ // <span style={styles.value}>{localCloud}%</span>
36
+ // </div>
37
+ // <input
38
+ // type="range"
39
+ // min="0"
40
+ // max="100"
41
+ // step="1"
42
+ // // Suwak jest kontrolowany przez ten komponent (lokalnie), a nie przez rodzica
43
+ // value={localCloud}
44
+ // onChange={handleCloudChange}
45
+ // style={styles.slider}
46
+ // className="custom-range-slider"
47
+ // />
48
+ // </div>
49
+ //
50
+ // <div style={styles.divider} />
51
+ //
52
+ // {/* --- SEKCJA 2: ODLEGŁOŚĆ / PROMIEŃ --- */}
53
+ // <div style={styles.section}>
54
+ // <div style={styles.header}>
55
+ // <span>📏 Odległość analizy</span>
56
+ // <span style={styles.value}>{localRadius} km</span>
57
+ // </div>
58
+ // <input
59
+ // type="range"
60
+ // min="1"
61
+ // max="20"
62
+ // step="1"
63
+ // value={localRadius}
64
+ // onChange={handleRadiusChange}
65
+ // style={styles.slider}
66
+ // className="custom-range-slider"
67
+ // />
68
+ // </div>
69
+ //
70
+ // </div>
71
+ // );
72
+ // }
73
+ //
74
+ // const styles = {
75
+ // panel: {
76
+ // position: 'absolute',
77
+ // top: '80px',
78
+ // left: '20px',
79
+ // width: '300px',
80
+ // backgroundColor: 'rgba(255, 255, 255, 0.9)',
81
+ // backdropFilter: 'blur(10px)',
82
+ // borderRadius: '12px',
83
+ // padding: '15px',
84
+ // boxShadow: '0 4px 15px rgba(0,0,0,0.1)',
85
+ // zIndex: 40,
86
+ // display: 'flex',
87
+ // flexDirection: 'column',
88
+ // gap: '10px'
89
+ // },
90
+ // section: {
91
+ // display: 'flex',
92
+ // flexDirection: 'column',
93
+ // gap: '5px'
94
+ // },
95
+ // header: {
96
+ // display: 'flex',
97
+ // justifyContent: 'space-between',
98
+ // fontSize: '13px',
99
+ // fontWeight: '600',
100
+ // color: '#444',
101
+ // marginBottom: '4px'
102
+ // },
103
+ // value: {
104
+ // color: '#007AFF',
105
+ // fontWeight: 'bold'
106
+ // },
107
+ // slider: {
108
+ // width: '100%',
109
+ // cursor: 'pointer'
110
+ // },
111
+ // divider: {
112
+ // height: '1px',
113
+ // backgroundColor: '#eee',
114
+ // margin: '5px 0'
115
+ // }
116
+ // };
117
+
118
+ import React from 'react';
119
+ import { Slider, Select, NumberInput, Text, Stack } from '@mantine/core';
120
+
121
+ export default function SearchFilters({ values, onChange }) {
122
+
123
+ // Funkcja pomocnicza do szybszej aktualizacji stanu
124
+ const handleChange = (key, val) => {
125
+ onChange(key, val);
126
+ };
127
+
128
+ return (
129
+ <Stack gap="md">
130
+
131
+ {/* 1. SUWAK: Max Zachmurzenie */}
132
+ <div>
133
+ <div style={styles.header}>
134
+ <Text size="sm" fw={600} c="dimmed">Max Cloud Cover</Text>
135
+ <Text size="sm" fw={700} c="blue">{values.cloudCover}%</Text>
136
+ </div>
137
+ <Slider
138
+ value={values.cloudCover}
139
+ onChange={(val) => handleChange('cloudCover', val)}
140
+ min={0}
141
+ max={100}
142
+ step={1}
143
+ label={null} // Wyłączamy dymek, bo wartość jest w nagłówku
144
+ size="sm"
145
+ />
146
+ </div>
147
+
148
+ {/* 2. SUWAK: Promień analizy */}
149
+ <div>
150
+ <div style={styles.header}>
151
+ <Text size="sm" fw={600} c="dimmed">Analysis Radius</Text>
152
+ <Text size="sm" fw={700} c="blue">{values.radius} km</Text>
153
+ </div>
154
+ <Slider
155
+ value={values.radius}
156
+ onChange={(val) => handleChange('radius', val)}
157
+ min={1}
158
+ max={10}
159
+ step={1}
160
+ label={null}
161
+ size="sm"
162
+ />
163
+ </div>
164
+
165
+ {/* 3. INPUT: Zakres dni */}
166
+ <NumberInput
167
+ label="Time Range (days back)"
168
+ placeholder="Np. 30"
169
+ value={values.daysRange}
170
+ onChange={(val) => handleChange('daysRange', val)}
171
+ min={1}
172
+ max={365}
173
+ size="sm"
174
+ // Stylizacja etykiety, żeby pasowała do reszty
175
+ styles={{ label: { fontSize: '13px', fontWeight: 600, color: '#868e96' } }}
176
+ />
177
+
178
+ {/* 4. DROPDOWN: Wybór modelu */}
179
+ <Select
180
+ label="Model Version"
181
+ placeholder="Select model version"
182
+ data={[
183
+ { value: 'terramind_v1_large_generate', label: 'Terramind v1 Large' },
184
+ { value: 'terramind_v1_medium_generate', label: 'Terramind v1 Medium' },
185
+ { value: 'terramind_v1_small_generate', label: 'Terramind v1 Small' },
186
+ { value: 'terramind_v1_tiny_generate', label: 'Terramind v1 Tiny' },
187
+ ]}
188
+ value={values.model}
189
+ onChange={(val) => handleChange('model', val)}
190
+ size="sm"
191
+ styles={{ label: { fontSize: '13px', fontWeight: 600, color: '#868e96' } }}
192
+ />
193
+
194
+ </Stack>
195
+ );
196
+ }
197
+
198
// Inline CSS-in-JS styles (tuned to match the Mantine dark theme).
// NOTE(review): only `header` is referenced by the component in this file;
// `panel` looks unused here — confirm before removing.
const styles = {
  panel: {
    position: 'absolute',
    top: '20px', // moved up so the panel does not cover the map controls
    right: '20px', // right side is the usual spot for an options panel; switch to `left` if preferred
    width: '320px',
    backgroundColor: 'rgba(30, 31, 48, 0.95)', // dark background to match the dark theme
    backdropFilter: 'blur(12px)',
    borderRadius: '12px',
    padding: '20px',
    boxShadow: '0 8px 32px rgba(0,0,0,0.3)',
    border: '1px solid rgba(255,255,255,0.1)', // subtle outline
    zIndex: 1000,
  },
  // Row layout for each slider's "label … value" header line.
  header: {
    display: 'flex',
    justifyContent: 'space-between',
    marginBottom: '8px',
    alignItems: 'center'
  }
};
io-app-front/src/components/Sidebar.jsx ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import axios from 'axios';
3
+ import SearchFilters from './SearchFilters';
4
+ import SearchBar from './SearchBar';
5
+ import CompareModal from './CompareModal';
6
+ import classes from './Sidebar.module.css';
7
+ import { Divider, Title, Text, Stack, Select, Button, Group } from '@mantine/core';
8
+ import { IconArrowsLeftRight, IconPlayerPlay, IconMapPin } from '@tabler/icons-react';
9
+
10
+ function Sidebar({
11
+ selectedLocation,
12
+ onAnalysisComplete,
13
+ onLocationSelect,
14
+ layersConfig,
15
+ onLayersChange,
16
+ analysisResult
17
+ }) {
18
+ // Stan filtrów i wybranego modelu
19
+ const [filters, setFilters] = useState({
20
+ cloudCover: 20,
21
+ radius: 5,
22
+ daysRange: 60,
23
+ model: 'terramind_v1_large_generate'
24
+ });
25
+
26
+ const [isLoading, setIsLoading] = useState(false);
27
+ const [compareOpened, setCompareOpened] = useState(false);
28
+ const [comparisonData, setComparisonData] = useState(null);
29
+
30
+ const layerOptions = [
31
+ { group: 'Main Images', items: [
32
+ { label: 'Satellite (RGB)', value: 'rgb' },
33
+ { label: 'Classification (LULC)', value: 'image' },
34
+ { label: 'Raw Segmentation', value: 'raw_segmentation' }
35
+ ]},
36
+ { group: 'Spectral Indices (Masks)', items: [
37
+ { label: 'Vegetation (NDVI)', value: 'masks.vegetation_ndvi' },
38
+ { label: 'Water (NDWI)', value: 'masks.water_ndwi' },
39
+ { label: 'Buildings (NDBI)', value: 'masks.buildings_ndbi' },
40
+ { label: 'Bare Soil (BSI)', value: 'masks.baresoil_bsi' }
41
+ ]}
42
+ ];
43
+
44
+ const handleLayerChange = (layerKey, value) => {
45
+ onLayersChange(prev => ({ ...prev, [layerKey]: value }));
46
+ };
47
+
48
+ const handleFilterChange = (key, value) => {
49
+ setFilters(prev => ({ ...prev, [key]: value }));
50
+ };
51
+
52
+ // --- ANALIZA POJEDYNCZA (Wynik na mapę) ---
53
+ const handleAnalyze = async () => {
54
+ if (!selectedLocation || selectedLocation.isSearch) return;
55
+ setIsLoading(true);
56
+
57
+ const payload = {
58
+ location: { lat: selectedLocation.lat, lng: selectedLocation.lng },
59
+ params: {
60
+ bufferKm: filters.radius,
61
+ maxCloudCover: filters.cloudCover,
62
+ daysBack: filters.daysRange,
63
+ model: filters.model
64
+ }
65
+ };
66
+
67
+ try {
68
+ const response = await axios.post('http://127.0.0.1:5000/api/analyze', payload);
69
+ if (response.data.success && onAnalysisComplete) {
70
+ onAnalysisComplete(response.data);
71
+ } else {
72
+ alert("Błąd serwera: " + (response.data.error || "Nieznany błąd"));
73
+ }
74
+ } catch (error) {
75
+ console.error("❌ Błąd połączenia:", error);
76
+ alert("Błąd połączenia z backendem Flask.");
77
+ } finally {
78
+ setIsLoading(false);
79
+ }
80
+ };
81
+
82
+ // --- ANALIZA PORÓWNAWCZA (Wyniki do modala) ---
83
+ const handleRunCompare = async (modelA, modelB) => {
84
+ if (!selectedLocation || selectedLocation.isSearch) return;
85
+ setIsLoading(true);
86
+ setComparisonData(null);
87
+
88
+ const baseParams = {
89
+ bufferKm: filters.radius,
90
+ maxCloudCover: filters.cloudCover,
91
+ daysBack: filters.daysRange,
92
+ };
93
+
94
+ try {
95
+ // JEDNO żądanie do /api/advanced-analyze z oboma modelami
96
+ const response = await axios.post('http://127.0.0.1:5000/api/advanced-analyze', {
97
+ location: { lat: selectedLocation.lat, lng: selectedLocation.lng },
98
+ params: { ...baseParams, modelA: modelA, modelB: modelB }
99
+ });
100
+
101
+ if (response.data.success) {
102
+ setComparisonData({
103
+ modelA: response.data.modelA,
104
+ modelB: response.data.modelB,
105
+ metrics: response.data.metrics
106
+ });
107
+ }
108
+ } catch (error) {
109
+ console.error("❌ Błąd porównania:", error);
110
+ alert("Wystąpił błąd podczas analizy porównawczej.");
111
+ } finally {
112
+ setIsLoading(false);
113
+ }
114
+ };
115
+
116
+ const isLocationReady = selectedLocation && !selectedLocation.isSearch;
117
+
118
+ return (
119
+ <div className={classes.sidebar}>
120
+ <div className={classes.scrollArea}>
121
+ <Title order={3} c="white" mb="md">Analysis Panel</Title>
122
+
123
+ {/* WYSZUKIWARKA */}
124
+ <div style={{ marginBottom: '20px' }}>
125
+ <SearchBar onLocationSelect={onLocationSelect} />
126
+ </div>
127
+
128
+ {/* STATUS LOKALIZACJI */}
129
+ <div className={classes.locationBadge}>
130
+ {isLocationReady ? (
131
+ <Stack gap={2}>
132
+ <Text size="xs" c="dimmed" fw={700} tt="uppercase">Analysis Target:</Text>
133
+ <Group gap="xs">
134
+ <IconMapPin size={14} color="#228be6" />
135
+ <Text size="sm" c="blue.4" fw={500}>
136
+ {selectedLocation.lat.toFixed(5)}, {selectedLocation.lng.toFixed(5)}
137
+ </Text>
138
+ </Group>
139
+ </Stack>
140
+ ) : (
141
+ /* Naprawione fs="italic" */
142
+ <Text size="sm" c="dimmed" fs="italic">
143
+ {selectedLocation?.isSearch
144
+ ? "Now click on the map to place a pin."
145
+ : "Search for a city or click on the map."}
146
+ </Text>
147
+ )}
148
+ </div>
149
+
150
+ <Divider my="lg" color="gray.8" />
151
+
152
+ {/* FILTRY I WYBÓR MODELU */}
153
+ <SearchFilters values={filters} onChange={handleFilterChange} />
154
+
155
+ {/* PRZYCISKI AKCJI */}
156
+ <Stack gap="sm" mt="30px">
157
+ <Button
158
+ leftSection={<IconPlayerPlay size={18} />}
159
+ onClick={handleAnalyze}
160
+ disabled={!isLocationReady || isLoading}
161
+ loading={isLoading && !comparisonData}
162
+ size="md"
163
+ fullWidth
164
+ >
165
+ Run Analysis
166
+ </Button>
167
+
168
+ <Button
169
+ variant="outline"
170
+ leftSection={<IconArrowsLeftRight size={18} />}
171
+ onClick={() => setCompareOpened(true)}
172
+ disabled={!isLocationReady || isLoading}
173
+ size="md"
174
+ fullWidth
175
+ >
176
+ Advanced Analysis
177
+ </Button>
178
+ </Stack>
179
+
180
+ {/* WARSTWY MAPY (widoczne po otrzymaniu wyniku głównego) */}
181
+ {analysisResult && (
182
+ <div className={classes.layersBox}>
183
+ <Divider my="lg" color="gray.8" label="Map Display" labelPosition="center" />
184
+ <Stack gap="md">
185
+ <Select
186
+ label="Base Layer"
187
+ data={layerOptions}
188
+ value={layersConfig.firstLayer}
189
+ onChange={(val) => handleLayerChange('firstLayer', val)}
190
+ classNames={{ input: classes.selectInput, dropdown: classes.selectDropdown }}
191
+ />
192
+
193
+ <Select
194
+ label="Overlay Layer"
195
+ data={layerOptions}
196
+ value={layersConfig.secondLayer}
197
+ onChange={(val) => handleLayerChange('secondLayer', val)}
198
+ classNames={{ input: classes.selectInput, dropdown: classes.selectDropdown }}
199
+ />
200
+ </Stack>
201
+ </div>
202
+ )}
203
+ </div>
204
+
205
+ {/* MODAL WYNIKÓW PORÓWNAWCZYCH */}
206
+ <CompareModal
207
+ opened={compareOpened}
208
+ onClose={() => {
209
+ setCompareOpened(false);
210
+ setComparisonData(null);
211
+ }}
212
+ onRunCompare={handleRunCompare}
213
+ isLoading={isLoading}
214
+ results={comparisonData}
215
+ />
216
+ </div>
217
+ );
218
+ }
219
+
220
+ export default Sidebar;
io-app-front/src/components/Sidebar.module.css ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/* Frosted-glass side panel pinned to the left edge of the viewport. */
.sidebar {
  position: absolute;
  top: 0;
  left: 0;
  bottom: 0;
  width: 500px;
  z-index: 10;

  background-color: rgba(10, 10, 12, 0.8);
  backdrop-filter: blur(40px);
  -webkit-backdrop-filter: blur(40px);

  border: 1px solid rgba(255, 255, 255, 0.08);
  border-radius: 0 10px 10px 0;
  box-shadow: 0 12px 48px rgba(0, 0, 0, 0.6);
}

/* Scrollable content area inside the panel. */
.scrollArea {
  padding: 20px;
  flex: 1;
  overflow-y: auto;
}

/* Badge showing the currently selected analysis target. */
.locationBadge {
  padding: 12px;
  background-color: #25262b;
  border-radius: 8px;
  border: 1px solid #373a40;
  margin-bottom: 20px;
}

.layersBox {
  margin-top: 10px;
  padding-bottom: 20px;
}

/* Dark-theme overrides for the Mantine Select widgets. */
.selectInput {
  background-color: #25262b !important;
  color: #fff !important;
  border-color: #373a40 !important;
}

.selectDropdown {
  background-color: #25262b !important;
  border-color: #373a40 !important;
}

/* Slim custom scrollbar (WebKit browsers only). */
.scrollArea::-webkit-scrollbar {
  width: 6px;
}
.scrollArea::-webkit-scrollbar-thumb {
  background-color: #373a40;
  border-radius: 3px;
}
io-app-front/src/index.css ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
/* Pin the React mount node so it fills the entire viewport. */
#root {
  position: fixed;
  inset: 0; /* shorthand for top/right/bottom/left: 0 */
}
io-app-front/src/main.jsx ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ import { StrictMode } from 'react'
2
+ import { createRoot } from 'react-dom/client'
3
+ import './index.css'
4
+ import App from './App'
5
+
6
+ createRoot(document.getElementById('root')).render(
7
+ <StrictMode>
8
+ <App />
9
+ </StrictMode>,
10
+ )
io-app-front/vite.config.js ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'

// Vite build configuration — reference: https://vite.dev/config/
// Only the official React plugin is enabled.
export default defineConfig({
  plugins: [
    react(),
  ],
})