Tirush12 commited on
Commit
d20f02a
·
0 Parent(s):

Initial commit

Browse files
.gitattributes ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Auto detect text files and perform LF normalization
2
+ * text=auto
3
+ *.pt filter=lfs diff=lfs merge=lfs -text
4
+ *.bin filter=lfs diff=lfs merge=lfs -text
5
+ *.onnx filter=lfs diff=lfs merge=lfs -text
6
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
.github/workflows/sync_to_hub.yml ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Sync to Hugging Face hub
2
+ on:
3
+ push:
4
+ branches: [main]
5
+
6
+ # Allows manual run from Actions tab
7
+ workflow_dispatch:
8
+
9
+ jobs:
10
+ sync-to-hub:
11
+ runs-on: ubuntu-latest
12
+ steps:
13
+ - uses: actions/checkout@v3
14
+ with:
15
+ fetch-depth: 0
16
+ lfs: true
17
+
18
+ - name: Push to hub
19
+ env:
20
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
21
+ # Pushes to the Hugging Face Space named below; update the path if the Space ever moves.
22
+ run: git push https://Tirush12:$HF_TOKEN@huggingface.co/spaces/Tirush12/ResQ-Agent main
.gitignore ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ #uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+
110
+ # pdm
111
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112
+ #pdm.lock
113
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114
+ # in version control.
115
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116
+ .pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121
+ __pypackages__/
122
+
123
+ # Celery stuff
124
+ celerybeat-schedule
125
+ celerybeat.pid
126
+
127
+ # SageMath parsed files
128
+ *.sage.py
129
+
130
+ # Environments
131
+ .env
132
+ .venv
133
+ env/
134
+ venv/
135
+ ENV/
136
+ env.bak/
137
+ venv.bak/
138
+
139
+ # Spyder project settings
140
+ .spyderproject
141
+ .spyproject
142
+
143
+ # Rope project settings
144
+ .ropeproject
145
+
146
+ # mkdocs documentation
147
+ /site
148
+
149
+ # mypy
150
+ .mypy_cache/
151
+ .dmypy.json
152
+ dmypy.json
153
+
154
+ # Pyre type checker
155
+ .pyre/
156
+
157
+ # pytype static type analyzer
158
+ .pytype/
159
+
160
+ # Cython debug symbols
161
+ cython_debug/
162
+
163
+ # PyCharm
164
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
167
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168
+ #.idea/
169
+
170
+ # Ruff stuff:
171
+ .ruff_cache/
172
+
173
+ # PyPI configuration file
174
+ .pypirc
175
+
176
+ # Cursor
177
+ # Cursor is an AI-powered code editor.`.cursorignore` specifies files/directories to
178
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
179
+ # refer to https://docs.cursor.com/context/ignore-files
180
+ .cursorignore
181
+ .cursorindexingignore
LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
app.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import os
3
+ from database import init_db, add_user, verify_user, create_session, get_user_sessions, get_session_details, save_message, get_session_messages
4
+ from backend import get_image_data, chat_with_context
5
+
6
# Initialize DB and page chrome before any widgets render.
init_db()
st.set_page_config(page_title="RescueAI Analyst", page_icon="🚁", layout="wide")

# --- STATE ---
# Seed session-state keys exactly once; Streamlit reruns keep existing values.
for _key, _default in (
    ("logged_in", False),
    ("username", ""),
    ("current_session_id", None),
):
    if _key not in st.session_state:
        st.session_state[_key] = _default
14
+
15
+ # --- UI HELPER: LEGEND ---
16
def display_floodnet_legend():
    """Renders legend for the map.

    Injects a small CSS-grid legend (via ``st.markdown`` with
    ``unsafe_allow_html=True``) mapping each FloodNet class colour to its
    label; shown beneath the AI segmentation mask.
    """
    # NOTE(review): these colours must stay in sync with the palette used by
    # the segmentation mask renderer — confirm against the tools module.
    legend_html = """
    <style>
    .legend-container {
        display: grid; grid-template-columns: repeat(2, 1fr); gap: 8px;
        margin-top: 10px; padding: 10px; background-color: #f0f2f6;
        border-radius: 8px; font-size: 14px; color: #000000; font-weight: 500;
    }
    .legend-item { display: flex; align-items: center; }
    .color-box { width: 18px; height: 18px; margin-right: 8px; border-radius: 4px; border: 1px solid #ccc; }
    </style>
    <div class="legend-container">
        <div class="legend-item"><div class="color-box" style="background: #FF0000;"></div>Flooded Bldg</div>
        <div class="legend-item"><div class="color-box" style="background: #8B4513;"></div>Safe Bldg</div>
        <div class="legend-item"><div class="color-box" style="background: #00008B;"></div>Flooded Road</div>
        <div class="legend-item"><div class="color-box" style="background: #808080;"></div>Safe Road</div>
        <div class="legend-item"><div class="color-box" style="background: #00BFFF;"></div>Water</div>
        <div class="legend-item"><div class="color-box" style="background: #FFD700;"></div>Vehicle</div>
        <div class="legend-item"><div class="color-box" style="background: #228B22;"></div>Tree</div>
        <div class="legend-item"><div class="color-box" style="background: #00FFFF;"></div>Pool</div>
    </div>
    """
    st.markdown(legend_html, unsafe_allow_html=True)
40
+
41
+ # --- LOGIN ---
42
def login_page():
    """Render the login / sign-up screen and update auth state on success."""
    st.title("🔐 RescueAI Analyst Login")
    login_tab, signup_tab = st.tabs(["Login", "Sign Up"])

    with login_tab:
        username = st.text_input("Username", key="l_user")
        password = st.text_input("Password", type="password", key="l_pw")
        if st.button("Login"):
            # Guard clause: reject bad credentials, otherwise flip auth state.
            if not verify_user(username, password):
                st.error("Invalid credentials")
            else:
                st.session_state.logged_in = True
                st.session_state.username = username
                st.rerun()

    with signup_tab:
        wanted_user = st.text_input("New Username", key="n_user")
        wanted_pw = st.text_input("New Password", type="password", key="n_pw")
        if st.button("Create Account"):
            if add_user(wanted_user, wanted_pw):
                st.success("Created! Log in now.")
            else:
                st.error("User exists.")
63
+
64
+ # --- NEW ANALYSIS ---
65
def start_new_analysis(uploaded_file):
    """Save the upload, run the vision pipeline, and open a fresh session.

    The full vision report is stored as a hidden "system" message so the chat
    model can be grounded later; no auto-greeting is added — the user speaks
    first. On success the app reruns with the new session selected.
    """
    save_dir = "uploaded_images"
    os.makedirs(save_dir, exist_ok=True)
    # FIX: basename() stops a crafted filename (e.g. "../../x.jpg") from
    # escaping the upload directory.
    file_path = os.path.join(save_dir, os.path.basename(uploaded_file.name))
    with open(file_path, "wb") as f:
        f.write(uploaded_file.getbuffer())

    session_id = None
    with st.spinner("🚀 Analyzing Terrain & Structures..."):
        try:
            data = get_image_data(file_path)

            title = f"Scan: {uploaded_file.name}"
            session_id = create_session(st.session_state.username, title, file_path)

            # --- FULL CONTEXT STRING ---
            # main_app() later parses the "MAP FILE: " suffix out of this text.
            context_str = f"""
[DETAILED VISION REPORT]
OBJECTS DETECTED:
- Flooded Buildings: {data['flooded_bldgs']}
- Safe Buildings: {data['safe_bldgs']}
- Vehicles Trapped: {data['vehicles']}
- Swimming Pools: {data['pools_count']}

TERRAIN ANALYSIS (Coverage %):
- Tree Coverage: {data['trees_pct']}%
- Grass/Land: {data['grass_pct']}%
- Natural Water Body: {data['water_pct']}%

INFRASTRUCTURE STATUS:
- Building Damage Rate: {data['bldg_damage_pct']:.1f}%
- ROAD STATUS: {data['road_flood_severity_pct']:.1f}% of roads are SUBMERGED.

MAP FILE: {data['map_path']}
"""
            save_message(session_id, "system", context_str)
        except Exception as e:
            st.error(f"Analysis Failed: {e}")
            return

    # FIX: st.rerun() signals a rerun via an internal control-flow exception;
    # keeping it outside the try/except prevents the handler from swallowing
    # the rerun and wrongly reporting "Analysis Failed".
    st.session_state.current_session_id = session_id
    st.rerun()
109
+
110
+ # --- MAIN APP ---
111
def main_app():
    """Main authenticated UI: sidebar case list plus image/chat panels."""
    with st.sidebar:
        st.header("🗂️ Case Files")
        if st.button("➕ New Analysis", type="primary"):
            st.session_state.current_session_id = None
            st.rerun()

        st.divider()
        sessions = get_user_sessions(st.session_state.username)
        for s_id, title, date in sessions:
            # session_id doubles as the widget key so buttons stay unique.
            if st.button(title, key=s_id):
                st.session_state.current_session_id = s_id
                st.rerun()

        st.divider()
        if st.button("Logout"):
            st.session_state.logged_in = False
            st.rerun()

    if st.session_state.current_session_id is None:
        st.title("🚁 New Disaster Analysis")
        uploaded_file = st.file_uploader("Select Aerial Image", type=['jpg', 'png'])
        if uploaded_file and st.button("Process Image"):
            start_new_analysis(uploaded_file)

    else:
        s_id = st.session_state.current_session_id
        details = get_session_details(s_id)
        if not details:
            st.error("Session error.")
            return

        img_path, title = details
        messages = get_session_messages(s_id)

        # Split the hidden vision report from the visible chat transcript.
        system_context = ""
        chat_history_display = []
        for role, content in messages:
            if role == "system":
                system_context = content
            else:
                chat_history_display.append((role, content))

        col1, col2 = st.columns([1, 1])

        with col1:
            st.subheader("👁️ Visual Intel")
            if os.path.exists(img_path):
                st.image(img_path, caption="Original Scene", use_container_width=True)

            # Best-effort: show the segmentation mask if the stored report
            # names one; a malformed report must not break the page.
            try:
                if "MAP FILE: " in system_context:
                    map_path = system_context.split("MAP FILE: ")[1].strip()
                    if os.path.exists(map_path):
                        st.image(map_path, caption="AI Segmentation Mask", use_container_width=True)
                        display_floodnet_legend()
            except Exception:
                # FIX: was a bare `except:`, which also swallows SystemExit
                # and KeyboardInterrupt; Exception keeps the best-effort
                # intent without hiding interpreter-level signals.
                pass

        with col2:
            st.subheader("💬 Analyst Chat")
            container = st.container(height=600)

            with container:
                # If no history yet, show a welcome tip
                if not chat_history_display:
                    st.info("Analysis Complete. Ask me about damage, roads, or vehicles.")
                else:
                    for role, content in chat_history_display:
                        with st.chat_message(role):
                            st.write(content)

            if user_input := st.chat_input("Ask about trees, pools, roads..."):
                save_message(s_id, "user", user_input)
                with container:
                    with st.chat_message("user"):
                        st.write(user_input)

                with st.spinner("Consulting data..."):
                    response = chat_with_context(system_context, user_input)
                    save_message(s_id, "assistant", response)
                    with container:
                        with st.chat_message("assistant"):
                            st.write(response)
                st.rerun()
196
+
197
# Route to the main app or the login screen based on auth state.
if st.session_state.logged_in:
    main_app()
else:
    login_page()
backend.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import json
from langchain_groq import ChatGroq
from dotenv import load_dotenv
from tools import object_detection_tool, semantic_segmentation_tool

# Load Env (supplies GROQ_API_KEY when running locally)
load_dotenv()

# --- SETUP GROQ ---
# SECURITY FIX: the API key was previously hard-coded here and committed to
# source control. Read it from the environment instead — and revoke the
# leaked key in the Groq console, since it is now public in git history.
api_key = os.getenv("GROQ_API_KEY")

if not api_key:
    print("⚠️ Warning: GROQ_API_KEY not found in .env")

llm = ChatGroq(
    api_key=api_key,
    model_name="llama-3.3-70b-versatile",
    temperature=0.3,  # low temperature keeps the analyst replies factual
)

# FloodNet Class Mapping (ID -> Name); must match the segmentation model's
# id2label config.
SEG_CLASSES = {
    0: "Background",
    1: "Building Flooded",
    2: "Building Non-Flooded",
    3: "Road Flooded",
    4: "Road Non-Flooded",
    5: "Water",
    6: "Tree",
    7: "Vehicle",
    8: "Pool",
    9: "Grass"
}
36
+
37
def get_image_data(image_path):
    """Run the vision tools on one image and compile a data dictionary.

    Returns object counts, terrain coverage percentages, damage/flooding
    ratios, and the path of the rendered segmentation map.
    """
    print(f"🚀 Extracting Full Data from: {image_path}")

    # --- 1. RUN PERCEPTION TOOLS ---
    detections = json.loads(object_detection_tool.invoke(image_path))
    segmentation = json.loads(semantic_segmentation_tool.invoke(image_path))

    # --- 2. PROCESS YOLO COUNTS (Objects) ---
    counts = detections.get('counts', {})
    flooded_bldgs = counts.get('Building Flooded', 0)
    safe_bldgs = counts.get('Building Non-Flooded', 0)
    vehicles = counts.get('Vehicle', 0)
    pools = counts.get('Pool', 0)

    total_buildings = flooded_bldgs + safe_bldgs
    bldg_damage_pct = (flooded_bldgs / total_buildings) * 100 if total_buildings > 0 else 0.0

    # --- 3. PROCESS SEGMENTATION (Terrain & Roads) ---
    pixel_counts = segmentation.get('pixel_counts', {})
    total_pixels = sum(pixel_counts.values()) if pixel_counts else 1

    # Keys may arrive as str or int depending on the JSON producer, so both
    # forms are tried for each class id.
    area_stats = {
        cls_name: round(
            (pixel_counts.get(str(cls_id), pixel_counts.get(cls_id, 0)) / total_pixels) * 100,
            2,
        )
        for cls_id, cls_name in SEG_CLASSES.items()
    }

    # Road Specific Analysis: flooded share of all visible road surface.
    road_flooded_area = area_stats.get("Road Flooded", 0)
    road_safe_area = area_stats.get("Road Non-Flooded", 0)
    total_road_area = road_flooded_area + road_safe_area
    # Below 0.1% total road area the ratio would be noise, so report 0.
    road_flood_severity = (road_flooded_area / total_road_area) * 100 if total_road_area > 0.1 else 0.0

    # --- 4. COMPILE FINAL DATA PACKET ---
    return {
        "flooded_bldgs": flooded_bldgs,
        "safe_bldgs": safe_bldgs,
        "vehicles": vehicles,
        "pools_count": pools,
        "bldg_damage_pct": bldg_damage_pct,
        "road_flood_severity_pct": road_flood_severity,
        "trees_pct": area_stats.get("Tree", 0),
        "grass_pct": area_stats.get("Grass", 0),
        "water_pct": area_stats.get("Water", 0),
        "map_path": segmentation.get('map_path', ""),
    }
99
+
100
def chat_with_context(system_context, user_input):
    """Answer one user question grounded in the pre-computed vision report.

    system_context is the "[DETAILED VISION REPORT]" string saved as the
    session's system message at analysis time. Returns the LLM reply text.
    Stateless: prior chat turns are not included in the prompt.
    """
    prompt = f"""
    SYSTEM INSTRUCTIONS:
    You are an advanced Disaster Response AI. You have access to precise sensor data from a drone.

    RESPONSE GUIDELINES:
    1. **VISUAL AWARENESS:** The user can see the "Visual Intel" panel on the left side of their screen.
       - If they ask "Show me the map" or "Where is the flood?", say: "I have already visualized the flood extent for you. Please check the **AI Segmentation Mask** in the left panel."
    2. **DIRECT ANSWER:** Answer questions directly. Do not explain your logic.
    3. **BE SPECIFIC:** Use the provided counts (e.g., "5 vehicles") instead of vague terms ("some cars").
    4. **RELEVANCE:** Only mention road safety if relevant or if roads are totally blocked.
    5. **TONE:** Professional, concise, and helpful.

    SENSOR DATA CONTEXT:
    {system_context}

    USER QUESTION:
    {user_input}
    """

    # Single synchronous LLM call; .content holds the plain-text reply.
    response = llm.invoke(prompt)
    return response.content
database.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sqlite3
2
+ import hashlib
3
+ import uuid
4
+ from datetime import datetime
5
+
6
+ DB_NAME = "app.db"
7
+
8
def init_db():
    """Create the users/sessions/messages tables (and indexes) if missing.

    Idempotent; safe to call on every app start.
    """
    conn = sqlite3.connect(DB_NAME)
    try:
        c = conn.cursor()

        # Users Table
        c.execute('''CREATE TABLE IF NOT EXISTS users
                     (username TEXT PRIMARY KEY, password TEXT)''')

        # Sessions Table - stores per-conversation metadata
        c.execute('''CREATE TABLE IF NOT EXISTS sessions
                     (session_id TEXT PRIMARY KEY, username TEXT,
                      title TEXT, image_path TEXT, created_at DATETIME)''')

        # Messages Table - linked to a session_id
        c.execute('''CREATE TABLE IF NOT EXISTS messages
                     (id INTEGER PRIMARY KEY AUTOINCREMENT,
                      session_id TEXT, role TEXT, content TEXT,
                      timestamp DATETIME DEFAULT CURRENT_TIMESTAMP)''')

        # Chat history is always fetched by session_id; an index avoids a
        # full-table scan as message volume grows.
        c.execute('''CREATE INDEX IF NOT EXISTS idx_messages_session
                     ON messages(session_id)''')

        conn.commit()
    finally:
        # FIX: guarantee the connection is released even if DDL fails.
        conn.close()
29
+
30
def add_user(username, password):
    """Insert a new user; return False when the username is already taken."""
    # NOTE(review): unsalted SHA-256 is weak for password storage — consider
    # hashlib.scrypt; changing it would invalidate existing rows, so it is
    # left as-is here.
    digest = hashlib.sha256(password.encode()).hexdigest()
    conn = sqlite3.connect(DB_NAME)
    try:
        conn.execute("INSERT INTO users (username, password) VALUES (?, ?)",
                     (username, digest))
        conn.commit()
        return True
    except sqlite3.IntegrityError:
        # PRIMARY KEY collision: username already exists.
        return False
    finally:
        conn.close()
42
+
43
def verify_user(username, password):
    """Return True when the username/password pair matches a stored user."""
    digest = hashlib.sha256(password.encode()).hexdigest()
    conn = sqlite3.connect(DB_NAME)
    try:
        row = conn.execute(
            "SELECT * FROM users WHERE username=? AND password=?",
            (username, digest),
        ).fetchone()
    finally:
        conn.close()
    return row is not None
51
+
52
+ # --- Session Management ---
53
+
54
def create_session(username, title, image_path):
    """Create a new chat session row and return its generated id."""
    new_id = str(uuid.uuid4())
    conn = sqlite3.connect(DB_NAME)
    try:
        conn.execute(
            "INSERT INTO sessions (session_id, username, title, image_path, created_at) VALUES (?, ?, ?, ?, ?)",
            (new_id, username, title, image_path, datetime.now()),
        )
        conn.commit()
    finally:
        conn.close()
    return new_id
64
+
65
def get_user_sessions(username):
    """Return (session_id, title, created_at) rows for the sidebar, newest first."""
    conn = sqlite3.connect(DB_NAME)
    try:
        cur = conn.execute(
            "SELECT session_id, title, created_at FROM sessions WHERE username=? ORDER BY created_at DESC",
            (username,),
        )
        return cur.fetchall()
    finally:
        conn.close()
73
+
74
def get_session_details(session_id):
    """Return (image_path, title) for a session, or None when not found."""
    conn = sqlite3.connect(DB_NAME)
    try:
        return conn.execute(
            "SELECT image_path, title FROM sessions WHERE session_id=?",
            (session_id,),
        ).fetchone()
    finally:
        conn.close()
82
+
83
def save_message(session_id, role, content):
    """Append one chat message ("system"/"user"/"assistant") to a session."""
    conn = sqlite3.connect(DB_NAME)
    try:
        conn.execute(
            "INSERT INTO messages (session_id, role, content) VALUES (?, ?, ?)",
            (session_id, role, content),
        )
        conn.commit()
    finally:
        conn.close()
91
+
92
def get_session_messages(session_id):
    """Return the full (role, content) history of a session in insert order."""
    conn = sqlite3.connect(DB_NAME)
    try:
        cur = conn.execute(
            "SELECT role, content FROM messages WHERE session_id=? ORDER BY id ASC",
            (session_id,),
        )
        return cur.fetchall()
    finally:
        conn.close()
models/segformer_custom/config.json ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "SegformerForSemanticSegmentation"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.0,
6
+ "classifier_dropout_prob": 0.1,
7
+ "decoder_hidden_size": 256,
8
+ "depths": [
9
+ 2,
10
+ 2,
11
+ 2,
12
+ 2
13
+ ],
14
+ "downsampling_rates": [
15
+ 1,
16
+ 4,
17
+ 8,
18
+ 16
19
+ ],
20
+ "drop_path_rate": 0.1,
21
+ "dtype": "float32",
22
+ "hidden_act": "gelu",
23
+ "hidden_dropout_prob": 0.0,
24
+ "hidden_sizes": [
25
+ 32,
26
+ 64,
27
+ 160,
28
+ 256
29
+ ],
30
+ "id2label": {
31
+ "0": "Background",
32
+ "1": "Building Flooded",
33
+ "2": "Building Non-Flooded",
34
+ "3": "Road Flooded",
35
+ "4": "Road Non-Flooded",
36
+ "5": "Water",
37
+ "6": "Tree",
38
+ "7": "Vehicle",
39
+ "8": "Pool",
40
+ "9": "Grass"
41
+ },
42
+ "image_size": 224,
43
+ "initializer_range": 0.02,
44
+ "label2id": {
45
+ "Background": 0,
46
+ "Building Flooded": 1,
47
+ "Building Non-Flooded": 2,
48
+ "Grass": 9,
49
+ "Pool": 8,
50
+ "Road Flooded": 3,
51
+ "Road Non-Flooded": 4,
52
+ "Tree": 6,
53
+ "Vehicle": 7,
54
+ "Water": 5
55
+ },
56
+ "layer_norm_eps": 1e-06,
57
+ "mlp_ratios": [
58
+ 4,
59
+ 4,
60
+ 4,
61
+ 4
62
+ ],
63
+ "model_type": "segformer",
64
+ "num_attention_heads": [
65
+ 1,
66
+ 2,
67
+ 5,
68
+ 8
69
+ ],
70
+ "num_channels": 3,
71
+ "num_encoder_blocks": 4,
72
+ "patch_sizes": [
73
+ 7,
74
+ 3,
75
+ 3,
76
+ 3
77
+ ],
78
+ "reshape_last_stage": true,
79
+ "semantic_loss_ignore_index": 255,
80
+ "sr_ratios": [
81
+ 8,
82
+ 4,
83
+ 2,
84
+ 1
85
+ ],
86
+ "strides": [
87
+ 4,
88
+ 2,
89
+ 2,
90
+ 2
91
+ ],
92
+ "transformers_version": "4.57.1"
93
+ }
models/segformer_custom/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68e3a4c70f61f670610e2075510ef32868b85e66921d99e06b5c5bd32e9cb60d
3
+ size 14893008
models/segformer_custom/preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "do_reduce_labels": false,
4
+ "do_rescale": true,
5
+ "do_resize": true,
6
+ "image_mean": [
7
+ 0.485,
8
+ 0.456,
9
+ 0.406
10
+ ],
11
+ "image_processor_type": "SegformerImageProcessor",
12
+ "image_std": [
13
+ 0.229,
14
+ 0.224,
15
+ 0.225
16
+ ],
17
+ "resample": 2,
18
+ "rescale_factor": 0.00392156862745098,
19
+ "size": {
20
+ "height": 512,
21
+ "width": 512
22
+ }
23
+ }
models/yolov8_floodnet.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afa4b42a5f0f35c1f7a3f91fa3c56cd7ea88f4501a14a2ccc1f34bdba42f8fb7
3
+ size 52077074
requirements.txt ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ streamlit
2
+ gradio
3
+ torch
4
+ torchvision
5
+ transformers
6
+ accelerate
7
+ bitsandbytes
8
+ langchain
9
+ langchain-community
10
+ langchain-groq
11
+ langchain-huggingface
12
+ ultralytics
13
+ sahi
14
+ opencv-python-headless
15
+ sqlalchemy
16
+ python-dotenv
17
+ pillow
tools.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from PIL import Image
3
+ import numpy as np
4
+ import json
5
+ import os
6
+ import cv2
7
+ from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
8
+ from sahi import AutoDetectionModel
9
+ from sahi.predict import get_sliced_prediction
10
+ from langchain_core.tools import tool
11
+
12
# --- CONFIGURATION ---
# Single device string reused by all models below; inputs are moved to it at call time.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# FloodNet Color Palette
# One RGB row per class id (0-9); indexing the palette with a class-id mask
# produces the colored segmentation map saved by semantic_segmentation_tool.
FLOODNET_PALETTE = np.array([
    [0, 0, 0], # 0: Background
    [255, 0, 0], # 1: Building Flooded (Red)
    [139, 69, 19], # 2: Building Non-Flooded (Brown)
    [0, 0, 139], # 3: Road Flooded (Dark Blue)
    [128, 128, 128], # 4: Road Non-Flooded (Gray)
    [0, 191, 255], # 5: Water (Light Blue)
    [34, 139, 34], # 6: Tree (Green)
    [255, 215, 0], # 7: Vehicle (Yellow)
    [0, 255, 255], # 8: Pool (Cyan)
    [50, 205, 50] # 9: Grass (Lime)
], dtype=np.uint8)

# --- LOAD MODELS ---
# Model artifacts are resolved relative to the process working directory.
YOLO_PATH = os.path.join("models", "yolov8_floodnet.pt")
SEG_PATH = os.path.join("models", "segformer_custom")

print(f"Loading Models on {DEVICE}...")
try:
    # SAHI wrapper around the fine-tuned YOLOv8 weights; confidence_threshold
    # filters detections at model level (the tool re-checks 0.5 as a safety net).
    detection_model = AutoDetectionModel.from_pretrained(
        model_type='yolov8', model_path=YOLO_PATH, confidence_threshold=0.5, device=DEVICE
    )

    # Processor config comes from the mit-b0 base checkpoint; the fine-tuned
    # weights are loaded from the local SEG_PATH directory.
    seg_processor = SegformerImageProcessor.from_pretrained("nvidia/mit-b0", do_reduce_labels=False)
    seg_model = SegformerForSemanticSegmentation.from_pretrained(SEG_PATH)
    seg_model.to(DEVICE)
    # Half precision on GPU; callers must also .half() the pixel_values they feed in.
    if DEVICE == "cuda": seg_model.half()
except Exception as e:
    # Leave the models as None so importing this module still succeeds;
    # downstream tool calls will fail at inference time instead of at import.
    print(f"⚠️ Model Load Error: {e}")
    detection_model = None
    seg_model = None
47
+
48
@tool
def object_detection_tool(image_path: str) -> str:
    """Detects objects (Flooded Building, Vehicle, etc.).

    Runs SAHI sliced inference with the YOLOv8 detector over the image.

    Args:
        image_path: Path to the aerial image on disk.

    Returns:
        JSON string: {"counts": {label: n}, "detections": [{"label", "confidence"}]}
        on success, or {"error": "..."} on failure.
    """
    try:
        # Model loading may have failed at import time (it is set to None then);
        # report that clearly instead of letting an AttributeError surface.
        if detection_model is None:
            return json.dumps({"error": "Detection model is not loaded"})
        if not os.path.exists(image_path):
            return json.dumps({"error": "File not found"})

        result = get_sliced_prediction(
            image_path, detection_model, slice_height=2000, slice_width=2000,
            overlap_height_ratio=0.05, overlap_width_ratio=0.05
        )

        detections = []
        counts = {}
        for obj in result.object_prediction_list:
            # Redundant with the model-level confidence_threshold=0.5, kept as a
            # safety net in case the model configuration changes.
            if obj.score.value < 0.5:
                continue
            label = obj.category.name
            detections.append({"label": label, "confidence": round(obj.score.value, 2)})
            counts[label] = counts.get(label, 0) + 1

        return json.dumps({"counts": counts, "detections": detections})
    except Exception as e:
        # The agent consumes tool output as text, so errors are returned as JSON
        # rather than raised.
        return json.dumps({"error": str(e)})
70
+
71
@tool
def semantic_segmentation_tool(image_path: str) -> str:
    """Segments terrain.

    Runs the fine-tuned SegFormer over the image, saves a colored class map,
    and reports per-class pixel counts.

    Args:
        image_path: Path to the aerial image on disk.

    Returns:
        JSON string: {"map_path": "...", "pixel_counts": {class_id: count}}
        on success, or {"error": "..."} on failure.
    """
    try:
        # Model loading may have failed at import time (it is set to None then);
        # report that clearly instead of letting an AttributeError surface.
        if seg_model is None:
            return json.dumps({"error": "Segmentation model is not loaded"})

        image = Image.open(image_path).convert("RGB")
        orig_size = image.size  # (width, height) — matches cv2.resize's dsize order

        # Resize for inference; match the GPU model's half precision when active.
        inputs = seg_processor(images=image.resize((768, 768), Image.BILINEAR), return_tensors="pt").to(DEVICE)
        if DEVICE == "cuda":
            inputs['pixel_values'] = inputs['pixel_values'].half()

        with torch.no_grad():
            outputs = seg_model(**inputs)

        # Upsample logits back to the inference resolution, then take per-pixel argmax.
        upsampled = torch.nn.functional.interpolate(outputs.logits, size=(768, 768), mode='bilinear', align_corners=False)
        pred_seg = upsampled.argmax(dim=1)[0].cpu().numpy().astype(np.uint8)

        # Resize mask to the original image size; nearest-neighbor preserves class ids.
        final_mask = cv2.resize(pred_seg, orig_size, interpolation=cv2.INTER_NEAREST)

        # Per-class pixel statistics (class id -> pixel count).
        unique, counts = np.unique(final_mask, return_counts=True)
        pixel_stats = dict(zip(unique.tolist(), counts.tolist()))

        # Save the colored map. splitext (instead of replace('.jpg', ...)) handles
        # any input extension (.png, .jpeg, ...) so the mask always gets the
        # "_seg_mask.png" suffix and never collides with the source filename.
        color_mask = FLOODNET_PALETTE[final_mask]
        save_dir = "output_maps"
        os.makedirs(save_dir, exist_ok=True)
        base_name, _ = os.path.splitext(os.path.basename(image_path))
        out_path = os.path.join(save_dir, f"{base_name}_seg_mask.png")
        Image.fromarray(color_mask).save(out_path)

        # Return BOTH the map path and the stats.
        return json.dumps({
            "map_path": out_path,
            "pixel_counts": pixel_stats
        })

    except Exception as e:
        # The agent consumes tool output as text, so errors are returned as JSON
        # rather than raised.
        return json.dumps({"error": str(e)})