UmerAS committed on
Commit
af18e1e
Β·
verified Β·
1 Parent(s): 83f898c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +191 -0
app.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import base64, tempfile, requests, os, re, json
3
+ import whisper # local Whisper model
4
+ from piper import PiperVoice
5
+ import wave
6
+
7
+ from sentence_transformers import SentenceTransformer, util # NEW for embeddings retrieval
8
+
9
+
10
# ─────────────────────────────────────────────────────────────
# Page chrome and per-session state.
st.set_page_config(page_title="Med11 – AI Medical Emergency guide", page_icon="🩺")

# Groq API key is read from Streamlit secrets, never hard-coded.
GROQ_API_KEY = st.secrets["GROQ_API_KEY"]

# One-time defaults; setdefault keeps existing values across reruns.
for _state_key, _state_default in (
    ("show_numbers", False),
    ("upload_id", 0),
    ("chat_history", []),
):
    st.session_state.setdefault(_state_key, _state_default)
19
+ # ─────────────────────────────────────────────────────────────
20
@st.cache_resource(show_spinner="Loading Whisper model…")
def _load_whisper():
    """Load (once, cached) the local Whisper "base" speech-to-text model."""
    model = whisper.load_model("base")
    return model
23
+
24
@st.cache_resource(show_spinner="Loading Piper voice model…")
def load_voice():
    """Load (once, cached) the Piper TTS voice from the local model file."""
    voice = PiperVoice.load("models/en_US-amy-low.onnx")
    return voice
27
+
28
# ─────────────── NEW: Load and embed first aid JSON knowledge ───────────────
@st.cache_resource(show_spinner="Loading embedder and first aid data…")
def load_embedder_and_knowledge():
    """Load the sentence embedder and the first-aid KB, pre-embedding every doc.

    Returns (embedder, docs, embeddings): the model, the flat list of
    pattern/response strings, and their tensor embeddings.
    """
    model = SentenceTransformer("all-MiniLM-L6-v2")

    # Knowledge base is a JSON list of intents (adjust path if needed).
    with open("first_aid_intents.json", "r", encoding="utf-8") as fh:
        intents = json.load(fh)

    # Flatten: for each intent, all its patterns followed by all its responses.
    corpus = []
    for entry in intents:
        corpus.extend(entry.get("patterns", []))
        corpus.extend(entry.get("responses", []))

    vectors = model.encode(corpus, convert_to_tensor=True)
    return model, corpus, vectors
44

# Build the retrieval index once at startup (result is cached by st.cache_resource).
embedder, docs, embeddings = load_embedder_and_knowledge()

47
def get_relevant_context(query, top_k=3):
    """Return the top-k most similar knowledge snippets, joined by newlines."""
    query_vec = embedder.encode(query, convert_to_tensor=True)
    matches = util.semantic_search(query_vec, embeddings, top_k=top_k)[0]
    snippets = [docs[match["corpus_id"]] for match in matches]
    return "\n".join(snippets)
51
+
52
# ─────────────────────────────────────────────────────────────
def groq_reply(prompt: str, model="llama3-8b-8192"):
    """Answer an emergency query via the Groq chat API, grounded in local first-aid docs.

    Retrieves the most relevant local knowledge, prepends it to the user
    prompt, and sends it together with the running chat history.
    Raises requests.HTTPError on a non-2xx API response.
    """
    # Retrieve local first-aid context and build the augmented prompt.
    context = get_relevant_context(prompt)
    full_prompt = (
        f"Use the following first-aid information to answer accurately:\n\n{context}\n\n"
        f"Emergency query: {prompt}"
    )

    system_message = {
        "role": "system",
        "content": (
            "You are an emergency-response medic. "
            "Classify the emergency, rate severity, and give clear, step-by-step first-aid. "
            "Limit to 120 words."
        ),
    }
    messages = [
        system_message,
        *st.session_state.chat_history,
        {"role": "user", "content": full_prompt},  # augmented prompt goes last
    ]

    response = requests.post(
        "https://api.groq.com/openai/v1/chat/completions",
        json={
            "model": model,
            "messages": messages,
            "temperature": 0.2,
            "max_tokens": 256,
        },
        headers={"Authorization": f"Bearer {GROQ_API_KEY}"},
        timeout=45,
    )
    response.raise_for_status()
    return response.json()["choices"][0]["message"]["content"]
86
+
87
def transcribe_audio(file_path: str) -> str:
    """Run the cached local Whisper model on an audio file; return the transcript text."""
    return _load_whisper().transcribe(file_path)["text"]
91
+
92
def clean_text_for_tts(text: str) -> str:
    """Strip markdown markup, symbols/emoji, and excess whitespace before TTS.

    Leaves only speakable words so Piper does not attempt to voice
    markdown asterisks, table pipes, or pictographic characters.
    """
    # Markdown emphasis markers (**bold**, *italic*, __bold__, _italic_).
    text = re.sub(r"\*\*|\*|__|_", "", text)
    # Stray markdown/table punctuation.
    text = re.sub(r"[|~>#]", "", text)
    # Symbols and emoji. FIX: the original ranges (U+2600–U+27BF) only cover
    # BMP dingbats and missed all supplementary-plane emoji (U+1F300+),
    # which the LLM output frequently contains.
    text = re.sub(r"[\u2600-\u27BF\uFE0F\U0001F000-\U0001FAFF]+", "", text)
    # Collapse every whitespace run to a single space.
    text = re.sub(r"\s+", " ", text)
    return text.strip()
98
+
99
def tts_player(text: str):
    """Synthesize *text* with Piper into a temp WAV and render an inline audio player.

    The temp file is always deleted, even if synthesis or playback fails.
    """
    voice = load_voice()
    clean_text = clean_text_for_tts(text)

    # Reserve a temp path; close the handle before wave.open() re-opens it
    # (the original wrote through wave.open while the tempfile handle was
    # still open, which is fragile on Windows).
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_wav:
        wav_path = tmp_wav.name

    try:
        with wave.open(wav_path, "wb") as wav_file:
            wav_file.setnchannels(1)  # mono
            wav_file.setsampwidth(2)  # 16-bit PCM
            wav_file.setframerate(voice.config.sample_rate)
            voice.synthesize(clean_text, wav_file)

        # FIX: close the file handle after reading (was open(...).read(), leaked).
        with open(wav_path, "rb") as audio_fh:
            audio_bytes = audio_fh.read()

        st.audio(audio_bytes, format="audio/wav")
    finally:
        # FIX: remove the temp file even when synthesis/playback raises.
        os.remove(wav_path)
114
+
115

st.title("🩺 Med11 – AI Medical Emergency guide")

# Toggle visibility of the emergency-numbers panel on each click.
if st.button("📞 Emergency Numbers"):
    st.session_state.show_numbers = not st.session_state.show_numbers

if st.session_state.show_numbers:
    with st.expander("Key Emergency Numbers (Pakistan)", expanded=True):
        st.markdown(
            """
            | Service | Number |
            |---------|--------|
            | 🚑 **Edhi Ambulance** | **115** |
            | 🚒 **Rescue / Fire** | **1122** |
            | 🚓 Police | **15** |
            | 💊 Poison Control (NIH) | **051-9255075** |
            """
        )

# A fresh uploader key per processed clip lets the user upload another
# file without Streamlit re-submitting the previous one on rerun.
uploader_key = f"audio_{st.session_state.upload_id}"
audio_file = st.file_uploader(
    "Upload a WAV/MP3 describing the emergency",
    type=["wav", "mp3", "m4a"],
    key=uploader_key,
)

if audio_file:
    # FIX: preserve the upload's real extension (was hard-coded ".mp3" even
    # for .wav/.m4a uploads), so decoding gets the correct container hint.
    suffix = os.path.splitext(audio_file.name)[1] or ".mp3"
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(audio_file.read())
        tmp_path = tmp.name

    with st.spinner("Transcribing…"):
        try:
            transcript = transcribe_audio(tmp_path).strip()
        except Exception as e:
            st.error(f"Transcription error: {e}")
            transcript = ""
        os.remove(tmp_path)

    with st.expander("📜 Transcript", expanded=False):
        st.write(transcript or "_(no speech detected)_")

    if transcript:
        with st.spinner("Analyzing…"):
            try:
                result = groq_reply(
                    f'Analyze this emergency: "{transcript}". '
                    "Classify type, rate severity, and give brief first-aid steps."
                )
                st.session_state.chat_history.append({"role": "user", "content": transcript})
                st.session_state.chat_history.append({"role": "assistant", "content": result})
            except Exception as e:
                st.error(f"Groq error: {e}")
                result = ""

        if result:
            tts_player(result)
            st.markdown("### 🧠 Guidance")
            st.markdown(result)

    # Bump the uploader key so the next rerun presents an empty uploader.
    st.session_state.upload_id += 1
    st.markdown("---")
    st.markdown("**Need to tell me more? Upload another voice clip below.**")

col1, col2 = st.columns(2)
with col1:
    if st.button("🏥 Nearest Hospital (Google Maps)"):
        st.markdown(
            "[Open Maps →](https://www.google.com/maps/search/hospital/)",
            unsafe_allow_html=True,
        )
with col2:
    if st.button("🗺️ Nearby Pharmacies"):
        st.markdown(
            "[Open Maps →](https://www.google.com/maps/search/pharmacy/)",
            unsafe_allow_html=True,
        )