IZERE HIRWA Roger committed on
Commit
e3d301c
·
1 Parent(s): b84fe3f
Files changed (2) hide show
  1. Dockerfile +1 -1
  2. app.py +47 -47
Dockerfile CHANGED
@@ -47,4 +47,4 @@ RUN python -c "from sentence_transformers import SentenceTransformer; SentenceTr
47
 
48
  EXPOSE 7860
49
 
50
- CMD ["python", "run_aimhsa.py"]
 
47
 
48
  EXPOSE 7860
49
 
50
+ CMD ["python", "app.py"]
app.py CHANGED
@@ -1,23 +1,27 @@
1
- import os, json, numpy as np
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  from flask import Flask, request, jsonify, send_from_directory, render_template, redirect
3
  from flask_cors import CORS
4
  # Replace direct ollama import with OpenAI client
5
  from openai import OpenAI
6
  from dotenv import load_dotenv
7
- import sqlite3
8
  from werkzeug.security import generate_password_hash, check_password_hash
9
- import time
10
- import uuid
11
- from datetime import datetime
12
- import tempfile
13
- import pytesseract
14
- from werkzeug.utils import secure_filename
15
- import re
16
- import math
17
- from typing import Dict, List, Tuple, Optional
18
  from translation_service import translation_service
19
  from sms_service import initialize_sms_service, get_sms_service
20
- import time as _time
21
  from config import current_config
22
 
23
  # Initialize OpenAI client for Ollama
@@ -42,11 +46,6 @@ def _retry_openai_call(func, *args, _retries=1, _delay=0.5, **kwargs):
42
  pass
43
  else:
44
  raise last_err
45
- import smtplib
46
- from email.mime.text import MIMEText
47
- from email.mime.multipart import MIMEMultipart
48
-
49
- load_dotenv()
50
 
51
  # --- Helper Functions ---
52
  def get_time_ago(timestamp):
@@ -475,24 +474,12 @@ def save_message(conv_id: str, role: str, content: str):
475
  # determine owner_key if needed
476
  owner_key = get_owner_key_for_conv(conv_id)
477
  if row is None:
478
- # create conversation row if missing, attach owner_key when available
479
  conn.execute(
480
- "INSERT OR REPLACE INTO conversations (conv_id, owner_key, preview, ts) VALUES (?, ?, ?, ?)",
481
  (conv_id, owner_key, snippet[:120], time.time()),
482
  )
483
  else:
484
- existing_preview = row[0] or ""
485
- if existing_preview.strip() in ("", "New chat"):
486
- conn.execute(
487
- "UPDATE conversations SET preview = ?, ts = ? WHERE conv_id = ?",
488
- (snippet[:120], time.time(), conv_id),
489
- )
490
- else:
491
- # update timestamp at least so listing sorts by recent activity
492
- conn.execute(
493
- "UPDATE conversations SET ts = ? WHERE conv_id = ?",
494
- (time.time(), conv_id),
495
- )
496
  except Exception:
497
  # don't break saving messages if preview update fails
498
  pass
@@ -648,13 +635,13 @@ class RiskDetector:
648
  Respond in JSON format: {{"risk_score": 0.0-1.0, "indicators": ["indicator1", "indicator2"]}}
649
  """
650
 
651
- response = _retry_ollama_call(ollama.chat, model=CHAT_MODEL, messages=[
652
  {"role": "system", "content": "You are a mental health risk assessment AI. Analyze conversations for risk indicators and provide structured JSON responses."},
653
  {"role": "user", "content": ai_prompt}
654
  ])
655
 
656
  # Parse AI response robustly (extract JSON if wrapper text present)
657
- raw = response.get("message", {}).get("content", "")
658
  ai_result = {}
659
  try:
660
  ai_result = json.loads(raw)
@@ -1238,20 +1225,20 @@ def retrieve(query: str, k: int = 4, lambda_param: float = 0.6):
1238
  app.logger.error(f"Failed to use sentence-transformers: {e}")
1239
  # fallback to ollama if local model not available
1240
  try:
1241
- app.logger.info(f"Falling back to ollama.embeddings with model: {EMBED_MODEL}")
1242
- q_emb_resp = _retry_ollama_call(ollama.embeddings, model=EMBED_MODEL, prompt=query)
1243
- q_emb = np.array([q_emb_resp["embedding"]], dtype=np.float32)
1244
- app.logger.info("Successfully embedded query with ollama fallback")
1245
  except Exception as e2:
1246
- app.logger.error(f"Ollama fallback also failed: {e2}")
1247
  raise
1248
  else:
1249
- app.logger.info(f"Using ollama embeddings API with model: {SENT_EMBED_MODEL}")
1250
- # default: use ollama embeddings API
1251
  try:
1252
- q_emb_resp = _retry_ollama_call(ollama.embeddings, model=SENT_EMBED_MODEL, prompt=query)
1253
- q_emb = np.array([q_emb_resp["embedding"]], dtype=np.float32)
1254
- app.logger.info(f"Successfully embedded query with ollama, shape: {q_emb.shape}")
1255
  except Exception as e:
1256
  app.logger.error(f"Failed to embed query with {SENT_EMBED_MODEL}: {e}")
1257
  # Return empty results if embedding fails
@@ -1268,14 +1255,14 @@ def retrieve(query: str, k: int = 4, lambda_param: float = 0.6):
1268
  )
1269
  # Always use nomic-embed-text to match the stored chunks
1270
  try:
1271
- re_q = ollama.embeddings(model="nomic-embed-text", prompt=query)
1272
- q_emb2 = np.array([re_q["embedding"]], dtype=np.float32)
1273
  q_dim2 = int(q_emb2.shape[1])
1274
  if q_dim2 == doc_dim:
1275
  q_emb = q_emb2
1276
- app.logger.info("Re-embedded query with nomic-embed-text to match chunk dimensions")
1277
  else:
1278
- app.logger.error(f"nomic-embed-text still produces wrong dimension: {q_dim2} != {doc_dim}")
1279
  return []
1280
  except Exception as re_err:
1281
  app.logger.error(f"Re-embedding with nomic-embed-text failed: {re_err}")
@@ -1664,6 +1651,19 @@ Directives professionnelles:
1664
  - Maintenez un ton naturel et conversationnel en français
1665
  - Assurez des standards professionnels de soutien en santé mentale
1666
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1667
  Rappelez-vous: Vous êtes un système professionnel de soutien en santé mentale conçu pour fournir une assistance immédiate et culturellement appropriée tout en connectant les utilisateurs aux soins professionnels quand nécessaire.""",
1668
 
1669
  'rw': """Uri AIMHSA, umufasha w'ubuzima bw'ubwoba bw'u Rwanda w'ubuhanga.
 
1
+ import os
2
+ import sys
3
+ import time
4
+ import uuid
5
+ import json
6
+ import re
7
+ import sqlite3
8
+ import numpy as np
9
+ import tempfile
10
+ import smtplib
11
+ import secrets
12
+ import math
13
+ import traceback
14
+ import ollama
15
+ import _time
16
  from flask import Flask, request, jsonify, send_from_directory, render_template, redirect
17
  from flask_cors import CORS
18
  # Replace direct ollama import with OpenAI client
19
  from openai import OpenAI
20
  from dotenv import load_dotenv
 
21
  from werkzeug.security import generate_password_hash, check_password_hash
 
 
 
 
 
 
 
 
 
22
  from translation_service import translation_service
23
  from sms_service import initialize_sms_service, get_sms_service
24
+
25
  from config import current_config
26
 
27
  # Initialize OpenAI client for Ollama
 
46
  pass
47
  else:
48
  raise last_err
 
 
 
 
 
49
 
50
  # --- Helper Functions ---
51
  def get_time_ago(timestamp):
 
474
  # determine owner_key if needed
475
  owner_key = get_owner_key_for_conv(conv_id)
476
  if row is None:
 
477
  conn.execute(
478
+ "INSERT OR IGNORE INTO conversations (conv_id, owner_key, preview, ts) VALUES (?, ?, ?, ?)",
479
  (conv_id, owner_key, snippet[:120], time.time()),
480
  )
481
  else:
482
+ conn.execute("UPDATE conversations SET preview = ?, ts = ? WHERE conv_id = ?", (snippet[:120], time.time(), conv_id))
 
 
 
 
 
 
 
 
 
 
 
483
  except Exception:
484
  # don't break saving messages if preview update fails
485
  pass
 
635
  Respond in JSON format: {{"risk_score": 0.0-1.0, "indicators": ["indicator1", "indicator2"]}}
636
  """
637
 
638
+ response = _retry_openai_call(openai_client.chat.completions.create, model=CHAT_MODEL, messages=[
639
  {"role": "system", "content": "You are a mental health risk assessment AI. Analyze conversations for risk indicators and provide structured JSON responses."},
640
  {"role": "user", "content": ai_prompt}
641
  ])
642
 
643
  # Parse AI response robustly (extract JSON if wrapper text present)
644
+ raw = response.choices[0].message.content if response.choices else ""
645
  ai_result = {}
646
  try:
647
  ai_result = json.loads(raw)
 
1225
  app.logger.error(f"Failed to use sentence-transformers: {e}")
1226
  # fallback to ollama if local model not available
1227
  try:
1228
+ app.logger.info(f"Falling back to openai embeddings with model: {EMBED_MODEL}")
1229
+ response = openai_client.embeddings.create(model=EMBED_MODEL, input=query)
1230
+ q_emb = np.array([response.data[0].embedding], dtype=np.float32)
1231
+ app.logger.info("Successfully embedded query with openai fallback")
1232
  except Exception as e2:
1233
+ app.logger.error(f"OpenAI fallback also failed: {e2}")
1234
  raise
1235
  else:
1236
+ app.logger.info(f"Using openai embeddings API with model: {SENT_EMBED_MODEL}")
1237
+ # default: use openai embeddings API
1238
  try:
1239
+ response = openai_client.embeddings.create(model=SENT_EMBED_MODEL, input=query)
1240
+ q_emb = np.array([response.data[0].embedding], dtype=np.float32)
1241
+ app.logger.info(f"Successfully embedded query with openai, shape: {q_emb.shape}")
1242
  except Exception as e:
1243
  app.logger.error(f"Failed to embed query with {SENT_EMBED_MODEL}: {e}")
1244
  # Return empty results if embedding fails
 
1255
  )
1256
  # Always use nomic-embed-text to match the stored chunks
1257
  try:
1258
+ response = openai_client.embeddings.create(model="nomic-embed-text", input=query)
1259
+ q_emb2 = np.array([response.data[0].embedding], dtype=np.float32)
1260
  q_dim2 = int(q_emb2.shape[1])
1261
  if q_dim2 == doc_dim:
1262
  q_emb = q_emb2
1263
+ app.logger.info(f"Successfully re-embedded with nomic-embed-text, shape: {q_emb.shape}")
1264
  else:
1265
+ app.logger.error(f"Even nomic-embed-text dimension {q_dim2} doesn't match chunk dim {doc_dim}")
1266
  return []
1267
  except Exception as re_err:
1268
  app.logger.error(f"Re-embedding with nomic-embed-text failed: {re_err}")
 
1651
  - Maintenez un ton naturel et conversationnel en français
1652
  - Assurez des standards professionnels de soutien en santé mentale
1653
 
1654
+ Rappelez-vous: Vous êtes un système professionnel de soutien en santé mentale conçu pour fournir une assistance immédiate et culturellement appropriée tout en connectant les utilisateurs aux soins professionnels quand nécessaire.""",
1655
+
1656
+ 'rw': """Uri AIMHSA, umufasha w'ubuzima bw'ubwoba bw'u Rwanda w'ubuhanga.
1657
+
1658
+ Amabwiriza y'ubuhanga:
1659
+ - Ube umuntu w'umutima mwiza, w'umutima mwiza, kandi w'umutima mwiza
1660
+ - Tanga amakuru yashyizweho ku bikoresho byo mu cyerekezo mugihe cyose
1661
+ - VUGURA BURI
1662
+ - Gardez les réponses professionnelles, concises mais utiles
1663
+ - Utilisez le contexte fourni pour donner des informations précises et pertinentes
1664
+ - Maintenez un ton naturel et conversationnel en français
1665
+ - Assurez des standards professionnels de soutien en santé mentale
1666
+
1667
  Rappelez-vous: Vous êtes un système professionnel de soutien en santé mentale conçu pour fournir une assistance immédiate et culturellement appropriée tout en connectant les utilisateurs aux soins professionnels quand nécessaire.""",
1668
 
1669
  'rw': """Uri AIMHSA, umufasha w'ubuzima bw'ubwoba bw'u Rwanda w'ubuhanga.