NitinBot001 committed on
Commit
7ef7541
·
verified ·
1 Parent(s): eec5d1d

Upload 6 files

Browse files
Files changed (6) hide show
  1. .env +4 -0
  2. Dockerfile +30 -0
  3. disease.py +145 -0
  4. main.py +192 -0
  5. medicine.py +193 -0
  6. requirements.txt +5 -0
.env ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ GEMINI_API_KEY=<REDACTED-SECRET-ROTATE-THIS-KEY>
2
+ GOOGLE_API_KEY=<REDACTED-SECRET-ROTATE-THIS-KEY>
3
+ OPENAI_API_URL=https://api.groq.com/openai/v1
4
+ MODEL=openai/gpt-oss-20b
Dockerfile ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.12-slim

# Create non-root user
RUN useradd -m appuser

WORKDIR /app

# Install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy project files
COPY . .

# Change ownership to non-root user
RUN chown -R appuser:appuser /app

# Switch to non-root user
USER appuser

# Expose only main app port
EXPOSE 5000

# Start all three apps in the background and wait on all of them.
# BUG FIX: the last gunicorn was chained with '&&', which kept it in the
# foreground, so 'wait' was never reached and the first two servers were
# orphaned with no supervision. All three now run with '&' and 'wait'
# keeps the container alive until any of them exits.
# NOTE(review): gunicorn must be listed in requirements.txt for this CMD
# to work — confirm it is installed.
CMD ["sh", "-c", "\
gunicorn -b 0.0.0.0:5000 main:app & \
gunicorn -b 0.0.0.0:5001 disease:app & \
gunicorn -b 0.0.0.0:5002 medicine:app & \
wait \
"]
disease.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import json
from flask import Flask, request, jsonify
import google.generativeai as genai
import dotenv

# Load GOOGLE_API_KEY (and any other settings) from a local .env file.
dotenv.load_dotenv()

# --- Configuration ---
# Directory containing one plain-text fact sheet per disease.
FACT_SHEET_DIR = "Text_Files"

# --- System Instruction for the Gemini Model ---
# This instruction guides the model's behavior, ensuring it stays on task
# and uses only the tools and information we provide.
SYSTEM_INSTRUCTION = """
You are a helpful Health Fact Sheet Assistant. Your role is to answer questions
about specific diseases based ONLY on the information contained in the fact sheets
provided to you through the get_disease_fact_sheet tool.
Generate response in same language as user query.

Follow these rules strictly:
1. Use the fact sheet for answers whenever possible.
2. To get information, call the get_disease_fact_sheet function.
3. If the fact sheet doesn't cover the answer, reply using general knowledge and include a disclaimer.
4. First, check the user's query to see which disease it refers to, then fetch that fact sheet.
5. If the query isn't about a specific disease, reply using general knowledge with a disclaimer.
6. Keep responses clear, short, and simple. Don't mention the source of the information.
"""

# Configure the Google Generative AI SDK.
try:
    # This will automatically look for the GOOGLE_API_KEY environment variable.
    genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

    # Initialize the Gemini model with the system instruction.
    # We recommend using a model that is highly optimized for tool use.
    model = genai.GenerativeModel(
        'gemini-1.5-flash',
        system_instruction=SYSTEM_INSTRUCTION
    )
except Exception as e:
    # On any configuration failure the app still starts, but the /ask route
    # checks `model` and returns HTTP 500 while it is None.
    print(f"Error configuring Google Generative AI: {e}")
    print("Please make sure your GOOGLE_API_KEY environment variable is set.")
    model = None

# --- Flask App Initialization ---
app = Flask(__name__)
48
+
49
+
50
+ # --- Helper Functions (Tool Implementation) ---
51
+
52
def get_available_diseases():
    """Return the human-readable disease names that have a fact sheet on disk.

    Filenames such as ``Chickenpox_and_Shingles.txt`` are reported as
    ``Chickenpox and Shingles``.  A missing directory yields an empty list.
    """
    if not os.path.isdir(FACT_SHEET_DIR):
        return []
    names = []
    for entry in os.listdir(FACT_SHEET_DIR):
        if entry.endswith(".txt"):
            stem, _ext = os.path.splitext(entry)
            names.append(stem.replace('_', ' '))
    return names
58
+
59
def get_disease_fact_sheet(disease_name: str):
    """Read the local fact sheet for *disease_name* (tool executed by the model).

    Args:
        disease_name: Human-readable disease name (e.g. "Chickenpox and
            Shingles"); mapped to "<name_with_underscores>.txt" inside
            FACT_SHEET_DIR.

    Returns:
        dict with "disease" and "content" keys on success, or an "error"
        key when no matching file exists.
    """
    print(f"--- TOOL EXECUTION: Running get_disease_fact_sheet(disease_name='{disease_name}') ---")
    # Convert the friendly name back to a filename format, then strip any
    # directory components so a crafted name (e.g. "../../etc/passwd")
    # cannot escape the fact-sheet directory.
    filename = os.path.basename(disease_name.replace(' ', '_') + ".txt")
    filepath = os.path.join(FACT_SHEET_DIR, filename)

    if os.path.exists(filepath):
        with open(filepath, 'r', encoding='utf-8') as f:
            content = f.read()
        # BUG FIX: this log line previously printed a literal placeholder
        # instead of the actual filename.
        print(f"--- SUCCESS: Found and read '{filename}' ---")
        # Return a dictionary, which will be implicitly handled by the Gemini SDK
        return {"disease": disease_name, "content": content}
    else:
        print(f"--- ERROR: Fact sheet not found for '{disease_name}' ---")
        return {"error": f"Fact sheet not found for the disease: {disease_name}."}
78
+
79
+
80
+ # --- Main API Endpoint ---
81
+
82
@app.route('/ask', methods=['POST'])
def ask_question():
    """
    Handles user queries by orchestrating the interaction with the Gemini model,
    which is guided by the system instruction to use the provided tools and context.

    Expects JSON: {"query": "<user question>"}.
    Returns JSON: {"response": "<model answer>"} or {"error": ...}.
    """
    # `model` is None when SDK configuration failed at import time.
    if not model:
        return jsonify({"error": "Gemini client is not configured. Check your API key."}), 500

    data = request.get_json()
    if not data or 'query' not in data:
        return jsonify({"error": "Request must be JSON and contain a 'query' field."}), 400

    user_query = data['query']
    print(f"\n=================================================")
    print(f"Received new query: '{user_query}'")
    print(f"=================================================")

    available_diseases = get_available_diseases()
    if not available_diseases:
        return jsonify({"error": f"No fact sheets found in the '{FACT_SHEET_DIR}' directory."}), 500

    # === Orchestration with Gemini ===
    try:
        # We start a chat session. The model will automatically handle calling the
        # function and using its output to generate a final answer, thanks to
        # automatic function calling and the system instruction.
        chat = model.start_chat(enable_automatic_function_calling=True)

        # Construct a more informative prompt for the model, listing the
        # disease names the tool can actually look up.
        prompt = f"""
Here is the user's question: '{user_query}'

Please use your tools to answer it. The available diseases you can look up are:
{', '.join(available_diseases)}
"""

        print("--- Sending request to Gemini... ---")
        # Send the user's query and the definitions of the available tools
        response = chat.send_message(
            prompt,
            tools=[get_disease_fact_sheet]  # Pass the actual function reference
        )

        final_answer = response.text
        print(f"--- Gemini's Final Answer: ---\n{final_answer}\n")
        return jsonify({"response": final_answer})

    except Exception as e:
        # NOTE(review): the raw exception text is returned to the client,
        # which may leak internal details — consider a generic message.
        print(f"--- An unexpected error occurred: {e} ---")
        return jsonify({"error": f"Gemini API Error: {e}"}), 500
134
+
135
+
136
# --- To run the app ---
if __name__ == '__main__':
    # Make sure the Text_Files directory exists before starting
    if not os.path.isdir(FACT_SHEET_DIR):
        print(f"CRITICAL ERROR: The directory '{FACT_SHEET_DIR}' does not exist.")
        print("Please create it and populate it with the disease .txt files.")
    else:
        print("Starting Flask server...")
        print(f"Fact sheets loaded for: {', '.join(get_available_diseases())}")
        # NOTE(review): debug=True enables the Werkzeug debugger — local
        # development only; the Dockerfile serves this app via gunicorn.
        app.run(debug=True, port=5001)
main.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import uuid
import json
import requests
from flask import Flask, request, jsonify
from werkzeug.utils import secure_filename
import google.generativeai as genai
from dotenv import load_dotenv
from flask_cors import CORS


# Step 1: API key and environment setup.
load_dotenv()
try:
    genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
except TypeError:
    # NOTE(review): relies on the SDK raising TypeError for a None api_key —
    # confirm this holds for the installed google-generativeai version.
    print("ERROR: Google API Key nahi mila.")
    print("Ek .env file banayein aur usmein 'GOOGLE_API_KEY=your_key_here' likhein.")
    exit()

app = Flask(__name__)
CORS(app)  # allow cross-origin requests from browser clients

# Step 2: downstream service endpoints and in-memory session storage.
API_ENDPOINTS = {
    "skin_disease": "https://your-api-domain.com/skin-disease-detection",
    "medicine_info": "http://localhost:5002/api/query",
    "report_reading": "https://your-api-domain.com/report-reading",
    "disease_query": "http://localhost:5001/ask"
}
# session_id -> {"status"/"query"/"classification"} kept in process memory;
# lost on restart and not shared between gunicorn workers.
SESSIONS = {}
32
+
33
# Step 3: function that classifies the user's query with Gemini.
def classify_query_with_gemini(query: str):
    """Classify *query* into one of the four service categories via Gemini.

    Returns:
        dict parsed from the model's JSON output, with keys "category" (str)
        and "image_required" (bool), or None when the API call or JSON
        parsing fails.
    """
    model = genai.GenerativeModel('gemini-2.5-flash-lite')

    # *** PROMPT HAS BEEN IMPROVED ***
    prompt = f"""
Analyze the user's medical query and classify it into one of the following categories:
- skin_disease: For queries about skin conditions, rashes, moles, spots, or any visible symptoms on the skin.
- medicine_info: For query about medicine(like how to use it, side effects, etc.) and also questions about a specific Medicine shown in attached image (optional) .
- report_reading: For queries asking to interpret or explain a medical report, lab test, or blood work from an image.
- disease_query: For general questions about diseases, symptoms, causes, or treatments.

Based on the classification, determine if an image is essential to answer the query accurately.
Generate response in English only.

The user query is:
---START OF QUERY---
{query}
---END OF QUERY---

Provide the output ONLY in a valid JSON format with two keys: "category" (string) and "image_required" (boolean).

Example 1:
Query: "what are the symptoms of typhoid"
Output: {{"category": "disease_query", "image_required": false}}

Example 2:
Query: "I have a red circular rash on my arm, what is it?"
Output: {{"category": "skin_disease", "image_required": true}}

Example 3:
Query: "Can you tell me what this lab report says?"
Output: {{"category": "report_reading", "image_required": true}}

Example 4:
Query: "What is this white pill with 'IP 204' written on it?"
Output: {{"category": "medicine_info", "image_required": true}}
"""

    try:
        response = model.generate_content(prompt)
        # Strip markdown code fences the model sometimes wraps around JSON.
        cleaned_text = response.text.strip().replace('```json', '').replace('```', '')
        result = json.loads(cleaned_text)
        return result
    except Exception as e:
        print(f"Gemini API call ya JSON parsing mein error: {e}")
        return None
81
+
82
# Step 4: API routes (endpoints).
@app.route('/start_session', methods=['POST'])
def start_session():
    """Mint a new UUID session, record it in SESSIONS, and return it."""
    session_id = str(uuid.uuid4())
    SESSIONS[session_id] = dict(status="started")
    print(f"Session started: {session_id}")
    return jsonify({"session_id": session_id}), 200
89
+
90
@app.route('/process_query', methods=['POST'])
def process_query():
    """Classify a text query and either request an image or forward it.

    Expects JSON {"session_id": ..., "query": ...}. Image-dependent
    categories get an "image_required" reply directing the client to
    /process_with_image; text-only categories are forwarded straight to the
    matching backend service and the session is closed.
    """
    data = request.get_json()
    session_id = data.get('session_id')
    query = data.get('query')

    if not session_id or session_id not in SESSIONS:
        return jsonify({"error": "Invalid or missing session_id"}), 400
    if not query:
        return jsonify({"error": "Query is required"}), 400

    print(f"Session {session_id}: Query received: '{query}'")
    classification = classify_query_with_gemini(query)

    if not classification:
        return jsonify({"error": "Could not classify the query."}), 500

    # Stash the query/classification so /process_with_image can use them.
    SESSIONS[session_id]['classification'] = classification
    SESSIONS[session_id]['query'] = query

    if classification.get('image_required'):
        print(f"Session {session_id}: Image required for category '{classification.get('category')}'")
        return jsonify({
            "status": "image_required",
            "message": "Please send the request to /process_with_image with the required photo."
        }), 200

    category = classification.get('category')
    endpoint_url = API_ENDPOINTS.get(category)
    if not endpoint_url:
        return jsonify({"error": f"No backend service configured for category '{category}'."}), 500

    print(f"Session {session_id}: No image required. Forwarding to '{category}' API.")
    try:
        # BUG FIX: the original chained four requests.post calls with `or`,
        # relying on Response truthiness, and the last call used an invalid
        # `payload=` keyword that raises TypeError whenever reached.
        # Try a JSON body first; fall back to form-encoded data once for
        # backends (e.g. the medicine service) that read form fields.
        response = requests.post(endpoint_url, json={"query": query})
        if not response.ok:
            response = requests.post(endpoint_url, data={"query": query})
        response.raise_for_status()
        backend_payload = response.json()
    except requests.exceptions.RequestException as e:
        del SESSIONS[session_id]
        print(f"Session {session_id} closed after failed API call.")
        return jsonify({"status": "error", "message": f"Backend service call failed: {e}"}), 503

    del SESSIONS[session_id]
    print(f"Session {session_id} closed.")
    return jsonify({
        "status": "success",
        "response": backend_payload,
        "data": f"Information about '{query}': This is a tuned response from the {classification.get('category')} service."
    })
132
+
133
@app.route('/process_with_image', methods=['POST'])
def process_with_image():
    """Second step for image-dependent categories: forward query + photo.

    Expects multipart/form-data with 'session_id' and a 'photo' file. The
    session must already hold the query/classification stored by
    /process_query.
    """
    session_id = request.form.get('session_id')

    if not session_id or session_id not in SESSIONS:
        return jsonify({"error": "Invalid or missing session_id"}), 400

    if 'photo' not in request.files:
        return jsonify({"error": "No photo file found in the request"}), 400

    file = request.files['photo']
    if file.filename == '':
        return jsonify({"error": "No selected file"}), 400

    # Pull the query and classification stored earlier in the session.
    # NOTE(review): if the client skips /process_query, 'classification' is
    # None and the subscript below raises — confirm the intended flow.
    query = SESSIONS[session_id].get('query')
    classification = SESSIONS[session_id].get('classification')
    category = classification['category']
    endpoint_url = API_ENDPOINTS.get(category)

    print(f"Session {session_id}: Image received. Preparing to forward to '{category}' API.")

    # *** FORWARDING LOGIC THAT MATCHES THE BACKEND'S CURL COMMAND ***
    # The file object from Flask needs its stream to be readable by `requests`.
    # We pass the file stream, filename, and mimetype to requests.
    # The dictionary key 'file' matches the '-F file=@...' form field.
    files_payload = {'file': (file.filename, file.stream, file.mimetype)}

    # The dictionary key 'query' matches the '-F query=...' form field.
    data_payload = {'query': query}

    try:
        # Real call to the backend service for this category.
        response_from_service = requests.post(endpoint_url, files=files_payload, data=data_payload)
        response_from_service.raise_for_status()  # raises on 4xx/5xx responses
        tuned_response = response_from_service.json()  # Assume service returns JSON

        # NOTE(review): despite the "mock" name, this wraps the real backend
        # response — only the 'data' string is canned text.
        mock_response = {
            "status": "success",
            "response": tuned_response,
            "data": f"Analysis for '{query}' based on your image: This is a tuned MOCK response from the {category} service."
        }

        # Close the session.
        del SESSIONS[session_id]
        print(f"Session {session_id} closed.")

        return jsonify(mock_response)

    except requests.exceptions.RequestException as e:
        # Session is closed even on failure so the client must start over.
        del SESSIONS[session_id]
        print(f"Session {session_id} closed after failed API call.")
        return jsonify({"status": "error", "message": f"Backend service call failed: {e}"}), 503
190
+
191
if __name__ == '__main__':
    # NOTE(review): debug=True is for local development only; the Dockerfile
    # serves this app via gunicorn on the same port.
    app.run(debug=True, port=5000)
medicine.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import io
from flask import Flask, request, jsonify
from PIL import Image
from dotenv import load_dotenv
import google.generativeai as genai
import json

# --- INITIAL SETUP ---

# Load environment variables from the .env file
load_dotenv()

# Configure the Gemini API with your key
api_key = os.getenv("GOOGLE_API_KEY")
if not api_key:
    raise ValueError("GOOGLE_API_KEY not found. Please set it in your .env file.")
genai.configure(api_key=api_key)

# Initialize the Flask application
app = Flask(__name__)

# --- CONFIGURATION ---
# Directory holding the plain-text knowledge-base files.
TEXT_FILES_DIR = "Text_Files"
# Allowed file extensions for image uploads
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}

# Get a list of available knowledge base files
try:
    AVAILABLE_FILES = [f for f in os.listdir(TEXT_FILES_DIR) if f.endswith('.txt')]
    if not AVAILABLE_FILES:
        raise FileNotFoundError("No .txt files found in the 'Text_Files' directory.")
except FileNotFoundError:
    # NOTE(review): this handler also catches the "no .txt files" error
    # raised just above, so the warning text ("directory not found") is
    # misleading when the directory exists but is empty.
    print("Warning: 'Text_Files' directory not found. The API will not have a knowledge base.")
    AVAILABLE_FILES = []
36
+
37
+ # --- HELPER FUNCTIONS ---
38
+
39
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
42
+
43
def find_relevant_file(topic: str) -> str | None:
    """Ask Gemini which knowledge-base file best matches *topic*.

    This is more robust than simple keyword matching.

    Returns:
        The filename, validated against AVAILABLE_FILES, or None when there
        is no knowledge base, the model suggests an unknown file, or the
        API call fails.
    """
    if not AVAILABLE_FILES:
        return None

    try:
        model = genai.GenerativeModel('gemini-2.5-flash-lite')
        prompt = f"""
From the following list of files, which one is the most relevant for a query about "{topic}"?
Respond with only the single, most relevant filename dont include any other text.

File List:
{', '.join(AVAILABLE_FILES)}
"""
        response = model.generate_content(prompt)
        # Clean up the response so "`Foo.txt`" still matches a real file.
        filename = response.text.strip().replace("`", "")

        if filename in AVAILABLE_FILES:
            # BUG FIX: these two log lines previously printed a literal
            # placeholder instead of the suggested filename.
            print(f"Gemini identified relevant file: {filename} for topic: {topic}")
            return filename
        else:
            print(f"Warning: Gemini suggested a file that doesn't exist: {filename}")
            return None
    except Exception as e:
        print(f"Error in find_relevant_file: {e}")
        return None
73
+
74
def get_context_from_file(filename: str) -> str | None:
    """Return the full text of *filename* from the knowledge base, or None if absent."""
    path = os.path.join(TEXT_FILES_DIR, filename)
    try:
        handle = open(path, 'r', encoding='utf-8')
    except FileNotFoundError:
        return None
    with handle:
        return handle.read()
82
+
83
+ # --- CORE API LOGIC ---
84
+
85
@app.route('/api/query', methods=['POST'])
def handle_query():
    """
    Main API endpoint to handle user queries.
    Accepts form data with 'query' (required) and 'file' (optional image upload).

    Pipeline: (optional) identify a medicine from the uploaded image, else
    from the text query; pick the most relevant knowledge-base file; answer
    the question grounded in that file's content.
    """
    # 1. Get and validate the request data
    form_data = request.form
    if not form_data or 'query' not in form_data:
        return jsonify({"error": "Missing 'query' in request"}), 400

    user_query = form_data.get('query')
    medicine_topic = None

    # 2. Handle File Upload (if provided)
    if 'file' in request.files:
        file = request.files['file']
        if file.filename == '':
            return jsonify({"error": "No selected file"}), 400

        if file and allowed_file(file.filename):
            try:
                print("Image file received. Identifying medicine from image...")
                # Read the uploaded file directly
                img = Image.open(file.stream)

                # Use the vision model to identify the medicine
                vision_model = genai.GenerativeModel('gemini-2.5-flash')
                prompt = ["""Identify the specific formula or Rx or medicine name or primary subject from this image.""", img]
                response = vision_model.generate_content(prompt)

                medicine_topic = response.text.strip()
                print(f"Medicine identified from image: {medicine_topic}")

            except Exception as e:
                print(f"Error processing image: {e}")
                return jsonify({"error": "Failed to process the uploaded image."}), 500
        else:
            return jsonify({"error": f"Invalid file type. Allowed types: {', '.join(ALLOWED_EXTENSIONS)}"}), 400

    # 3. Handle Text-Only Input (or use the topic identified from the image)
    if not medicine_topic:
        print("No image provided. Identifying topic from text query...")
        try:
            model = genai.GenerativeModel('gemini-2.5-flash')
            prompt = f"""
From the user query '{user_query}', identify the main medicine or medical topic.
Respond with only the name of the topic or medicine (e.g., 'Ibuprofen', 'Antacids', 'Cough Suppressants').

"""
            response = model.generate_content(prompt)
            medicine_topic = response.text.strip()
            print(f"Topic identified from query: {medicine_topic}")
        except Exception as e:
            print(f"Error identifying topic from query: {e}")
            return jsonify({"error": "Failed to understand the query topic."}), 500

    # 4. Find the Relevant Knowledge Base File
    relevant_filename = find_relevant_file(medicine_topic)
    if not relevant_filename:
        return jsonify({"error": f"Could not find a relevant information file for '{medicine_topic}'."}), 404

    # 5. Get the Context from the File
    context = get_context_from_file(relevant_filename)
    if not context:
        return jsonify({"error": "Failed to read the content of the relevant file."}), 500

    # 6. Generate the Final Response Using the Context
    try:
        model = genai.GenerativeModel('gemini-2.5-flash-lite')
        final_prompt = f"""
You are a helpful medical information assistant.
Your task is to answer the user's question based ONLY on the provided context from the guide.
Generate response in same language as user query.
If there have no information about any medicine then prepare response using given context and your knowlage base make sure there have satisfied answer.
if there have any relevent medicine of provided medicine in context then prepare answer using that context.
Answer should be in simple language and short not more than 200 words.
If the answer cannot be found in the provided context, then you have to prepare response using your knowlage base make sure there have satisfied answer.
---important---
Dont tell user to i have no information about that medicine. inplace of that prepare answer using given context and your knowlage base make sure there have satisfied answer.
user is also provide the medicine name and description of the medicine.
name:{medicine_topic}
---important---

--- CONTEXT FROM THE GUIDE ---
{context}
--- END OF CONTEXT ---

USER'S QUESTION: {user_query}

YOUR ANSWER:
"""

        final_response = model.generate_content(final_prompt)

        # 7. Return the final, context-aware response
        return jsonify({
            "response": final_response.text.strip(),
            "identified_topic": medicine_topic,
            "source_file": relevant_filename
        })

    except Exception as e:
        print(f"Error generating final response: {e}")
        return jsonify({"error": "An error occurred while generating the response."}), 500
190
+
191
if __name__ == '__main__':
    # Runs the Flask server, reachable from outside the container (0.0.0.0).
    # NOTE(review): debug=True is for local development only; the Dockerfile
    # serves this app via gunicorn on the same port.
    app.run(host='0.0.0.0', port=5002, debug=True)
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ flask
2
+ flask-cors
3
+ google-generativeai
4
+ python-dotenv
5
+ waitress
+ gunicorn
+ requests
+ Pillow