| import os |
| import io |
| import torch |
| import torch.nn as nn |
| from torchvision import models as torchvision_models |
| from torchvision import transforms |
| from PIL import Image |
| from flask import Flask, request, jsonify |
| from huggingface_hub import hf_hub_download |
| import os |
| import json |
| from groq import Groq |
| import logging |
| from telegram import Update |
| from telegram.ext import ( |
| ApplicationBuilder, |
| ContextTypes, |
| CommandHandler, |
| MessageHandler, |
| filters |
| ) |
| from google import genai |
| from google.genai import types |
|
|
|
|
# --- Configuration: all secrets and prompts come from environment variables ---
API_KEY_1 = os.getenv("API_KEY_1")  # Groq API key (used by chat())
API_KEY_2 = os.getenv("API_KEY_2")  # Google Gemini API key
TASK = os.getenv("TASK")  # system prompt for the Groq medical-analysis model
client = genai.Client(api_key=API_KEY_2)  # shared Gemini client for this module
SYSTEM = os.getenv("SYSTEM")  # NOTE(review): read but never used in this file — confirm


logging.basicConfig(level=logging.INFO)
LANGUAGE_DETECTION_PROMPT = os.getenv("Language")  # system prompt for detect_language()
|
|
| |
|
|
def extract_json_from_response(response):
    """Extract and parse a JSON object from an LLM response.

    Handles replies that wrap the JSON in a Markdown ``` fence
    (optionally tagged ``json``) as well as bare JSON text.

    Args:
        response: Object with a ``.text`` attribute holding the model
            output (e.g. a google-genai GenerateContentResponse).

    Returns:
        dict: The parsed payload, or {} when nothing parseable is found.
    """
    # Gemini can return a response whose .text is None (e.g. safety block);
    # treat that like unparseable output instead of raising TypeError on
    # the "```" in None membership test below.
    message_body = response.text or ""
    if "```" in message_body:
        start = message_body.find("```")
        end = message_body.find("```", start + 3)
        if end != -1:
            code_block = message_body[start + 3:end].strip()
            # Drop the optional "json" language tag at the top of the fence.
            if code_block.startswith("json"):
                code_block = code_block[4:].strip()
            message_body = code_block
        else:
            # Unterminated fence: best effort, keep everything after it.
            message_body = message_body[start + 3:].strip()

    try:
        parsed_json = json.loads(message_body)
        logging.info("Parsed JSON: %s", parsed_json)
        return parsed_json
    except json.JSONDecodeError as e:
        logging.error("Failed to parse JSON: %s", e)
        return {}
|
|
def detect_language(text):
    """Detect the language of *text* via Gemini; fall back to "English".

    Sends the text with the configured language-detection prompt and reads
    the ``input_lang`` field of the JSON reply. Any failure (API error,
    unparseable reply) yields "English".
    """
    try:
        reply = client.models.generate_content(
            model="gemini-2.0-flash",
            config=types.GenerateContentConfig(
                system_instruction=LANGUAGE_DETECTION_PROMPT
            ),
            contents=[{"role": "user", "parts": [{"text": text}]}],
        )
        parsed = extract_json_from_response(reply)
        return parsed.get("input_lang", "English")
    except Exception as exc:
        logging.error(f"Language detection failed: {exc}")
        return "English"
|
|
def translate(content, target_language):
    """Translate *content* into *target_language* using Gemini.

    Returns a dict. On success this is either the JSON parsed from the
    reply or {"translation": <plain text>}; on failure (or empty input)
    the original content (or "") is returned under "translation".
    """
    if not content:
        return {"translation": ""}
    try:
        reply = client.models.generate_content(
            model="gemini-2.0-flash",
            config=types.GenerateContentConfig(
                system_instruction=f"Translate the following content into {target_language}. Respond with plain text only."
            ),
            contents=[{"role": "user", "parts": [{"text": content}]}],
        )
        parsed = extract_json_from_response(reply)
        return parsed if parsed else {"translation": reply.text.strip()}
    except Exception as exc:
        logging.error(f"Translation failed: {exc}")
        return {"translation": content}
|
|
| |
|
|
def gen_message(data_dict, language="English"):
    """Render the user-facing health report from the analysis dict.

    Args:
        data_dict: Parsed model output. May contain "translation",
            "symptoms" (list of str), "diseases" (dict of name ->
            likelihood, or a plain list of names), "analysis" and
            "recommendation".
        language: Target language; anything other than "English" routes
            the finished text through translate().

    Returns:
        str: The formatted (and possibly translated) message.
    """
    # Render missing fields as empty strings instead of the literal "None"
    # that f-string interpolation of .get()'s default would produce.
    translation = data_dict.get("translation") or ""
    analysis = data_dict.get("analysis") or ""
    recommendation = data_dict.get("recommendation") or ""
    symptoms_list = data_dict.get("symptoms")
    diseases_data = data_dict.get("diseases")

    if not symptoms_list or not diseases_data:
        template = f"""
Hi there, I'm Med-AI-Care, your virtual health assistant.

I understood your message as:
"{translation}"

Thank you for reaching out. I couldn't detect specific symptoms or conditions. It's okay to feel unsure — I'm still here to support you.

My Analysis:
{analysis}

Recommendation:
{recommendation}

Please consult a licensed medical professional for any health concerns.

Take care!
"""
    else:
        symptoms = ', '.join(symptoms_list).capitalize()
        # The model normally returns {disease: likelihood}; tolerate a
        # plain list of names so a malformed reply doesn't crash the bot.
        if isinstance(diseases_data, dict):
            diseases = '\n'.join(f"- {disease}: {likelihood}"
                                 for disease, likelihood in diseases_data.items())
        else:
            diseases = '\n'.join(f"- {disease}" for disease in diseases_data)
        template = f"""
Hi there, I'm Med-AI-Care, your virtual health assistant.

You said:
"{translation}"

Symptoms detected:
{symptoms}

Possible conditions:
{diseases}

My Analysis:
{analysis}

Recommendation:
{recommendation}

I'm not a doctor — please follow up with a healthcare professional.

Thanks for reaching out!
"""
    if language != "English":
        translated = translate(template, language)
        return translated.get("translation", template)
    return template
|
|
| |
|
|
def chat(cmd):
    """Analyze one user message and build the bot's reply.

    Pipeline: detect the input language, ask the Groq model (system prompt
    from TASK) for a structured analysis, strip Markdown ``` fences from
    the streamed output, parse the remainder as JSON, and render the final
    message with gen_message().

    Returns:
        tuple: (raw streamed chunks, parsed/translated data dict, reply text)
    """
    language = detect_language(cmd)

    # A fresh Groq client is created per call; API_KEY_1 is read from the
    # environment at import time.
    client_instance = Groq(api_key=API_KEY_1)
    completion = client_instance.chat.completions.create(
        model="meta-llama/llama-4-scout-17b-16e-instruct",
        messages=[
            {"role": "system", "content": TASK},
            {"role": "user", "content": cmd}
        ],
        temperature=1,
        max_completion_tokens=1024,
        top_p=1,
        stream=True,
    )

    # Collect the streamed delta chunks (content may be None on some chunks).
    output = []
    for chunk in completion:
        output.append(chunk.choices[0].delta.content or "")

    # Strip the Markdown code fence surrounding the JSON payload.
    # NOTE(review): this assumes "```" and "json" each arrive as standalone
    # stream chunks; a fence split across chunk boundaries (or JSON emitted
    # without any fence) leaves `result` empty — confirm against the model's
    # actual streaming behavior.
    result = ""
    in_block = False
    for line in output:
        if line.strip() == "```":
            in_block = not in_block
            continue
        if in_block and line.strip().lower() != "json":
            result += line

    try:
        result_json = json.loads(result)
        # Non-English input: translate() returns a {"translation": ...}-style
        # dict, so gen_message() takes its "no symptoms detected" branch.
        # NOTE(review): verify this is the intended non-English flow.
        data_dict = result_json if language == "English" else translate(result, language)
        message_body = gen_message(data_dict, language)
    except json.JSONDecodeError as e:
        logging.error("Final JSON parsing failed: %s", e)
        data_dict = {}
        message_body = "Sorry, I had trouble understanding your input."

    return output, data_dict, message_body
|
|
| bot_token = os.getenv("BOT") |
|
|
| |
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /start: greet the user and explain what the bot does."""
    greeting = """
Hello! 👋 Welcome to Med-AI-Care, your trusted virtual health assistant.

Our mission is to provide you with intelligent, personalized, and data-driven insights to help you better understand your health. By analyzing the symptoms you share, we aim to offer thoughtful analysis and recommend possible next steps tailored just for you.

Whenever you're ready, just tell me about how you're feeling, and we'll start your health assessment together.

Remember, while I’m here to guide you, this is an AI-powered assistant and not a substitute for professional medical advice. For urgent or serious concerns, please consult a healthcare professional.

Let’s take this step toward better health—together! 💙
"""
    await context.bot.send_message(chat_id=update.effective_chat.id, text=greeting)
|
|
|
|
| |
async def query(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle a plain-text message: run it through chat() and send the reply."""
    incoming = update.message.text
    _, _, reply_text = chat(incoming)
    await context.bot.send_message(chat_id=update.effective_chat.id, text=reply_text)
|
|
async def error_handler(update: object, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Log any exception raised by a handler and notify the affected chat.

    Args:
        update: The update being processed when the error occurred (may not
            be a telegram Update, hence the isinstance check below).
        context: Handler context; ``context.error`` holds the exception.
    """
    # Use the module's configured logging (consistent with the rest of the
    # file) instead of a bare print(); lazy %-formatting per logging docs.
    logging.error("Exception while handling an update: %s", context.error)

    if isinstance(update, Update) and update.effective_chat:
        await context.bot.send_message(
            chat_id=update.effective_chat.id,
            text="Server Busy. Please Try Again Later.",
        )
|
|
|
|
| |
| app = Flask(__name__) |
|
|
@app.route("/", methods=["GET"])
def home():
    """Landing route: brief usage hint for the prediction API."""
    # NOTE(review): the /predict endpoint mentioned here is not defined in
    # this chunk — presumably it lives elsewhere; confirm.
    usage_hint = "ConvNeXt Skin Disease Prediction API. Use POST to /predict with an image file."
    return usage_hint
|
|
|
|
# Wire up the Telegram bot: /start command, free-text messages, and errors.
application = ApplicationBuilder().token(bot_token).build()

application.add_handler(CommandHandler('start', start))
# Route every non-command text message to the health-analysis pipeline.
application.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), query))
application.add_error_handler(error_handler)
# Blocking call: polls Telegram until interrupted; nothing after this line
# (including serving the Flask app above) runs in this process.
application.run_polling()
|
|
|
|