Spaces:
Sleeping
Sleeping
File size: 3,530 Bytes
987110a 792a566 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 987110a d54d1b0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 |
import streamlit as st
import cv2
import numpy as np
import tempfile
import os
import easyocr
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
# Page config must be the first Streamlit command in the script.
st.set_page_config(page_title="π§ MediAssist", layout="centered")

# Authenticate with Hugging Face. os.environ values must be strings:
# assigning os.getenv(...) directly raises TypeError when the secret is
# missing, so guard the lookup and surface a clear warning instead.
hf_token = os.getenv("HF_Token")
if hf_token:
    os.environ["HF_TOKEN"] = hf_token
    # langchain_huggingface reads HUGGINGFACEHUB_API_TOKEN; keep the
    # original (misnamed) *_API_KEY too for backward compatibility.
    os.environ["HUGGINGFACEHUB_API_KEY"] = hf_token
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token
else:
    st.warning("HF_Token is not set; Hugging Face API calls will fail.")

st.markdown("<h1 style='text-align: center; color: #4A90E2;'>π§ MediAssist</h1>", unsafe_allow_html=True)
st.markdown("<h4 style='text-align: center;'>Upload a doctor's prescription and get detailed medicine analysis</h4><br>", unsafe_allow_html=True)
uploaded_file = st.file_uploader("π€ Upload Prescription Image (JPG/PNG)", type=["jpg", "jpeg", "png"])
if uploaded_file:
    # Persist the upload to disk so cv2.imread can open it.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
        temp_file.write(uploaded_file.read())
        orig_path = temp_file.name
    # Derive the companion path up-front so the finally-block below can
    # always reference it, even if preprocessing fails early.
    dilated_path = orig_path.replace(".png", "_dilated.png")

    try:
        # Step 1: Preprocess — grayscale, inverse-binarize, then dilate to
        # thicken pen strokes before OCR.
        image = cv2.imread(orig_path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        _, binary_inv = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)
        kernel = np.ones((3, 3), np.uint8)
        dilated = cv2.dilate(binary_inv, kernel, iterations=1)

        # Save the dilated image temporarily for reference.
        cv2.imwrite(dilated_path, dilated)

        # Step 2: OCR using EasyOCR. Cache the reader: constructing it
        # loads the detection/recognition models, which is far too slow to
        # repeat on every Streamlit rerun (e.g. each button click).
        @st.cache_resource
        def load_ocr_reader():
            """Build the EasyOCR English reader once per server process."""
            return easyocr.Reader(['en'])

        text_list = load_ocr_reader().readtext(dilated, detail=0)
        text = "\n".join(text_list)

        # Step 3: Prompt for the LLM.
        template = """
You are a helpful medical assistant.
Here is a prescription text extracted from an image:
{prescription_text}
Please do the following:
1. Extract only the medicine names mentioned in the prescription (ignore any other text).
2. For each medicine, provide:
- When to take it (timing and dosage)
- Possible side effects
- Any special instructions
Format your answer as bullet points, listing only medicines and their details.
"""
        prompt = PromptTemplate(input_variables=["prescription_text"], template=template)

        # Step 4: Load LLM. Generation parameters (repo_id, provider,
        # temperature, max_new_tokens, task) belong on the endpoint only;
        # passing them again to ChatHuggingFace is invalid — it just wraps
        # the configured endpoint.
        llm_model = HuggingFaceEndpoint(
            repo_id="meta-llama/Llama-3.1-8B-Instruct",
            provider="nebius",
            temperature=0.6,
            max_new_tokens=300,
            task="conversational",
        )
        model = ChatHuggingFace(llm=llm_model)
        chain = LLMChain(llm=model, prompt=prompt)

        # Step 5: Layout for output.
        col1, col2 = st.columns([1, 2])
        with col1:
            st.image(dilated, caption="π§Ύ Preprocessed Prescription",
                     channels="GRAY", use_container_width=True)
        with col2:
            # NOTE: this literal was split across two physical lines in the
            # original (a syntax error); rejoined into one line.
            st.success("β Prescription Uploaded & Preprocessed Successfully")
            st.markdown("### π Extracted Text")
            st.code(text)
            if st.button("π Analyze Text"):
                with st.spinner("Analyzing with LLM..."):
                    response = chain.run(prescription_text=text)
                st.markdown("### π‘ AI-Powered Summary")
                st.success(response)
    finally:
        # Clean up temp files even if OCR or the LLM call raises mid-way.
        for path in (orig_path, dilated_path):
            if os.path.exists(path):
                os.remove(path)
else:
    st.markdown("<center><i>Upload a prescription image to begin analysis.</i></center>", unsafe_allow_html=True)