# MediAssist — Streamlit app: preprocess a prescription image with OpenCV,
# extract its text with EasyOCR, and summarize the medicines with a
# Hugging Face chat model via LangChain.
| import streamlit as st | |
| import cv2 | |
| import numpy as np | |
| import tempfile | |
| import os | |
| import easyocr | |
| from langchain.prompts import PromptTemplate | |
| from langchain.chains import LLMChain | |
| from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace | |
# --- Credentials & page setup ---------------------------------------------
# Mirror the Space secret 'HF_Token' into the variable names the
# langchain/huggingface clients read. Guard against a missing secret:
# os.environ values must be str, so assigning None raises TypeError.
_hf_token = os.getenv("HF_Token")
if _hf_token:
    os.environ["HF_TOKEN"] = _hf_token
    os.environ["HUGGINGFACEHUB_API_KEY"] = _hf_token
else:
    # Don't crash at import time; surface the problem in the UI instead.
    st.warning("HF_Token is not set — LLM analysis will fail until it is configured.")

# st.set_page_config must be the first Streamlit page command.
st.set_page_config(page_title="🧠 MediAssist", layout="centered")
st.markdown("<h1 style='text-align: center; color: #4A90E2;'>🧠 MediAssist</h1>", unsafe_allow_html=True)
st.markdown("<h4 style='text-align: center;'>Upload a doctor's prescription and get detailed medicine analysis</h4><br>", unsafe_allow_html=True)

uploaded_file = st.file_uploader("📤 Upload Prescription Image (JPG/PNG)", type=["jpg", "jpeg", "png"])
@st.cache_resource(show_spinner=False)
def _get_ocr_reader():
    """Create the EasyOCR reader once per session — model loading is far too
    slow to repeat on every Streamlit rerun."""
    return easyocr.Reader(['en'])


@st.cache_resource(show_spinner=False)
def _get_analysis_chain():
    """Build the prompt -> chat-model pipeline once per session.

    Returns an LCEL runnable; invoke it with {"prescription_text": ...}.
    """
    template = """
You are a helpful medical assistant.
Here is a prescription text extracted from an image:
{prescription_text}
Please do the following:
1. Extract only the medicine names mentioned in the prescription (ignore any other text).
2. For each medicine, provide:
- When to take it (timing and dosage)
- Possible side effects
- Any special instructions
Format your answer as bullet points, listing only medicines and their details.
"""
    prompt = PromptTemplate(input_variables=["prescription_text"], template=template)
    llm_model = HuggingFaceEndpoint(
        repo_id="meta-llama/Llama-3.1-8B-Instruct",
        provider="nebius",
        temperature=0.6,
        max_new_tokens=300,
        task="conversational",
    )
    # ChatHuggingFace only wraps the endpoint; generation kwargs
    # (repo_id/provider/temperature/...) belong on HuggingFaceEndpoint,
    # not here — passing them to ChatHuggingFace was a bug.
    model = ChatHuggingFace(llm=llm_model)
    return prompt | model


if uploaded_file:
    # Persist the upload to disk so OpenCV can read it by path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
        temp_file.write(uploaded_file.read())
        orig_path = temp_file.name
    dilated_path = orig_path.replace(".png", "_dilated.png")

    try:
        # Step 1: preprocess — grayscale, inverted binary threshold, then a
        # light 3x3 dilation to thicken pen strokes for OCR.
        image = cv2.imread(orig_path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        _, binary_inv = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)
        kernel = np.ones((3, 3), np.uint8)
        dilated = cv2.dilate(binary_inv, kernel, iterations=1)
        cv2.imwrite(dilated_path, dilated)  # kept on disk for reference

        # Step 2: OCR with the cached reader; detail=0 yields plain strings.
        reader = _get_ocr_reader()
        text = "\n".join(reader.readtext(dilated, detail=0))

        # Steps 3-4: cached prompt + model pipeline.
        chain = _get_analysis_chain()

        # Step 5: two-column layout — preprocessed image | extracted text.
        col1, col2 = st.columns([1, 2])
        with col1:
            st.image(dilated, caption="🧾 Preprocessed Prescription", channels="GRAY", use_container_width=True)
        with col2:
            st.success("✅ Prescription Uploaded & Preprocessed Successfully")
            st.markdown("### 📝 Extracted Text")
            st.code(text)

        if st.button("🔍 Analyze Text"):
            with st.spinner("Analyzing with LLM..."):
                result = chain.invoke({"prescription_text": text})
            st.markdown("### 💡 AI-Powered Summary")
            # ChatHuggingFace returns an AIMessage; render its text content.
            st.success(getattr(result, "content", result))
    finally:
        # Clean up temp files even when preprocessing/OCR/LLM raises.
        for path in (orig_path, dilated_path):
            if os.path.exists(path):
                os.remove(path)
else:
    st.markdown("<center><i>Upload a prescription image to begin analysis.</i></center>", unsafe_allow_html=True)