Harika22 commited on
Commit
d54d1b0
·
verified ·
1 Parent(s): 792a566

Update pages/model.py

Browse files
Files changed (1) hide show
  1. pages/model.py +64 -36
pages/model.py CHANGED
@@ -18,62 +18,90 @@ st.set_page_config(page_title="🧠 MediAssist", layout="centered")
18
  st.markdown("<h1 style='text-align: center; color: #4A90E2;'>🧠 MediAssist</h1>", unsafe_allow_html=True)
19
  st.markdown("<h4 style='text-align: center;'>Upload a doctor's prescription and get detailed medicine analysis</h4><br>", unsafe_allow_html=True)
20
 
21
- uploaded_file = st.file_uploader("πŸ“€ Upload Prescription Image", type=["jpg", "jpeg", "png"])
22
 
23
  if uploaded_file:
24
-
25
  with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
26
  temp_file.write(uploaded_file.read())
27
- img_path = temp_file.name
28
 
29
- image = cv2.imread(img_path)
 
30
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
31
- _, binary = cv2.threshold(gray, 130, 255, cv2.THRESH_BINARY_INV)
32
- dilated = cv2.dilate(binary, np.ones((2, 2), np.uint8), iterations=1)
 
33
 
34
- reader = easyocr.Reader(['en'], gpu=False)
35
- extracted_text = "\n".join(reader.readtext(dilated, detail=0))
 
36
 
37
- st.image(dilated, caption="🧾 Preprocessed Image", use_column_width=True)
38
- st.markdown("### πŸ“œ Extracted Text from Image")
39
- st.code(extracted_text)
 
40
 
 
41
  template = """
42
- You're a medical assistant AI. Below is a doctor's handwritten prescription text:
 
 
43
 
44
  {prescription_text}
45
 
46
- Based on the text, please do the following:
47
- 1. Extract all medicine names (ignore other notes).
48
- 2. For each medicine, mention:
49
- - When to take it (morning/night, before/after food)
50
- - Dosage
51
  - Possible side effects
52
- - Any precautions or special instructions
53
- Return results in clear bullet points.
54
- """
55
 
 
 
56
  prompt = PromptTemplate(input_variables=["prescription_text"], template=template)
57
 
58
- llm = ChatHuggingFace(
59
- llm=HuggingFaceEndpoint(
60
- repo_id="meta-llama/Llama-3.1-8B-Instruct",
61
- provider="nebius",
62
- temperature=0.5,
63
- max_new_tokens=500,
64
- task="conversational"
65
- )
66
  )
67
 
68
- chain = LLMChain(llm=llm, prompt=prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
- if st.button("πŸ” Analyze Prescription"):
71
- with st.spinner("Analyzing the prescription..."):
72
- response = chain.run(prescription_text=extracted_text)
73
- st.markdown("### πŸ’Š Medicine Summary")
74
- st.success(response)
75
 
76
- os.remove(img_path)
 
 
77
 
78
  else:
79
- st.info("Upload a prescription image to start the analysis.")
 
18
  st.markdown("<h1 style='text-align: center; color: #4A90E2;'>🧠 MediAssist</h1>", unsafe_allow_html=True)
19
  st.markdown("<h4 style='text-align: center;'>Upload a doctor's prescription and get detailed medicine analysis</h4><br>", unsafe_allow_html=True)
20
 
21
+ uploaded_file = st.file_uploader("πŸ“€ Upload Prescription Image (JPG/PNG)", type=["jpg", "jpeg", "png"])
22
 
23
  if uploaded_file:
24
+ # Save uploaded image temporarily
25
  with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
26
  temp_file.write(uploaded_file.read())
27
+ orig_path = temp_file.name
28
 
29
+ # Step 1: Preprocess the image
30
+ image = cv2.imread(orig_path)
31
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
32
+ _, binary_inv = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)
33
+ kernel = np.ones((3, 3), np.uint8)
34
+ dilated = cv2.dilate(binary_inv, kernel, iterations=1)
35
 
36
+ # Save the dilated image temporarily for reference
37
+ dilated_path = orig_path.replace(".png", "_dilated.png")
38
+ cv2.imwrite(dilated_path, dilated)
39
 
40
+ # Step 2: OCR using EasyOCR
41
+ reader = easyocr.Reader(['en'])
42
+ text_list = reader.readtext(dilated, detail=0)
43
+ text = "\n".join(text_list)
44
 
45
+ # Step 3: Prompt for the LLM
46
  template = """
47
+ You are a helpful medical assistant.
48
+
49
+ Here is a prescription text extracted from an image:
50
 
51
  {prescription_text}
52
 
53
+ Please do the following:
54
+
55
+ 1. Extract only the medicine names mentioned in the prescription (ignore any other text).
56
+ 2. For each medicine, provide:
57
+ - When to take it (timing and dosage)
58
  - Possible side effects
59
+ - Any special instructions
 
 
60
 
61
+ Format your answer as bullet points, listing only medicines and their details.
62
+ """
63
  prompt = PromptTemplate(input_variables=["prescription_text"], template=template)
64
 
65
+ # Step 4: Load LLM
66
+ llm_model = HuggingFaceEndpoint(
67
+ repo_id="meta-llama/Llama-3.1-8B-Instruct",
68
+ provider="nebius",
69
+ temperature=0.6,
70
+ max_new_tokens=300,
71
+ task="conversational"
 
72
  )
73
 
74
+ model = ChatHuggingFace(
75
+ llm=llm_model,
76
+ repo_id="meta-llama/Llama-3.1-8B-Instruct",
77
+ provider="nebius",
78
+ temperature=0.6,
79
+ max_new_tokens=300,
80
+ task="conversational"
81
+ )
82
+
83
+ chain = LLMChain(llm=model, prompt=prompt)
84
+
85
+ # Step 5: Layout for output
86
+ col1, col2 = st.columns([1, 2])
87
+
88
+ with col1:
89
+ st.image(dilated, caption="🧾 Preprocessed Prescription", channels="GRAY", use_container_width=True)
90
+
91
+ with col2:
92
+ st.success("βœ… Prescription Uploaded & Preprocessed Successfully")
93
+ st.markdown("### πŸ“œ Extracted Text")
94
+ st.code(text)
95
 
96
+ if st.button("πŸ” Analyze Text"):
97
+ with st.spinner("Analyzing with LLM..."):
98
+ response = chain.run(prescription_text=text)
99
+ st.markdown("### πŸ’‘ AI-Powered Summary")
100
+ st.success(response)
101
 
102
+ # Cleanup temp files
103
+ os.remove(orig_path)
104
+ os.remove(dilated_path)
105
 
106
  else:
107
+ st.markdown("<center><i>Upload a prescription image to begin analysis.</i></center>", unsafe_allow_html=True)