# amasood's picture
# Update app.py
# 11168d5 verified
import os
import streamlit as st
from groq import Groq
from PIL import Image
from transformers import TrOCRProcessor, TrOCRForConditionalGeneration
import pytesseract
# Load the TrOCR model
# NOTE(review): `processor` and `model` are loaded here but never used —
# extract_text_from_image() relies on pytesseract instead. Downloading the
# TrOCR weights slows startup for no visible benefit; confirm whether TrOCR
# OCR was intended before removing these two lines (and their imports).
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = TrOCRForConditionalGeneration.from_pretrained("microsoft/trocr-base-handwritten")
# Set up Groq API client
# Reads the key from the GROQ_API_KEY environment variable; if unset this
# evaluates to None and every API call will fail at request time.
client = Groq(
api_key=os.environ.get("GROQ_API_KEY"),
)
# Function to extract text from an uploaded report image via OCR
def extract_text_from_image(image):
    """OCR an uploaded report image and return the recognized text.

    Args:
        image: A PIL Image in any mode; converted to RGB before OCR.

    Returns:
        The text pytesseract recognized, stripped of leading/trailing
        whitespace ("" when nothing was recognized). Stripping matters:
        pytesseract commonly appends a trailing "\\n\\f" even for blank
        images, which would make ``if extracted_text:`` always truthy.

    NOTE(review): despite the TrOCR model loaded at module level, this
    function uses pytesseract — confirm which OCR engine is intended.
    """
    rgb_image = image.convert("RGB")
    text = pytesseract.image_to_string(rgb_image)
    return text.strip()
# Function to analyze the extracted text using the Groq API
def analyze_report(text):
    """Send extracted report text to the Groq LLM and return its analysis.

    Args:
        text: Raw text extracted from the medical report image.

    Returns:
        The model's analysis as a plain string.
    """
    # A system message steers the model toward the structured analysis the
    # UI promises (abnormal findings + recommended actions) instead of
    # letting it free-associate on raw OCR text.
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": (
                    "You are a medical assistant. Analyze the following "
                    "medical test report: explain the results, highlight "
                    "any abnormal findings, and suggest recommended actions."
                ),
            },
            {"role": "user", "content": text},
        ],
        model="llama-3.3-70b-versatile",
    )
    return chat_completion.choices[0].message.content
# ---------------------------------------------------------------------------
# Streamlit UI: upload a report image, OCR it, and show an LLM analysis.
# ---------------------------------------------------------------------------
st.title("Medical Test Report Analyzer")
st.write("""
Upload a medical test report in JPG format to get an analysis. The application will extract the text from the image
and provide a detailed explanation of the test results, including abnormal findings and recommended actions.
""")

# File uploader (JPEG only, matching the on-screen instructions)
uploaded_file = st.file_uploader("Upload a JPG file", type=["jpg", "jpeg"])

if uploaded_file is not None:
    # Display the uploaded image.
    # `use_column_width` is deprecated in recent Streamlit releases;
    # `use_container_width` is the supported replacement.
    st.image(uploaded_file, caption="Uploaded Report", use_container_width=True)

    # Extract text from the image.
    image = Image.open(uploaded_file)
    extracted_text = extract_text_from_image(image)

    if extracted_text:
        st.subheader("Extracted Text:")
        st.text(extracted_text)

        # Send the extracted text to the LLM for analysis.
        analysis = analyze_report(extracted_text)
        st.subheader("Test Report Analysis:")
        st.write(analysis)
    else:
        # Surface OCR failure instead of silently rendering nothing.
        st.warning("No text could be extracted from the image. Please upload a clearer scan.")
# ---------------------------------------------------------------------------
# Chatbot interface for follow-up questions about the test report.
# NOTE(review): the conversation never includes the extracted report text or
# its analysis, so the model answers questions without report context —
# confirm whether the report should be seeded into the history.
# ---------------------------------------------------------------------------
st.subheader("Ask Questions About the Test Report:")

# Keep the running conversation in session state so it survives reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

user_input = st.text_input("Your question:")

# Streamlit reruns the whole script on every interaction; while text sits in
# the input box the same question would be re-appended and re-sent each run.
# Guard by skipping input identical to the most recent user message.
_last_user_msg = next(
    (m["content"] for m in reversed(st.session_state.messages) if m["role"] == "user"),
    None,
)
if user_input and user_input != _last_user_msg:
    # Add the user's message to session state.
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Ask the model, passing the full conversation history.
    chat_response = client.chat.completions.create(
        messages=st.session_state.messages,
        model="llama-3.3-70b-versatile",
    )
    response_text = chat_response.choices[0].message.content
    st.session_state.messages.append({"role": "assistant", "content": response_text})

# Display the conversation so far.
for message in st.session_state.messages:
    if message["role"] == "user":
        st.write(f"**You:** {message['content']}")
    else:
        st.write(f"**Assistant:** {message['content']}")