import os
import streamlit as st
from groq import Groq
from PIL import Image
from transformers import TrOCRProcessor, TrOCRForConditionalGeneration
import pytesseract
# Load the TrOCR model
# NOTE(review): `processor` and `model` are loaded here but never referenced
# again — extract_text_from_image() below OCRs with pytesseract instead.
# Loading TrOCR costs significant startup time and memory for nothing;
# confirm it is truly unused and either wire it in or remove this load.
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = TrOCRForConditionalGeneration.from_pretrained("microsoft/trocr-base-handwritten")
# Set up Groq API client
# Reads the key from the GROQ_API_KEY environment variable; if it is unset,
# os.environ.get returns None and API calls will fail at request time.
client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)
# Function to extract text from image using TrOCR
def extract_text_from_image(image):
    """Run OCR on a PIL image and return the extracted text.

    Args:
        image: A ``PIL.Image.Image`` of the uploaded report.

    Returns:
        The OCR'd text with surrounding whitespace stripped. Stripping
        matters: pytesseract returns trailing form-feed/newline characters
        even for blank pages, which would otherwise make an empty result
        truthy and defeat the ``if extracted_text:`` check downstream.
    """
    # Tesseract expects a plain RGB raster (uploads may be e.g. CMYK JPEGs).
    rgb_image = image.convert("RGB")
    text = pytesseract.image_to_string(rgb_image)
    return text.strip()
# Function to analyze the extracted text using Groq API
def analyze_report(text):
    """Send extracted report text to the Groq LLM and return its analysis.

    Args:
        text: Raw OCR text of the medical test report.

    Returns:
        The model's analysis as a string.

    The original implementation sent the bare OCR text with no instruction,
    so the model had no stated task. A system message now frames the request
    to match what the UI promises (explanation, abnormal findings, actions).
    """
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": (
                    "You are a medical assistant. Analyze the medical test "
                    "report provided by the user: explain the test results, "
                    "highlight any abnormal findings, and suggest "
                    "recommended actions. Advise consulting a physician "
                    "for a definitive interpretation."
                ),
            },
            {"role": "user", "content": text},
        ],
        model="llama-3.3-70b-versatile",
    )
    return chat_completion.choices[0].message.content
# Streamlit app UI
# --- Streamlit app UI: upload, OCR, and one-shot analysis ---
st.title("Medical Test Report Analyzer")
st.write("""
Upload a medical test report in JPG format to get an analysis. The application will extract the text from the image
and provide a detailed explanation of the test results, including abnormal findings and recommended actions.
""")

# File uploader
uploaded_file = st.file_uploader("Upload a JPG file", type=["jpg", "jpeg"])

if uploaded_file is not None:
    # Display the uploaded image. `use_container_width` replaces the
    # deprecated `use_column_width` parameter (removed in current Streamlit).
    st.image(uploaded_file, caption="Uploaded Report", use_container_width=True)

    # Extract text from the image.
    image = Image.open(uploaded_file)
    extracted_text = extract_text_from_image(image)

    # Guard with .strip(): OCR on a blank/unreadable page can yield
    # whitespace-only output, which is truthy and would reach the LLM.
    if extracted_text.strip():
        st.subheader("Extracted Text:")
        st.text(extracted_text)

        # Send the extracted text to the LLM for analysis.
        analysis = analyze_report(extracted_text)
        st.subheader("Test Report Analysis:")
        st.write(analysis)
    else:
        # Previously this case rendered nothing, leaving the user without
        # any feedback about why no analysis appeared.
        st.warning("No text could be extracted from the uploaded image. "
                   "Please upload a clearer scan.")
# Chatbot interface for user queries
# --- Chatbot interface for follow-up questions ---
st.subheader("Ask Questions About the Test Report:")

if "messages" not in st.session_state:
    st.session_state.messages = []

user_input = st.text_input("Your question:")

# BUG FIX: st.text_input keeps its value across Streamlit reruns, so without
# this guard the same question was re-appended to the history (and re-sent
# to the Groq API) on every rerun of the script.
if user_input and user_input != st.session_state.get("last_question"):
    st.session_state.last_question = user_input

    # Record the user's message, then ask the model with the full history
    # so follow-up questions keep their conversational context.
    st.session_state.messages.append({"role": "user", "content": user_input})
    chat_response = client.chat.completions.create(
        messages=st.session_state.messages,
        model="llama-3.3-70b-versatile",
    )
    response_text = chat_response.choices[0].message.content
    st.session_state.messages.append(
        {"role": "assistant", "content": response_text}
    )

# Render the conversation so far.
for message in st.session_state.messages:
    if message["role"] == "user":
        st.write(f"**You:** {message['content']}")
    else:
        st.write(f"**Assistant:** {message['content']}")