"""Streamlit app: MRI brain-tumor classifier (Groq vision model) plus a
brain-tumor-only Q&A chatbot backed by the Groq chat-completions API."""

import base64
import io
import os
from io import BytesIO

import requests
import streamlit as st
import streamlit.components.v1 as components
from dotenv import load_dotenv
from groq import Groq
from PIL import Image

# Page config must be the first Streamlit call in the script.
st.set_page_config(
    page_title="My App",
    page_icon="",
    layout="wide",
    initial_sidebar_state="collapsed",
)

# ---------------------------------------------------------------------------
# Dark / light mode toggle — preference persists across reruns (default: dark).
# ---------------------------------------------------------------------------
if "dark_mode" not in st.session_state:
    st.session_state.dark_mode = True

# Sun icon while in dark mode (tap to switch to light), moon otherwise.
icon = "☀️" if st.session_state.dark_mode else "🌙"

if st.button(icon, key="dark_mode_toggle"):
    st.session_state.dark_mode = not st.session_state.dark_mode
    st.rerun()

# Apply styles for dark & light modes.
# NOTE(review): both style payloads below are empty — the CSS appears to have
# been lost from the source; restore the intended <style> rules here.
if st.session_state.dark_mode:
    st.markdown(
        """ """,
        unsafe_allow_html=True,
    )
else:
    st.markdown(
        """ """,
        unsafe_allow_html=True,
    )

# Hides Streamlit UI elements with CSS.
# NOTE(review): payload is empty — restore the hiding rules (e.g. #MainMenu,
# footer) if that behavior is still wanted.
hide_streamlit_style = """ """
st.markdown(hide_streamlit_style, unsafe_allow_html=True)

# ---------------------------------------------------------------------------
# Groq client setup
# ---------------------------------------------------------------------------
load_dotenv()  # pull GROQ_API_KEY from a local .env file

GROQ_API_KEY = os.getenv("GROQ_API_KEY")

# Fail fast with a visible error rather than crashing deeper in an API call.
if not GROQ_API_KEY:
    st.error("Groq API key not found. Please set it in the .env file.")
    st.stop()

client = Groq(api_key=GROQ_API_KEY)


def encode_image(image):
    """Return *image* as a base64-encoded JPEG string.

    JPEG has no alpha channel, so any non-RGB image (RGBA, P, LA, ...) is
    converted to RGB first. The original only special-cased RGBA, which made
    ``image.save(..., format="JPEG")`` raise for palette/grayscale-alpha PNGs.

    Args:
        image: a PIL.Image.Image instance.

    Returns:
        str: base64 text of the JPEG bytes.
    """
    if image.mode != "RGB":
        image = image.convert("RGB")

    buffer = BytesIO()
    image.save(buffer, format="JPEG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")


def classify_mri_image(image):
    """Send *image* to the Groq vision model and return its classification text.

    Raises whatever the Groq client raises on network/API failure; the caller
    (``main``) surfaces that in the UI.
    """
    base64_image = encode_image(image)

    # Prompt for the vision model. (Reconstructed: the pasted source had this
    # literal broken across a physical line, which was a SyntaxError.)
    prompt = (
        "Analyze this MRI image and determine if it shows a brain tumor. "
        "Provide a clear classification (e.g., 'Tumor detected' or "
        "'No tumor detected') and a brief explanation."
    )

    response = client.chat.completions.create(
        model="llama-3.2-90b-vision-preview",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        },
                    },
                ],
            }
        ],
        max_tokens=300,
    )

    # Extract the model's text answer.
    return response.choices[0].message.content


def main():
    """Render the MRI upload/classify UI."""
    st.title("MRI Brain Tumor Classifier")
    st.write("Upload an MRI image to classify whether it contains a brain tumor.")

    uploaded_file = st.file_uploader(
        "Choose an MRI image...", type=["jpg", "jpeg", "png"]
    )

    if uploaded_file is not None:
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded MRI Image", use_container_width=True)

        if st.button("Classify Image"):
            with st.spinner("Classifying..."):
                try:
                    result = classify_mri_image(image)
                    st.success("Classification Complete!")
                    st.write("### Result:")
                    st.write(result)
                except Exception as e:
                    # Surface API/network failures in the UI instead of crashing.
                    st.error(f"An error occurred: {str(e)}")


if __name__ == "__main__":
    main()

st.markdown("---")


def is_brain_tumor_question(user_input):
    """Return True when *user_input* mentions any brain-tumor-related keyword.

    Matching is case-insensitive on both sides: the original lowercased only
    the input, so mixed-case keywords ("brain MRI", "CT scan brain") could
    never match.
    """
    keywords = [
        "brain tumor", "glioma", "meningioma", "astrocytoma",
        "medulloblastoma", "ependymoma", "oligodendroglioma",
        "pituitary tumor", "schwannoma", "craniopharyngioma", "cancer",
        "brain cancer", "malignant tumor", "benign tumor", "neurology",
        "oncology", "neurosurgeon", "brain MRI", "CT scan brain",
        "tumor diagnosis", "tumor treatment", "chemotherapy", "radiotherapy",
        "stereotactic radiosurgery", "brain surgery", "craniotomy",
        "tumor removal", "brain biopsy", "symptoms of brain tumor",
        "headache and tumor", "seizures and tumor", "brain tumor prognosis",
        "life expectancy brain tumor", "brain metastases", "tumor recurrence",
        "brain swelling", "intracranial pressure", "glioblastoma multiforme",
        "brain tumor in children", "brain tumor in adults",
        "radiation therapy for brain tumors", "brain", "brain-tumor",
    ]
    text = user_input.lower()
    return any(keyword.lower() in text for keyword in keywords)


def get_chatbot_response(user_input):
    """Answer *user_input* via the Groq chat API, restricted to tumor topics.

    Returns a plain string in every case: the model's answer, a refusal for
    off-topic questions, or a human-readable error description.
    """
    if not is_brain_tumor_question(user_input):
        return "I can only answer brain tumor-related questions."

    url = "https://api.groq.com/openai/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {GROQ_API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "llama3-8b-8192",
        "messages": [
            {
                "role": "system",
                "content": (
                    "You are a helpful AI that answers only brain "
                    "tumor-related questions. Keep responses a little bit "
                    "brief if question doesn't demand explanations."
                ),
            },
            {"role": "user", "content": user_input},
        ],
    }

    try:
        # Timeout added so a stalled connection cannot hang the app forever.
        response = requests.post(url, headers=headers, json=payload, timeout=30)
        response_data = response.json()

        if response.status_code == 200:
            return (
                response_data.get("choices", [{}])[0]
                .get("message", {})
                .get("content", "No response generated.")
            )
        return (
            f"Error {response.status_code}: "
            f"{response_data.get('error', {}).get('message', 'Unknown error')}"
        )
    except requests.exceptions.RequestException as e:
        return f"Request failed: {e}"


# ---------------------------------------------------------------------------
# Chatbot UI (runs unconditionally, below the classifier section)
# ---------------------------------------------------------------------------
# Custom CSS for styling the input and send icon.
# NOTE(review): payload is empty — the styling CSS appears lost; restore it.
st.markdown(""" """, unsafe_allow_html=True)

st.write("Ask me any brain tumor-related questions")

user_input = st.text_input(
    "",
    placeholder="Enter your question...",
    key="input_box",
    label_visibility="collapsed",
)

st.markdown(""" """, unsafe_allow_html=True)

if user_input:
    st.write("")  # adds a space before generating the response
    with st.spinner("Thinking..."):
        response = get_chatbot_response(user_input)
        st.write(response)