"""Streamlit portfolio app.

Two-page UI driven by a sidebar radio:
  * "About Me"  – static bio, project cards with tech badges, publications.
  * "Chatbot"   – RAG chat over portfolio data (Pinecone index + HF embeddings
    + a custom Euron chat model), with history kept in ``st.session_state``.

Requires ``PINECONE_API_KEY`` and ``EURON_API_KEY`` in the environment
(loaded via ``.env``); the app halts early if either is missing.
"""

import os

from dotenv import load_dotenv
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain_pinecone import PineconeVectorStore

# NOTE(review): original read `import execution.euron_streamlit as st`, but the
# file uses the full public Streamlit API (st.sidebar, st.chat_input,
# st.session_state, ...) — assuming the plain streamlit package was intended.
import streamlit as st

from src.euron_chat import EuronChatModel
from src.helper import download_hugging_face_embeddings
from src.prompt import *  # provides `system_prompt`

# Load environment variables from a local .env file (no-op if absent).
load_dotenv()
pinecone_api_key = os.getenv("PINECONE_API_KEY")
euron_api_key = os.getenv("EURON_API_KEY")

# Fail fast: neither page works without both API keys.
if not pinecone_api_key or not euron_api_key:
    st.error("Missing keys in environment")
    st.stop()

# ✅ Minimal CSS – Only apply Fira Code font globally + Sidebar styling
# NOTE(review): the <style> payload appears to have been stripped from this
# literal during extraction — restore the original CSS if available.
st.markdown("""
""", unsafe_allow_html=True)

# Sidebar Menu
with st.sidebar:
    st.image("static/icon.png", width=120)  # Image in sidebar
    st.markdown("---")
    menu = st.radio("Go to", ["About Me", "Chatbot"], label_visibility="hidden")
    st.markdown("---")
    st.caption("© 2025 Shanin Hossain")

# Initialize session state (first run only; Streamlit re-executes the script
# on every interaction, so guard each key).
if "rag_chain" not in st.session_state:
    st.session_state.rag_chain = None
    st.session_state.embeddings = None
    st.session_state.retriever = None

if "messages" not in st.session_state:
    st.session_state.messages = []  # Chat history


def initialize_rag():
    """Lazily build the RAG chain and cache it in session state.

    Downloads embeddings, connects to the existing Pinecone index
    ``"portfolio"``, wires a top-3 similarity retriever into a
    stuff-documents chain around :class:`EuronChatModel`, and stores the
    result in ``st.session_state.rag_chain``. On any failure the error is
    shown and the script is halted via ``st.stop()``.
    """
    try:
        if st.session_state.rag_chain is None:
            st.session_state.embeddings = download_hugging_face_embeddings()

            index_name = "portfolio"
            docsearch = PineconeVectorStore.from_existing_index(
                index_name=index_name,
                embedding=st.session_state.embeddings
            )
            st.session_state.retriever = docsearch.as_retriever(
                search_type="similarity", search_kwargs={"k": 3}
            )

            chatModel = EuronChatModel()
            prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", system_prompt),
                    ("human", "{input}"),
                ]
            )
            question_answer_chain = create_stuff_documents_chain(chatModel, prompt)
            st.session_state.rag_chain = create_retrieval_chain(
                st.session_state.retriever, question_answer_chain
            )
    except Exception as e:
        # Surface the failure to the user and stop the run; without the chain
        # the chatbot page cannot function.
        st.error(f"Error initializing RAG: {str(e)}")
        st.stop()


# About Me Section
if menu == "About Me":
    st.write("Hey there 👋")
    st.title("👨 I'm Shanin Hossain")
    st.markdown("""
I'm an AI Engineer & Research Assistant passionate about:
- Machine Learning, Deep Learning, and Generative AI
- Computer Vision & Natural Language Processing
- Healthcare Informatics and Medical Imaging

📌 I have worked on multiple AI projects, including OCR, Retrieval-Augmented Generation (RAG), and hybrid graph networks.
""")
    st.success("👉 Navigate to **Chatbot** in the sidebar to chat with me!")

    st.header("My Projects", divider="gray")

    # --- Project Data ---
    projects = [
        {
            "name": "Brain Glioma Grading System",
            "description": "Developed a hybrid graph neural network to grade glioma tumors from medical imaging data.",
            "tech_stack": ["PyTorch Geometric", "Graph Neural Networks", "Medical Imaging", "Python"]
        },
        {
            "name": "OCR Automation",
            "description": "Built an OCR pipeline for document image understanding and text extraction.",
            "tech_stack": ["YOLOv8", "OpenCV", "Tesseract OCR", "FastAPI"]
        },
        {
            "name": "Portfolio Chatbot",
            "description": "Created a RAG-powered chatbot integrated with Pinecone & custom embeddings for Q&A over portfolio data.",
            "tech_stack": ["Streamlit", "LangChain", "Pinecone", "Hugging Face"]
        },
    ]

    # --- Render Projects with Badges ---
    for project in projects:
        with st.container():
            st.subheader(project["name"])
            st.write(project["description"])

            # Create badges for each tech in stack.
            # NOTE(review): the badge <span> markup appears to have been
            # stripped from this f-string during extraction — restore the
            # original HTML wrapper if available.
            badges_html = " ".join([
                f"{tech}" for tech in project["tech_stack"]
            ])
            st.markdown(
                f"""
{badges_html}
""",
                unsafe_allow_html=True
            )
            st.markdown("---")

    # --- Badge Styling ---
    # NOTE(review): CSS payload appears stripped here as well.
    st.markdown("""
""", unsafe_allow_html=True)

    st.header("Publications", divider="gray")

    # --- Publication Data ---
    publications = [
        {
            "title": "Automated Detection of Age-Related Macular Degeneration (AMD) Using Deep Learning",
            "venue": "Journal of Medical Imaging & Health Informatics, 2023",
            "description": "Published a deep learning-based pipeline to detect age-related macular degeneration from retinal images.",
            "link": "https://doi.org/xxxxxx"
        },
        {
            "title": "Using Hyperdimensional Computing to Extract Features for the Detection of Type 2 Diabetes",
            "venue": "Conference on Health Informatics, 2024 (Under Review)",
            "description": "Explored hyperdimensional computing techniques to improve detection of Type 2 Diabetes from clinical data.",
            "link": ""
        }
    ]

    # --- Render Publications ---
    for pub in publications:
        with st.container():
            st.subheader(pub["title"])
            st.caption(pub["venue"])
            st.write(pub["description"])
            if pub["link"]:  # only published work has a link
                st.markdown(f"[🔗 View Publication]({pub['link']})")
            st.markdown("---")

# Chatbot Section
elif menu == "Chatbot":
    st.title("🤖 Shanin Chatbot")
    st.write("Ask me anything about my portfolio!")

    # Replay previous conversation turns from session state.
    for msg in st.session_state.messages:
        with st.chat_message(msg["role"]):
            st.write(msg["content"])

    # Input box
    if prompt := st.chat_input("Type your question..."):
        # Add user message to history and echo it.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.write(prompt)

        # Build the RAG chain on first use (expensive: model + index setup).
        if st.session_state.rag_chain is None:
            with st.spinner("Initializing RAG pipeline..."):
                initialize_rag()

        try:
            with st.spinner("Generating response..."):
                response = st.session_state.rag_chain.invoke({"input": prompt})
                answer = response["answer"]

            # Add assistant response to history and render it.
            st.session_state.messages.append({"role": "assistant", "content": answer})
            with st.chat_message("assistant"):
                st.write(answer)
        except Exception as e:
            # Original literal contained a raw newline (a SyntaxError in a
            # plain f-string); collapsed to one line.
            st.error(f"Error processing request: {str(e)}")