# Portfolio chatbot app (Streamlit): sidebar navigation between an
# "About Me" page and a RAG-powered chatbot backed by Pinecone.
# NOTE(review): `execution.euron_streamlit` is imported as `st` but used with
# the standard Streamlit API (st.markdown, st.sidebar, st.radio, ...) —
# presumably a project wrapper around Streamlit; confirm the module path.
import execution.euron_streamlit as st
from src.helper import download_hugging_face_embeddings
from langchain_pinecone import PineconeVectorStore
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from src.prompt import *
from src.euron_chat import EuronChatModel
from dotenv import load_dotenv
import os

# Load environment variables
load_dotenv()

pinecone_api_key = os.getenv("PINECONE_API_KEY")
euron_api_key = os.getenv("EURON_API_KEY")

# Fail fast: neither the vector store nor the chat model can work
# without both API keys, so stop the script run immediately.
if not pinecone_api_key or not euron_api_key:
    st.error("Missing keys in environment")
    st.stop()

# ✅ Minimal CSS – Only apply Fira Code font globally + Sidebar styling
# NOTE(review): the injected style block below is empty in this revision —
# confirm whether the CSS was intentionally removed (if so, this call can go).
st.markdown("""
""", unsafe_allow_html=True)

# Sidebar Menu
with st.sidebar:
    # st.title("🌟 Navigation")
    st.image("static/icon.png", width=120)  # Image in sidebar
    st.markdown("---")
    # `menu` drives the page switch further down the script.
    menu = st.radio("Go to", ["About Me", "Chatbot"], label_visibility="hidden")
    st.markdown("---")
    st.caption("© 2025 Shanin Hossain")

# st.markdown("""
#
# """, unsafe_allow_html=True) # Initialize session state if "rag_chain" not in st.session_state: st.session_state.rag_chain = None st.session_state.embeddings = None st.session_state.retriever = None if "messages" not in st.session_state: st.session_state.messages = [] # Chat history def initialize_rag(): try: if st.session_state.rag_chain is None: st.session_state.embeddings = download_hugging_face_embeddings() index_name = "portfolio" docsearch = PineconeVectorStore.from_existing_index( index_name=index_name, embedding=st.session_state.embeddings ) st.session_state.retriever = docsearch.as_retriever( search_type="similarity", search_kwargs={"k": 3} ) chatModel = EuronChatModel() prompt = ChatPromptTemplate.from_messages( [ ("system", system_prompt), ("human", "{input}"), ] ) question_answer_chain = create_stuff_documents_chain(chatModel, prompt) st.session_state.rag_chain = create_retrieval_chain( st.session_state.retriever, question_answer_chain ) except Exception as e: st.error(f"Error initializing RAG: {str(e)}") st.stop() # About Me Section # About Me Section if menu == "About Me": st.write("Hey there 👋") st.title("👨 I'm Shanin Hossain") st.markdown(""" I'm an AI Engineer & Research Assistant passionate about: - Machine Learning, Deep Learning, and Generative AI - Computer Vision & Natural Language Processing - Healthcare Informatics and Medical Imaging 📌 I have worked on multiple AI projects, including OCR, Retrieval-Augmented Generation (RAG), and hybrid graph networks. 
    """)

    st.success("👉 Navigate to **Chatbot** in the sidebar to chat with me!")

    st.header("My Projects", divider="gray")

    # --- Project Data ---
    # Static portfolio entries; add/edit dicts here to change the page.
    # Each entry: name (str), description (str), tech_stack (list[str]).
    projects = [
        {
            "name": "Brain Glioma Grading System",
            "description": "Developed a hybrid graph neural network to grade glioma tumors from medical imaging data.",
            "tech_stack": ["PyTorch Geometric", "Graph Neural Networks", "Medical Imaging", "Python"]
        },
        {
            "name": "OCR Automation",
            "description": "Built an OCR pipeline for document image understanding and text extraction.",
            "tech_stack": ["YOLOv8", "OpenCV", "Tesseract OCR", "FastAPI"]
        },
        {
            "name": "Portfolio Chatbot",
            "description": "Created a RAG-powered chatbot integrated with Pinecone & custom embeddings for Q&A over portfolio data.",
            "tech_stack": ["Streamlit", "LangChain", "Pinecone", "Hugging Face"]
        },
    ]

    # --- Render Projects with Badges ---
    for project in projects:
        with st.container():
            st.subheader(project["name"])
            st.write(project["description"])

            # Create badges for each tech in stack
            # NOTE(review): the f-string below emits the bare tech name — any
            # badge HTML wrapper around {tech} appears to have been stripped
            # in this revision; confirm the intended markup.
            badges_html = " ".join([
                f"{tech}"
                for tech in project["tech_stack"]
            ])
            st.markdown(
                f"