Commit b4f404b
1 Parent(s): 8c40d02
Add all files including PDFs with Git LFS
- .env +2 -0
- .gitattributes +1 -0
- data/Medical_book.pdf +3 -0
- data/amdnet23.pdf +3 -0
- execution/euron_html.py +70 -0
- execution/euron_streamlit.py +294 -0
- execution/gemma_Ollama.py +82 -0
- execution/web.py +72 -0
- lab.ipynb +0 -0
- main.py +70 -0
- requirements.txt +13 -0
- setup.py +10 -0
- src/__pycache__/euron_chat.cpython-312.pyc +0 -0
- src/__pycache__/helper.cpython-312.pyc +0 -0
- src/__pycache__/prompt.cpython-312.pyc +0 -0
- src/euron_chat.py +152 -0
- src/helper.py +41 -0
- src/prompt.py +9 -0
- src/store_index.py +35 -0
- static/icon.png +0 -0
- static/style.css +113 -0
- templates/chat.html +72 -0
- templates/index.html +87 -0
.env
ADDED
@@ -0,0 +1,2 @@
+PINECONE_API_KEY = "pcsk_3wGqdG_Srx49fCB4fS64N23j8a32jVeVYhW99ABaHWubzrhMWAbntQizXRXrDZpNFerMaP"
+EURON_API_KEY = "euri-bfbd93d283a5b6c3e5f62b04d5ae9242f98c07aef2f2b11971a9ed4a8ff603d3"
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.pdf filter=lfs diff=lfs merge=lfs -text
data/Medical_book.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:753cd53b7a3020bbd91f05629b0e3ddcfb6a114d7bbedb22c2298b66f5dd00cc
+size 16127037
data/amdnet23.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3d039d4b2fa76b225171a4d9bc86447e1e56fe9a22f4fed9ecc0a16a3e9224b
+size 1507090
execution/euron_html.py
ADDED
@@ -0,0 +1,70 @@
+from flask import Flask, render_template, request
+from src.helper import download_hugging_face_embeddings
+from langchain_pinecone import PineconeVectorStore
+from langchain.chains import create_retrieval_chain
+from langchain.chains.combine_documents import create_stuff_documents_chain
+from langchain_core.prompts import ChatPromptTemplate
+from src.prompt import *
+from src.euron_chat import EuronChatModel
+from dotenv import load_dotenv
+import os
+
+# Load environment variables from .env file (for local dev)
+load_dotenv()
+
+# Verify API keys (optional: remove in full production if confident)
+pinecone_api_key = os.environ.get("PINECONE_API_KEY")
+euron_api_key = os.environ.get("EURON_API_KEY")
+if not pinecone_api_key or not euron_api_key:
+    raise ValueError("Missing PINECONE_API_KEY or EURON_API_KEY in environment variables.")
+
+app = Flask(__name__)
+
+# -----------------------------
+# Load embeddings and Pinecone index
+# -----------------------------
+embeddings = download_hugging_face_embeddings()
+index_name = "portfolio"
+
+docsearch = PineconeVectorStore.from_existing_index(
+    index_name=index_name,
+    embedding=embeddings
+)
+
+retriever = docsearch.as_retriever(search_type="similarity", search_kwargs={"k": 3})
+
+# -----------------------------
+# Initialize EuronChatModel & RAG chain
+# -----------------------------
+chatModel = EuronChatModel()
+
+prompt = ChatPromptTemplate.from_messages(
+    [
+        ("system", system_prompt),
+        ("human", "{input}"),
+    ]
+)
+
+question_answer_chain = create_stuff_documents_chain(chatModel, prompt)
+rag_chain = create_retrieval_chain(retriever, question_answer_chain)
+
+# -----------------------------
+# Flask routes
+# -----------------------------
+@app.route("/")
+def index():
+    return render_template('index.html')
+
+@app.route("/get", methods=["GET", "POST"])
+def chat():
+    msg = request.form["msg"]
+    print("User Input:", msg)
+
+    response = rag_chain.invoke({"input": msg})
+    print("Response:", response["answer"])
+
+    return str(response["answer"])
+
+if __name__ == '__main__':
+    port = int(os.environ.get('PORT', 2025))  # Use Render's PORT or default to 2025 locally
+    app.run(host="0.0.0.0", port=port, debug=False)  # Disable debug for production
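The /get route above reads a form field named msg and returns the answer as plain text; a minimal client sketch (the localhost URL, port 2025, and the question text are assumptions for illustration) could look like:

# Hypothetical client for the Flask /get route above (form-encoded; local URL and port assumed).
import requests

resp = requests.post(
    "http://localhost:2025/get",                           # assumed local URL; adjust to the deployed host/port
    data={"msg": "What projects are in the portfolio?"},   # sent as form data, matching request.form["msg"]
)
print(resp.text)  # the route returns the answer string directly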
execution/euron_streamlit.py
ADDED
@@ -0,0 +1,294 @@
+import streamlit as st
+from src.helper import download_hugging_face_embeddings
+from langchain_pinecone import PineconeVectorStore
+from langchain.chains import create_retrieval_chain
+from langchain.chains.combine_documents import create_stuff_documents_chain
+from langchain_core.prompts import ChatPromptTemplate
+from src.prompt import *
+from src.euron_chat import EuronChatModel
+from dotenv import load_dotenv
+import os
+
+# Load environment variables
+load_dotenv()
+pinecone_api_key = os.getenv("PINECONE_API_KEY")
+euron_api_key = os.getenv("EURON_API_KEY")
+
+if not pinecone_api_key or not euron_api_key:
+    st.error("Missing keys in environment")
+    st.stop()
+
+# ✅ Minimal CSS – Only apply Fira Code font globally + Sidebar styling
+st.markdown("""
+<style>
+@import url('https://fonts.googleapis.com/css2?family=Fira+Code:wght@300;400;500;600;700&display=swap');
+
+/* Global font */
+* {
+    font-family: 'Fira Code', monospace !important;
+}
+
+/* Sidebar background & padding */
+[data-testid="stSidebar"] {
+    background-color: #f0f2f6 !important; /* Light ash */
+    border-radius: 0 20px 20px 0;
+    padding: 25px 15px;
+}
+
+/* Sidebar title */
+[data-testid="stSidebar"] .css-1d391kg {
+    color: #1f2937 !important;
+    font-size: 2rem;
+    font-weight: 700;
+    text-align: center;
+    margin-bottom: 25px;
+}
+
+/* Sidebar radio buttons as styled buttons */
+[data-testid="stSidebar"] .stRadio > div {
+    display: flex;
+    flex-direction: column;
+    gap: 12px;
+}
+[data-testid="stSidebar"] .stRadio label {
+    background-color: rgba(31,41,55,0.1) !important; /* subtle gray */
+    color: #1f2937 !important;
+    padding: 12px 18px;
+    border-radius: 12px;
+    transition: all 0.3s ease;
+    cursor: pointer;
+    font-size: 1.1rem;
+    font-weight: 500;
+}
+[data-testid="stSidebar"] .stRadio label:hover {
+    background-color: rgba(31,41,55,0.2) !important;
+    transform: translateX(5px);
+    box-shadow: 0 4px 8px rgba(0,0,0,0.2);
+}
+[data-testid="stSidebar"] .stRadio input:checked + label {
+    background-color: #4B5563 !important; /* Darker for selected */
+    color: #ffffff !important;
+}
+
+/* Sidebar image */
+[data-testid="stSidebar"] img {
+    border-radius: 15px;
+    margin-bottom: 20px;
+}
+
+/* Sidebar divider */
+[data-testid="stSidebar"] hr {
+    border: 1px solid rgba(0,0,0,0.2) !important;
+    margin: 20px 0;
+}
+
+/* Sidebar caption */
+[data-testid="stSidebar"] .css-1x8i1po {
+    text-align: center;
+    font-size: 0.9rem;
+    color: #4b5563 !important;
+    margin-top: 15px;
+}
+</style>
+""", unsafe_allow_html=True)
+
+# Sidebar Menu
+with st.sidebar:
+    # st.title("🌟 Navigation")
+    st.image("static/icon.png", width=120)  # Image in sidebar
+    st.markdown("---")
+    menu = st.radio("Go to", ["About Me", "Chatbot"], label_visibility="hidden")
+    st.markdown("---")
+    st.caption("© 2025 Shanin Hossain")
+    # st.markdown("""
+    # <div style="text-align:center; margin-top: 15px;">
+    #     <a href="https://www.linkedin.com/in/shanin-hossain" target="_blank">
+    #         <img src="https://cdn.jsdelivr.net/gh/simple-icons/simple-icons/icons/linkedin.svg" width="25" style="margin:0;">
+    #     </a>
+    #     <a href="https://github.com/shaninhossain" target="_blank">
+    #         <img src="https://cdn.jsdelivr.net/gh/simple-icons/simple-icons/icons/github.svg" width="25" style="margin:0;">
+    #     </a>
+    #     <a href="https://www.facebook.com/shaninhossain" target="_blank">
+    #         <img src="https://cdn.jsdelivr.net/gh/simple-icons/simple-icons/icons/facebook.svg" width="25" style="margin:0;">
+    #     </a>
+    # </div>
+    # """, unsafe_allow_html=True)
+
+
+
+# Initialize session state
+if "rag_chain" not in st.session_state:
+    st.session_state.rag_chain = None
+    st.session_state.embeddings = None
+    st.session_state.retriever = None
+if "messages" not in st.session_state:
+    st.session_state.messages = []  # Chat history
+
+def initialize_rag():
+    try:
+        if st.session_state.rag_chain is None:
+            st.session_state.embeddings = download_hugging_face_embeddings()
+            index_name = "portfolio"
+            docsearch = PineconeVectorStore.from_existing_index(
+                index_name=index_name,
+                embedding=st.session_state.embeddings
+            )
+            st.session_state.retriever = docsearch.as_retriever(
+                search_type="similarity", search_kwargs={"k": 3}
+            )
+            chatModel = EuronChatModel()
+            prompt = ChatPromptTemplate.from_messages(
+                [
+                    ("system", system_prompt),
+                    ("human", "{input}"),
+                ]
+            )
+            question_answer_chain = create_stuff_documents_chain(chatModel, prompt)
+            st.session_state.rag_chain = create_retrieval_chain(
+                st.session_state.retriever, question_answer_chain
+            )
+    except Exception as e:
+        st.error(f"Error initializing RAG: {str(e)}")
+        st.stop()
+
+# About Me Section
+# About Me Section
+if menu == "About Me":
+    st.write("Hey there 👋")
+    st.title("👨 I'm Shanin Hossain")
+    st.markdown("""
+    I'm an AI Engineer & Research Assistant passionate about:
+    - Machine Learning, Deep Learning, and Generative AI
+    - Computer Vision & Natural Language Processing
+    - Healthcare Informatics and Medical Imaging
+
+    📌 I have worked on multiple AI projects, including OCR, Retrieval-Augmented Generation (RAG), and hybrid graph networks.
+    """)
+    st.success("👉 Navigate to **Chatbot** in the sidebar to chat with me!")
+    st.header("My Projects", divider="gray")
+
+    # --- Project Data ---
+    projects = [
+        {
+            "name": "Brain Glioma Grading System",
+            "description": "Developed a hybrid graph neural network to grade glioma tumors from medical imaging data.",
+            "tech_stack": ["PyTorch Geometric", "Graph Neural Networks", "Medical Imaging", "Python"]
+        },
+        {
+            "name": "OCR Automation",
+            "description": "Built an OCR pipeline for document image understanding and text extraction.",
+            "tech_stack": ["YOLOv8", "OpenCV", "Tesseract OCR", "FastAPI"]
+        },
+        {
+            "name": "Portfolio Chatbot",
+            "description": "Created a RAG-powered chatbot integrated with Pinecone & custom embeddings for Q&A over portfolio data.",
+            "tech_stack": ["Streamlit", "LangChain", "Pinecone", "Hugging Face"]
+        },
+    ]
+
+    # --- Render Projects with Badges ---
+    for project in projects:
+        with st.container():
+            st.subheader(project["name"])
+            st.write(project["description"])
+
+            # Create badges for each tech in stack
+            badges_html = " ".join([
+                f"<span class='badge'>{tech}</span>" for tech in project["tech_stack"]
+            ])
+
+            st.markdown(
+                f"<div style=''>{badges_html}</div>",
+                unsafe_allow_html=True
+            )
+
+            st.markdown("---")
+
+    # --- Badge Styling ---
+    st.markdown("""
+    <style>
+    .badge {
+        display: inline-block;
+        padding: 6px 12px;
+        font-size: 0.70rem;
+        font-weight: 600;
+        color: white;
+        border-radius: 12px;
+    }
+
+    /* Randomized color palette */
+    .badge:nth-child(5n+1) { background-color: #2563EB; } /* Blue */
+    .badge:nth-child(5n+2) { background-color: #059669; } /* Green */
+    .badge:nth-child(5n+3) { background-color: #D97706; } /* Orange */
+    .badge:nth-child(5n+4) { background-color: #9333EA; } /* Purple */
+    .badge:nth-child(5n+5) { background-color: #DC2626; } /* Red */
+    </style>
+    """, unsafe_allow_html=True)
+
+
+    st.header("Publications", divider="gray")
+
+    # --- Publication Data ---
+    publications = [
+        {
+            "title": "Automated Detection of Age-Related Macular Degeneration (AMD) Using Deep Learning",
+            "venue": "Journal of Medical Imaging & Health Informatics, 2023",
+            "description": "Published a deep learning-based pipeline to detect age-related macular degeneration from retinal images.",
+            "link": "https://doi.org/xxxxxx"
+        },
+        {
+            "title": "Using Hyperdimensional Computing to Extract Features for the Detection of Type 2 Diabetes",
+            "venue": "Conference on Health Informatics, 2024 (Under Review)",
+            "description": "Explored hyperdimensional computing techniques to improve detection of Type 2 Diabetes from clinical data.",
+            "link": ""
+        }
+    ]
+
+    # --- Render Publications ---
+    for pub in publications:
+        with st.container():
+            st.subheader(pub["title"])
+            st.caption(pub["venue"])
+            st.write(pub["description"])
+
+            if pub["link"]:
+                st.markdown(f"[🔗 View Publication]({pub['link']})")
+
+            st.markdown("---")
+
+
+
+# Chatbot Section
+elif menu == "Chatbot":
+    st.title("🤖 Shanin Chatbot")
+    st.write("Ask me anything about my portfolio!")
+
+    # Display previous messages
+    for msg in st.session_state.messages:
+        with st.chat_message(msg["role"]):
+            st.write(msg["content"])
+
+    # Input box
+    if prompt := st.chat_input("Type your question..."):
+        # Add user message
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        with st.chat_message("user"):
+            st.write(prompt)
+
+        # Initialize RAG if needed
+        if st.session_state.rag_chain is None:
+            with st.spinner("Initializing RAG pipeline..."):
+                initialize_rag()
+
+        try:
+            with st.spinner("Generating response..."):
+                response = st.session_state.rag_chain.invoke({"input": prompt})
+                answer = response["answer"]
+
+                # Add assistant response
+                st.session_state.messages.append({"role": "assistant", "content": answer})
+                with st.chat_message("assistant"):
+                    st.write(answer)
+
+        except Exception as e:
+            st.error(f"Error processing request: {str(e)}")
execution/gemma_Ollama.py
ADDED
@@ -0,0 +1,82 @@
+from flask import Flask, render_template, request, jsonify
+from flask_cors import CORS
+from dotenv import load_dotenv
+import os
+from langchain_pinecone import PineconeVectorStore
+from langchain.chains import create_retrieval_chain
+from langchain.chains.combine_documents import create_stuff_documents_chain
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_community.chat_models import ChatOllama
+from langchain_huggingface import HuggingFaceEmbeddings
+
+# Download the Embeddings from HuggingFace
+def download_hugging_face_embeddings():
+    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')  # this model returns 384 dimensions
+    return embeddings
+
+# Define the system prompt
+system_prompt = (
+    "You are an intelligent Personal Portfolio Assistant that answers questions about the user's background, work, and projects. "
+    "Use the retrieved context below to provide accurate and natural responses. "
+    "If the context does not contain the answer, respond with 'I'm not sure about that.' "
+    "Keep your answer concise."
+    "\n\n"
+    "Context:\n{context}"
+)
+
+
+load_dotenv()
+
+pinecone_api_key = os.environ.get("PINECONE_API_KEY")
+if not pinecone_api_key:
+    raise ValueError("Missing PINECONE_API_KEY in environment variables.")
+
+app = Flask(__name__)
+CORS(app)  # ✅ Allow external web app to access this Flask API
+
+# Load embeddings
+embeddings = download_hugging_face_embeddings()
+index_name = "portfolio"
+
+docsearch = PineconeVectorStore.from_existing_index(index_name=index_name, embedding=embeddings)
+retriever = docsearch.as_retriever(search_type="similarity", search_kwargs={"k": 3})
+
+# Model
+chatModel = ChatOllama(model="gemma3:1b", temperature=0.1, max_tokens=512)
+
+# Prompt
+prompt = ChatPromptTemplate.from_messages(
+    [
+        ("system", system_prompt),
+        ("human", "{input}"),
+    ]
+)
+
+question_answer_chain = create_stuff_documents_chain(chatModel, prompt)
+rag_chain = create_retrieval_chain(retriever, question_answer_chain)
+
+
+@app.route("/")
+def index():
+    return "✅ RAG server running"
+
+
+@app.route("/get", methods=["POST"])
+def chat():
+    user_msg = request.form.get("msg") or request.json.get("msg")
+
+    if not user_msg:
+        return jsonify({"error": "No message sent"}), 400
+
+    try:
+        response = rag_chain.invoke({"input": user_msg})
+        answer = response.get("answer", "Sorry, I couldn’t find an answer.")
+        return jsonify({"reply": answer})
+    except Exception as e:
+        print("Error:", e)
+        return jsonify({"reply": f"Server Error: {str(e)}"})
+
+
+if __name__ == '__main__':
+    port = int(os.environ.get('PORT', 2025))
+    app.run(host="0.0.0.0", port=port, debug=False)
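This variant exposes /get as a JSON API behind CORS; a minimal client sketch (assuming the server runs locally on port 2025 and a gemma3:1b model has been pulled into a local Ollama instance; URL and question text are illustrative) could look like:

# Hypothetical JSON client for the Flask/Ollama server above (local URL, port, and question assumed).
import requests

resp = requests.post(
    "http://localhost:2025/get",            # assumed local URL
    json={"msg": "Who is Shanin Hossain?"}  # matches request.json.get("msg")
)
print(resp.json()["reply"])                 # the route wraps the answer in {"reply": ...}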
execution/web.py
ADDED
@@ -0,0 +1,72 @@
+
+
+from flask import Flask, render_template, request
+from src.helper import download_hugging_face_embeddings
+from langchain_pinecone import PineconeVectorStore
+from langchain.chains import create_retrieval_chain
+from langchain.chains.combine_documents import create_stuff_documents_chain
+from langchain_core.prompts import ChatPromptTemplate
+from src.prompt import *
+from src.euron_chat import EuronChatModel
+from dotenv import load_dotenv
+import os
+
+# Load environment variables from .env file (for local dev)
+load_dotenv()
+
+# Verify API keys (optional: remove in full production if confident)
+pinecone_api_key = os.environ.get("PINECONE_API_KEY")
+euron_api_key = os.environ.get("EURON_API_KEY")
+if not pinecone_api_key or not euron_api_key:
+    raise ValueError("Missing PINECONE_API_KEY or EURON_API_KEY in environment variables.")
+
+app = Flask(__name__)
+
+# -----------------------------
+# Load embeddings and Pinecone index
+# -----------------------------
+embeddings = download_hugging_face_embeddings()
+index_name = "portfolio"
+
+docsearch = PineconeVectorStore.from_existing_index(
+    index_name=index_name,
+    embedding=embeddings
+)
+
+retriever = docsearch.as_retriever(search_type="similarity", search_kwargs={"k": 3})
+
+# -----------------------------
+# Initialize EuronChatModel & RAG chain
+# -----------------------------
+chatModel = EuronChatModel()
+
+prompt = ChatPromptTemplate.from_messages(
+    [
+        ("system", system_prompt),
+        ("human", "{input}"),
+    ]
+)
+
+question_answer_chain = create_stuff_documents_chain(chatModel, prompt)
+rag_chain = create_retrieval_chain(retriever, question_answer_chain)
+
+# -----------------------------
+# Flask routes
+# -----------------------------
+@app.route("/")
+def index():
+    return render_template('chat.html')
+
+@app.route("/get", methods=["GET", "POST"])
+def chat():
+    msg = request.form["msg"]
+    print("User Input:", msg)
+
+    response = rag_chain.invoke({"input": msg})
+    print("Response:", response["answer"])
+
+    return str(response["answer"])
+
+if __name__ == '__main__':
+    port = int(os.environ.get('PORT', 8080))  # Use Render's PORT or default to 8080 locally
+    app.run(host="0.0.0.0", port=port, debug=False)  # Disable debug for production
lab.ipynb
ADDED
The diff for this file is too large to render.
main.py
ADDED
@@ -0,0 +1,70 @@
+from flask import Flask, render_template, request
+from src.helper import download_hugging_face_embeddings
+from langchain_pinecone import PineconeVectorStore
+from langchain.chains import create_retrieval_chain
+from langchain.chains.combine_documents import create_stuff_documents_chain
+from langchain_core.prompts import ChatPromptTemplate
+from src.prompt import *
+from src.euron_chat import EuronChatModel
+from dotenv import load_dotenv
+import os
+
+# Load environment variables from .env file (for local dev)
+load_dotenv()
+
+# Verify API keys (optional: remove in full production if confident)
+pinecone_api_key = os.environ.get("PINECONE_API_KEY")
+euron_api_key = os.environ.get("EURON_API_KEY")
+if not pinecone_api_key or not euron_api_key:
+    raise ValueError("Missing PINECONE_API_KEY or EURON_API_KEY in environment variables.")
+
+app = Flask(__name__)
+
+# -----------------------------
+# Load embeddings and Pinecone index
+# -----------------------------
+embeddings = download_hugging_face_embeddings()
+index_name = "medicaldb"
+
+docsearch = PineconeVectorStore.from_existing_index(
+    index_name=index_name,
+    embedding=embeddings
+)
+
+retriever = docsearch.as_retriever(search_type="similarity", search_kwargs={"k": 3})
+
+# -----------------------------
+# Initialize EuronChatModel & RAG chain
+# -----------------------------
+chatModel = EuronChatModel()
+
+prompt = ChatPromptTemplate.from_messages(
+    [
+        ("system", system_prompt),
+        ("human", "{input}"),
+    ]
+)
+
+question_answer_chain = create_stuff_documents_chain(chatModel, prompt)
+rag_chain = create_retrieval_chain(retriever, question_answer_chain)
+
+# -----------------------------
+# Flask routes
+# -----------------------------
+@app.route("/")
+def index():
+    return render_template('index.html')
+
+@app.route("/get", methods=["GET", "POST"])
+def chat():
+    msg = request.form["msg"]
+    print("User Input:", msg)
+
+    response = rag_chain.invoke({"input": msg})
+    print("Response:", response["answer"])
+
+    return str(response["answer"])
+
+if __name__ == '__main__':
+    port = int(os.environ.get('PORT', 2025))  # Use Render's PORT or default to 2025 locally
+    app.run(host="0.0.0.0", port=port, debug=False)  # Disable debug for production
requirements.txt
ADDED
@@ -0,0 +1,13 @@
+langchain==0.3.26
+sentence-transformers==4.1.0
+pypdf==5.6.1
+python-dotenv==1.1.0
+langchain-pinecone==0.2.8
+langchain-community==0.3.26
+langchain-huggingface==0.1.0
+requests==2.32.3
+# streamlit==1.39.0
+flask==3.1.1
+# ollama==0.6.0
+
+-e .  # for this setup.py file
setup.py
ADDED
@@ -0,0 +1,10 @@
+from setuptools import find_packages, setup
+
+setup(
+    name="Medical Chatbot",
+    version="0.1.0",
+    author="Aiyub Ali",
+    author_email="aiyubali15-13456@diu.edu.bd",
+    packages=find_packages(),
+    install_requires=[]
+)
src/__pycache__/euron_chat.cpython-312.pyc
ADDED
Binary file (3.33 kB).

src/__pycache__/helper.cpython-312.pyc
ADDED
Binary file (1.95 kB).

src/__pycache__/prompt.cpython-312.pyc
ADDED
Binary file (516 Bytes).
src/euron_chat.py
ADDED
@@ -0,0 +1,152 @@
+import os
+import requests
+from langchain_core.language_models import BaseChatModel
+from langchain_core.messages import AIMessage
+from langchain_core.outputs import ChatResult, ChatGeneration
+from typing import List, Optional
+
+def generate_completion(messages, model="gpt-4.1-nano", max_tokens=1000, temperature=0.7):
+    """
+    Generate a response from Euron API
+    """
+    euron_api_key = os.getenv("EURON_API_KEY", "").strip()
+    if not euron_api_key:
+        raise ValueError("EURON_API_KEY is missing. Please set it in your environment.")
+
+    url = "https://api.euron.one/api/v1/euri/chat/completions"
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {euron_api_key}"
+    }
+
+
+    # Convert LangChain messages to API format
+    api_messages = []
+    for message in messages:
+        if hasattr(message, 'type'):
+            role = message.type
+            if role == "human":
+                role = "user"
+            elif role == "ai":
+                role = "assistant"
+            api_messages.append({"role": role, "content": message.content})
+        else:
+            api_messages.append(message)
+
+    payload = {
+        "messages": api_messages,
+        "model": model,
+        "max_tokens": max_tokens,
+        "temperature": temperature
+    }
+
+    response = requests.post(url, headers=headers, json=payload)
+    if response.status_code != 200:
+        raise Exception(f"Euron API Error: {response.status_code} {response.text}")
+
+    return response.json()
+
+
+class EuronChatModel(BaseChatModel):
+    """
+    LangChain compatible chat model using Euron API
+    """
+    model_name: str = "gpt-4.1-nano"
+
+    def _generate(self, messages: List, stop: Optional[List[str]] = None) -> ChatResult:
+        response = generate_completion(messages, model=self.model_name)
+
+        # Extract AI message
+        ai_content = response['choices'][0]['message']['content']
+
+        # Wrap in LangChain objects
+        ai_message = AIMessage(content=ai_content)
+        generation = ChatGeneration(message=ai_message)
+
+        return ChatResult(generations=[generation])
+
+    def _llm_type(self) -> str:
+        return "euron-chat"
+
+
+# Optional: simple chat function
+def simple_chat_completion(user_message: str):
+    messages = [{"role": "user", "content": user_message}]
+    response = generate_completion(messages)
+    return response['choices'][0]['message']['content']
+
+
+# import os
+# import requests
+# from langchain_core.language_models import BaseChatModel
+# from langchain_core.messages import AIMessage
+# from langchain_core.outputs import ChatResult, ChatGeneration
+# from typing import List, Optional
+
+# EURON_API_KEY = os.environ.get("EURON_API_KEY")
+
+# def generate_completion(messages, model="gpt-4.1-nano", max_tokens=1000, temperature=0.7):
+#     """
+#     Generate a response from Euron API
+#     """
+#     url = "https://api.euron.one/api/v1/euri/chat/completions"
+#     headers = {
+#         "Content-Type": "application/json",
+#         "Authorization": f"Bearer {EURON_API_KEY}"
+#     }
+
+
+#     # Convert LangChain messages to API format
+#     api_messages = []
+#     for message in messages:
+#         if hasattr(message, 'type'):
+#             role = message.type
+#             if role == "human":
+#                 role = "user"
+#             elif role == "ai":
+#                 role = "assistant"
+#             api_messages.append({"role": role, "content": message.content})
+#         else:
+#             api_messages.append(message)
+
+#     payload = {
+#         "messages": api_messages,
+#         "model": model,
+#         "max_tokens": max_tokens,
+#         "temperature": temperature
+#     }
+
+#     response = requests.post(url, headers=headers, json=payload)
+#     if response.status_code != 200:
+#         raise Exception(f"Euron API Error: {response.status_code} {response.text}")
+
+#     return response.json()
+
+
+# class EuronChatModel(BaseChatModel):
+#     """
+#     LangChain compatible chat model using Euron API
+#     """
+#     model_name: str = "gpt-4.1-nano"
+
+#     def _generate(self, messages: List, stop: Optional[List[str]] = None) -> ChatResult:
+#         response = generate_completion(messages, model=self.model_name)
+
+#         # Extract AI message
+#         ai_content = response['choices'][0]['message']['content']
+
+#         # Wrap in LangChain objects
+#         ai_message = AIMessage(content=ai_content)
+#         generation = ChatGeneration(message=ai_message)
+
+#         return ChatResult(generations=[generation])
+
+#     def _llm_type(self) -> str:
+#         return "euron-chat"
+
+
+#     # Optional: simple chat function
+#     def simple_chat_completion(user_message: str):
+#         messages = [{"role": "user", "content": user_message}]
+#         response = generate_completion(messages)
+#         return response['choices'][0]['message']['content']
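Outside of the Flask and Streamlit apps, the wrapper can also be exercised on its own; a minimal sketch (assuming EURON_API_KEY is set in the environment and api.euron.one is reachable; the prompt text is illustrative) could look like:

# Hypothetical standalone usage of EuronChatModel (EURON_API_KEY assumed to be set).
from dotenv import load_dotenv
from src.euron_chat import EuronChatModel, simple_chat_completion

load_dotenv()  # picks up EURON_API_KEY from .env

chat = EuronChatModel()
# BaseChatModel.invoke() wraps the string in a HumanMessage and routes it through _generate() above.
print(chat.invoke("Introduce yourself in one sentence.").content)

# Or call the raw helper without LangChain in the loop.
print(simple_chat_completion("Introduce yourself in one sentence."))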
src/helper.py
ADDED
@@ -0,0 +1,41 @@
+from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_huggingface import HuggingFaceEmbeddings
+from typing import List
+from langchain.schema import Document
+
+# Extract Data From the PDF File
+def load_pdf_file(data):
+    loader = DirectoryLoader(data,
+                             glob="*.pdf",
+                             loader_cls=PyPDFLoader)
+    documents = loader.load()
+    return documents
+
+# Filter to minimal documents
+def filter_to_minimal_docs(docs: List[Document]) -> List[Document]:
+    """
+    Given a list of Document objects, return a new list of Document objects
+    containing only 'source' in metadata and the original page_content.
+    """
+    minimal_docs: List[Document] = []
+    for doc in docs:
+        src = doc.metadata.get("source")
+        minimal_docs.append(
+            Document(
+                page_content=doc.page_content,
+                metadata={"source": src}
+            )
+        )
+    return minimal_docs
+
+# Split the Data into Text Chunks
+def text_split(extracted_data):
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
+    text_chunks = text_splitter.split_documents(extracted_data)
+    return text_chunks
+
+# Download the Embeddings from HuggingFace
+def download_hugging_face_embeddings():
+    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')  # this model returns 384 dimensions
+    return embeddings
src/prompt.py
ADDED
@@ -0,0 +1,9 @@
+# Define the system prompt
+system_prompt = (
+    "You are an intelligent Personal Portfolio Assistant that answers questions about the user's background, work, and projects. "
+    "Use the retrieved context below to provide accurate and natural responses. "
+    "If the context does not contain the answer, respond with 'I'm not sure about that.' "
+    "Keep your answer concise."
+    "\n\n"
+    "Context:\n{context}"
+)
src/store_index.py
ADDED
@@ -0,0 +1,35 @@
+from dotenv import load_dotenv
+import os
+from pinecone import Pinecone
+from langchain_pinecone import PineconeVectorStore
+from src.helper import load_pdf_file, filter_to_minimal_docs, text_split, download_hugging_face_embeddings
+
+# Load environment variables from .env file
+load_dotenv()
+
+# Retrieve Pinecone API key from environment variables
+pinecone_api_key = os.environ.get("PINECONE_API_KEY")
+if not pinecone_api_key:
+    raise ValueError("PINECONE_API_KEY not found in environment variables")
+
+# Initialize Pinecone client
+pc = Pinecone(api_key=pinecone_api_key)
+
+# Set up index
+index_name = "portfolio"
+index = pc.Index(index_name)
+
+# Load and process documents
+extracted_data = load_pdf_file(data='data/')
+filter_data = filter_to_minimal_docs(extracted_data)
+text_chunks = text_split(filter_data)
+
+# Download embeddings
+embeddings = download_hugging_face_embeddings()
+
+# Create Pinecone vector store
+docsearch = PineconeVectorStore.from_documents(
+    documents=text_chunks,
+    index_name=index_name,
+    embedding=embeddings,
+)
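This script assumes the "portfolio" index already exists in Pinecone; a one-time creation sketch (the serverless cloud and region values are assumptions, and dimension 384 matches the all-MiniLM-L6-v2 embeddings used above) could look like:

# Hypothetical one-time index creation (cloud/region are assumptions; dimension matches the embeddings).
import os
from pinecone import Pinecone, ServerlessSpec

pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])
if "portfolio" not in pc.list_indexes().names():
    pc.create_index(
        name="portfolio",
        dimension=384,   # sentence-transformers/all-MiniLM-L6-v2 output size
        metric="cosine",
        spec=ServerlessSpec(cloud="aws", region="us-east-1"),  # assumed deployment target
    )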
static/icon.png
ADDED
static/style.css
ADDED
@@ -0,0 +1,113 @@
+/* Fonts & Body */
+body {
+    margin: 0;
+    font-family: 'Poppins', sans-serif;
+    background: linear-gradient(135deg, #1f2c34, #1b1b2f);
+}
+
+/* Chat container */
+.chat-container {
+    padding: 20px;
+}
+
+/* Chat box */
+.chat-box {
+    width: 450px;
+    max-width: 95%;
+    height: 650px;
+    background-color: #222831;
+    border-radius: 20px;
+    display: flex;
+    flex-direction: column;
+    overflow: hidden;
+}
+
+/* Chat header */
+.chat-header {
+    background-color: #30475e;
+    color: white;
+    border-top-left-radius: 20px;
+    border-top-right-radius: 20px;
+}
+
+.chat-avatar {
+    width: 55px;
+    height: 55px;
+    border-radius: 50%;
+    border: 2px solid #00adb5;
+}
+
+/* Chat body */
+.chat-body {
+    flex: 1;
+    overflow-y: auto;
+    padding: 15px;
+}
+
+/* Chat footer */
+.chat-footer {
+    background-color: #393e46;
+    border-bottom-left-radius: 20px;
+    border-bottom-right-radius: 20px;
+}
+
+/* Messages */
+.msg-text {
+    position: relative;
+    max-width: 80%;
+    word-wrap: break-word;
+    box-shadow: 0 2px 6px rgba(0,0,0,0.2);
+}
+
+.msg-text .time {
+    font-size: 10px;
+    position: absolute;
+    bottom: -15px;
+    right: 8px;
+    color: rgba(255,255,255,0.5);
+}
+
+/* User & Bot message */
+.user-message .msg-text {
+    background-color: #00adb5;
+    color: white;
+}
+
+.bot-message .msg-text {
+    background-color: #393e46;
+    color: #eeeeee;
+}
+
+/* Input & Button */
+input.form-control {
+    border-radius: 25px;
+    padding: 12px 15px;
+    background-color: #222831;
+    border: 1px solid #00adb5;
+    color: #eee;
+}
+
+input.form-control:focus {
+    box-shadow: none;
+    outline: none;
+}
+
+.btn-accent {
+    border-radius: 25px;
+    background-color: #00adb5;
+    color: white;
+    padding: 0 18px;
+}
+
+.btn-accent:hover {
+    background-color: #00b8c4;
+}
+
+/* Responsive */
+@media (max-width: 500px) {
+    .chat-box {
+        height: 80vh;
+        width: 95%;
+    }
+}
+
templates/chat.html
ADDED
@@ -0,0 +1,72 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Aiyub's Medicalbot</title>
+    <link href="https://cdnjs.cloudflare.com/ajax/libs/bootstrap/5.3.2/css/bootstrap.min.css" rel="stylesheet">
+    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
+    <link href="https://fonts.googleapis.com/css2?family=Poppins:wght@400;500;600&display=swap" rel="stylesheet">
+    <link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}">
+</head>
+<body>
+
+<div class="chat-container d-flex justify-content-center align-items-center vh-100">
+    <div class="chat-box shadow-lg">
+        <div class="chat-header d-flex align-items-center p-3">
+            <img src="/static/icon.png" class="chat-avatar me-3">
+            <div>
+                <h5 class="mb-0">Aiyub's Medicalbot</h5>
+                <small class="text-light">Ask me anything about me!</small>
+            </div>
+        </div>
+
+        <div id="messageFormeight" class="chat-body p-3">
+            <!-- Messages will appear here -->
+        </div>
+
+        <div class="chat-footer p-3">
+            <form id="messageArea" class="d-flex">
+                <input type="text" id="text" name="msg" class="form-control me-2" placeholder="Type a message..." required>
+                <button type="submit" class="btn btn-accent"><i class="fas fa-paper-plane"></i></button>
+            </form>
+        </div>
+    </div>
+</div>
+
+<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.7.1/jquery.min.js"></script>
+<script src="https://cdnjs.cloudflare.com/ajax/libs/bootstrap/5.3.2/js/bootstrap.bundle.min.js"></script>
+
+<script>
+$(document).ready(function() {
+    $("#messageArea").on("submit", function(event) {
+        event.preventDefault();
+
+        const date = new Date();
+        const str_time = date.getHours().toString().padStart(2,'0') + ":" + date.getMinutes().toString().padStart(2,'0');
+        const rawText = $("#text").val();
+        $("#text").val("");
+
+        const userHtml = `
+            <div class="message user-message mb-3 d-flex justify-content-end">
+                <div class="msg-text bg-accent text-white p-3 rounded-4">
+                    ${rawText} <span class="time">${str_time}</span>
+                </div>
+            </div>`;
+        $("#messageFormeight").append(userHtml).scrollTop($("#messageFormeight")[0].scrollHeight);
+
+        $.post("/get", { msg: rawText }, function(data) {
+            const botHtml = `
+                <div class="message bot-message mb-3 d-flex justify-content-start">
+                    <div class="msg-text bg-dark-light text-light p-3 rounded-4">
+                        ${data} <span class="time">${str_time}</span>
+                    </div>
+                </div>`;
+            $("#messageFormeight").append(botHtml).scrollTop($("#messageFormeight")[0].scrollHeight);
+        });
+    });
+});
+</script>
+
+</body>
+</html>
templates/index.html
ADDED
@@ -0,0 +1,87 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Shanin Portfolio</title>
+
+    <!-- Tailwind CSS CDN -->
+    <script src="https://cdn.tailwindcss.com"></script>
+    <!-- Font Awesome -->
+    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
+    <link href="https://fonts.googleapis.com/css2?family=Poppins:wght@400;500;600&display=swap" rel="stylesheet">
+</head>
+<body class="bg-gradient-to-br from-gray-900 via-slate-900 to-gray-800 text-gray-200 font-poppins">
+
+<div class="flex justify-center items-center h-screen p-4">
+    <div class="flex flex-col w-full max-w-xl h-[650px] bg-slate-800/60 border border-slate-700 rounded-2xl shadow-xl overflow-hidden">
+
+        <!-- Header -->
+        <div class="flex justify-between items-center px-5 py-4 bg-slate-900/70 border-b border-slate-700">
+            <div class="flex items-center">
+                <img src="/static/icon.png" alt="Avatar" class="w-12 h-12 rounded-full border-2 border-cyan-400 mr-4">
+                <div>
+                    <h5 class="text-white text-lg font-semibold">💬 Aiyub's Medical Assistant</h5>
+                    <p class="text-gray-400 text-sm">Ask me anything about my work!</p>
+                </div>
+            </div>
+            <button class="text-cyan-400 hover:text-cyan-300 border-0 p-0">
+                <i class="fas fa-arrow-left"></i>
+            </button>
+        </div>
+
+        <!-- Chat Body -->
+        <div id="messageFormeight" class="flex-1 overflow-y-auto p-4 space-y-4 bg-slate-950/50 scroll-smooth"></div>
+
+        <!-- Footer / Input -->
+        <div class="border-t border-slate-700 bg-slate-900/70 p-4">
+            <form id="messageArea" class="flex items-center space-x-3">
+                <input type="text" id="text" name="msg" placeholder="Type a message..." required
+                       class="flex-1 px-4 py-3 rounded-full bg-slate-800 border border-cyan-500/40 focus:outline-none focus:ring-2 focus:ring-cyan-400 text-gray-100 placeholder-gray-400">
+                <button type="submit" class="bg-cyan-500 hover:bg-cyan-400 transition rounded-full p-3 text-white">
+                    <i class="fas fa-paper-plane"></i>
+                </button>
+            </form>
+        </div>
+
+    </div>
+</div>
+
+
+<!-- Scripts -->
+<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.7.1/jquery.min.js"></script>
+<script>
+$(document).ready(function() {
+    $("#messageArea").on("submit", function(event) {
+        event.preventDefault();
+
+        const date = new Date();
+        const str_time = date.getHours().toString().padStart(2,'0') + ":" + date.getMinutes().toString().padStart(2,'0');
+        const rawText = $("#text").val();
+        $("#text").val("");
+
+        const userHtml = `
+            <div class="message user-message mb-3 flex justify-end">
+                <div class="msg-text bg-cyan-500 text-white p-3 rounded-2xl relative shadow-sm max-w-[80%]">
+                    ${rawText}
+                    <span class="absolute text-[10px] text-white/50 bottom-[-15px] right-2">${str_time}</span>
+                </div>
+            </div>`;
+        $("#messageFormeight").append(userHtml).scrollTop($("#messageFormeight")[0].scrollHeight);
+
+        $.post("/get", { msg: rawText }, function(data) {
+            const botHtml = `
+                <div class="message bot-message mb-3 flex justify-start">
+                    <div class="msg-text bg-slate-800/80 text-gray-200 p-3 rounded-2xl relative shadow-sm border border-slate-700 max-w-[80%]">
+                        ${data}
+                        <span class="absolute text-[10px] text-white/50 bottom-[-15px] right-2">${str_time}</span>
+                    </div>
+                </div>`;
+            $("#messageFormeight").append(botHtml).scrollTop($("#messageFormeight")[0].scrollHeight);
+        });
+    });
+});
+</script>
+
+</body>
+</html>