File size: 7,887 Bytes
1a4a292
 
0108a14
1a4a292
 
0108a14
 
 
 
1a4a292
 
 
 
 
0108a14
 
1a4a292
0108a14
1a4a292
0108a14
 
12af9a4
0108a14
 
 
1a4a292
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0108a14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1a4a292
 
 
 
 
 
 
 
0108a14
 
1a4a292
 
 
0108a14
003d8eb
1a4a292
 
988a763
 
 
003d8eb
 
 
 
 
9a13963
 
 
003d8eb
 
 
 
 
 
 
 
 
 
 
850c0a2
988a763
9a13963
 
 
 
 
 
988a763
 
 
850c0a2
988a763
 
9a13963
 
 
 
988a763
9a13963
988a763
 
1a4a292
850c0a2
988a763
850c0a2
 
988a763
850c0a2
988a763
850c0a2
 
 
988a763
 
850c0a2
 
988a763
 
 
850c0a2
 
 
 
 
988a763
850c0a2
 
 
 
 
 
 
 
 
 
 
988a763
850c0a2
988a763
9a13963
 
 
988a763
850c0a2
 
 
988a763
 
 
 
fb50275
850c0a2
 
 
fb50275
0d5b142
003d8eb
 
 
 
 
 
850c0a2
 
988a763
 
 
850c0a2
 
 
 
 
1a4a292
 
 
7f043be
003d8eb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1a4a292
 
0108a14
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
import os
from pathlib import Path
from typing import Optional, List, Any
import gradio as gr

# Fix OpenMP environment variable issue BEFORE other imports
os.environ["OMP_NUM_THREADS"] = "1"

# NO langchain.chains anymore (hopefully this fixes it)
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_text_splitters import MarkdownHeaderTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_core.language_models.llms import LLM
from huggingface_hub import InferenceClient

# Read the Hugging Face API token; the whole app depends on it, so fail fast.
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")

if not hf_token:
    print("ERROR: Token not found in environment variables!")
    # `exit()` is injected by the `site` module and is not guaranteed to be
    # available (e.g. `python -S`, embedded interpreters); raising SystemExit
    # is the reliable built-in equivalent with the same exit status.
    raise SystemExit(1)

print("Loading portfolio data...")

# Load the markdown portfolio
# NOTE(review): expects portfolio_data.md in the current working directory;
# a missing file raises here at startup — confirm that is intended.
loader = UnstructuredMarkdownLoader("portfolio_data.md")
data = loader.load()

# Split text by Headers
# Each (marker, label) pair maps a markdown heading level to a metadata key
# attached to the resulting chunks.
headers_to_split_on = [
    ("#", "Header 1"),
    ("##", "Header 2"),
    ("###", "Header 3"),
]
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
# The loader yields one Document per file; split its full text into
# header-scoped chunks used as retrieval units.
splits = markdown_splitter.split_text(data[0].page_content)

# Creating searchable "Embeddings"
# sentence-transformers model used to embed each chunk for similarity search.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Store embeddings in ChromaDB
# In-memory Chroma store (no persist_directory) — rebuilt on every startup.
vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)

# Load system prompt
try:
    system_prompt_content = Path("system_prompt.txt").read_text(encoding="utf-8")
except FileNotFoundError:
    # Degrade gracefully: the app still runs with a generic persona.
    print("Warning: system_prompt.txt not found, using default.")
    system_prompt_content = "You are a helpful assistant."

# Create prompt template
# {system_prompt} is bound once below via partial_variables; {context} and
# {input} are filled per request by the RAG chain.
template = """
{system_prompt}
Context: {context}
Question: {input}
Answer:"""

prompt = PromptTemplate(
    template=template,
    input_variables=["context", "input"],
    partial_variables={"system_prompt": system_prompt_content}
)

print("Initializing model...")

# Initialize InferenceClient
# One shared client for all requests; the model name is chosen per call by
# the LLM wrapper defined below.
client = InferenceClient(token=hf_token)


# Custom LLM wrapper for InferenceClient
class HFInferenceClientLLM(LLM):
    """LangChain ``LLM`` adapter around ``huggingface_hub.InferenceClient``.

    Wraps the chat-completion endpoint so the client can be plugged into a
    LangChain runnable pipeline without importing ``langchain.chains``.
    """

    # The InferenceClient instance (typed Any so pydantic does not try to
    # validate/serialize the client object).
    client: Any
    # Model repo id used for every completion call.
    model: str = "meta-llama/Llama-3.2-3B-Instruct"
    # Upper bound on generated tokens per reply.
    max_new_tokens: int = 512
    # Sampling temperature; was hard-coded to 0.7 inside _call, now a
    # configurable field with the same default (backward compatible).
    temperature: float = 0.7

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        """Send *prompt* as a single user message and return the reply text.

        Fix: the original silently ignored ``stop``; it is now forwarded to
        the API so stop sequences requested by LangChain are honoured.
        ``**kwargs`` absorbs extra arguments (e.g. ``run_manager``) that
        newer langchain-core versions may pass to ``_call``.
        """
        messages = [{"role": "user", "content": prompt}]
        response = self.client.chat_completion(
            messages=messages,
            model=self.model,
            max_tokens=self.max_new_tokens,
            temperature=self.temperature,
            stop=stop,  # previously dropped on the floor
        )
        return response.choices[0].message.content

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for serialization/telemetry."""
        return "huggingface_inference_client"


# Create LLM instance
# NOTE(review): `model` repeats the class default — kept explicit for clarity.
llm = HFInferenceClientLLM(client=client, model="meta-llama/Llama-3.2-3B-Instruct")

# Create retriever
# Similarity search over the Chroma store with default search settings.
retriever = vectorstore.as_retriever()

# Build RAG chain manually - NO langchain.chains imports needed!
def format_docs(docs):
    """Concatenate retrieved documents into one context string.

    Joins each document's ``page_content`` with a blank line between
    entries; the result fills the ``{context}`` slot of the prompt.
    """
    contents = (doc.page_content for doc in docs)
    return "\n\n".join(contents)

# Create RAG chain using RunnablePassthrough
# The dict step fans the incoming question out to two branches: the
# retriever fetches matching chunks (flattened by format_docs into one
# string), while RunnablePassthrough forwards the raw question. The prompt
# merges both, the LLM answers, and StrOutputParser normalises the result
# to a plain string.
rag_chain = (
    {
        "context": retriever | format_docs,
        "input": RunnablePassthrough()
    }
    | prompt
    | llm
    | StrOutputParser()
)

print("RAG system ready!")


# Gradio chat function
def chat_with_portfolio(message, history):
    """Gradio chat handler: answer *message* via the RAG chain.

    `history` is supplied by gr.ChatInterface but unused — the chain is
    stateless per question. Failures are reported back into the chat as
    text rather than crashing the UI.
    """
    try:
        return rag_chain.invoke(message)
    except Exception as e:  # broad on purpose: surface any error in the chat
        return f"Error: {str(e)}"


# Custom CSS injected into gr.Blocks, written against Gradio 5.0 markup:
# full-page gradient background, translucent "glass" chat surfaces, and
# explicit button/icon sizing. The entire value is a runtime string handed
# to Gradio, so the rules below are CSS, not Python.
custom_css = """
@import url('https://fonts.googleapis.com/css2?family=Quicksand:wght@400;600&display=swap');
* { 
    font-family: 'Quicksand', sans-serif !important; 
}
/* Fix for white space - apply gradient to body and html */
body, html {
    margin: 0 !important;
    padding: 0 !important;
    height: 100% !important;
    background: linear-gradient(135deg, #a78bfa 0%, #6366f1 50%, #3b82f6 100%) !important;
    background-attachment: fixed !important;
}
.gradio-container {
    background: transparent !important;
    min-height: 100vh !important;
}
.main {
    background: transparent !important;
}
/* Make the app container fill the space */
.app {
    background: transparent !important;
}
/* Chat messages */
.message-wrap {
    background: rgba(255, 255, 255, 0.15) !important;
    backdrop-filter: blur(12px) !important;
    -webkit-backdrop-filter: blur(12px) !important;
    border: 1px solid rgba(255, 255, 255, 0.2) !important;
    border-radius: 20px !important;
}
.message {
    color: white !important;
}
/* Input textarea */
.input-wrap textarea, 
textarea {
    background: rgba(255, 255, 255, 0.1) !important;
    backdrop-filter: blur(8px) !important;
    color: white !important;
    border: 1px solid rgba(255, 255, 255, 0.3) !important;
    border-radius: 12px !important;
}
textarea::placeholder {
    color: rgba(255, 255, 255, 0.6) !important;
}
/* Fix button and icon sizes for Gradio 5.0 */
button {
    height: 44px !important;
    min-height: 44px !important;
    max-height: 44px !important;
    padding: 8px 12px !important;
}
/* Target SVG icons inside buttons */
button svg,
button img {
    width: 20px !important;
    height: 20px !important;
    min-width: 20px !important;
    min-height: 20px !important;
    max-width: 20px !important;
    max-height: 20px !important;
}
/* Submit button specific sizing */
.submit-btn,
button[type="submit"],
.chatbot button[aria-label*="Submit"],
.chatbot button[aria-label*="Send"] {
    width: 60px !important;
    min-width: 60px !important;
    max-width: 60px !important;
}
/* Action buttons (clear, undo, etc) */
.chatbot .action-button,
.chatbot button[aria-label*="Clear"],
.chatbot button[aria-label*="Undo"],
.chatbot button[aria-label*="Delete"] {
    width: 44px !important;
    min-width: 44px !important;
    max-width: 44px !important;
}
/* Example buttons */
.examples button {
    background: rgba(255, 255, 255, 0.1) !important;
    border: 1px solid rgba(255, 255, 255, 0.2) !important;
    color: white !important;
    border-radius: 12px !important;
    height: auto !important;
    max-height: none !important;
    padding: 12px 16px !important;
}
.examples button:hover {
    background: rgba(255, 255, 255, 0.2) !important;
}
/* Chatbot container - INCREASED SIZE to 700px */
.chatbot {
    background: rgba(255, 255, 255, 0.05) !important;
    border-radius: 20px !important;
    min-height: 700px !important;
/* Delete this line - height: 700px !important; */
}
/* Center and constrain the content */
.contain {
    max-width: 1200px !important;
    margin: 0 auto !important;
    padding: 20px !important;
}
/* Hide footer */
footer { 
    display: none !important; 
}
/* Additional icon fix for any nested elements */
button * svg,
button * img {
    width: 20px !important;
    height: 20px !important;
}
"""

with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
    with gr.Column():
        gr.Markdown(
            "<h1 style='text-align: center; color: white; text-shadow: 2px 2px 4px rgba(0,0,0,0.3); margin-top: 20px;'>πŸˆβ€β¬› Chat with Adda-Bot</h1>"
        )
        
        gr.ChatInterface(
            fn=chat_with_portfolio,
            description="<span style='color: white; font-weight: 600;'>Ask me anything about Adda's portfolio!</span>",
            examples=[
                "What is Adda's experience with Python?",
                "Tell me about her education.",
                "What projects has Adda worked on?"
            ],
            type="messages",
            textbox=gr.Textbox(placeholder="Ask about Adda's portfolio...", scale=7),
            submit_btn="Send",
        )

if __name__ == "__main__":
    demo.launch()