Spaces:
Configuration error
Configuration error
Upload 14 files
Browse files- app.py +68 -0
- prompts/tool_prompts.json +17 -0
- requirements.txt +9 -0
- static/script.js +49 -0
- static/style.css +75 -0
- templates/index.html +21 -0
- tools/__pycache__/calculator.cpython-311.pyc +0 -0
- tools/__pycache__/code_inspector.cpython-311.pyc +0 -0
- tools/__pycache__/email_drafter.cpython-311.pyc +0 -0
- tools/__pycache__/pdf_qa.cpython-311.pyc +0 -0
- tools/calculator.py +28 -0
- tools/code_inspector.py +14 -0
- tools/email_drafter.py +8 -0
- tools/pdf_qa.py +66 -0
app.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
"""FastAPI app exposing a simple multi-tool chat endpoint backed by Gemini."""
from fastapi import FastAPI, File, UploadFile, Form
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
import uvicorn
import os
from dotenv import load_dotenv
# Load .env before reading GEMINI_API_KEY below.
load_dotenv()

import json
import google.generativeai as genai
from tools import calculator, email_drafter, pdf_qa, code_inspector

# Configure Gemini API
try:
    genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
    model = genai.GenerativeModel('gemini-2.5-flash')
except Exception as e:
    # The app still starts without a model; tool calls that need Gemini
    # will receive model=None and are expected to handle it.
    print(f"Error configuring Gemini or loading model: {e}")
    model = None  # Set model to None if configuration fails

app = FastAPI()

# Serve static assets (script.js, style.css) and locate the HTML templates.
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
| 27 |
+
|
| 28 |
+
@app.get("/", response_class=HTMLResponse)
async def read_root():
    """Serve the chat UI page from the templates directory."""
    with open("templates/index.html") as page:
        markup = page.read()
    return HTMLResponse(content=markup, status_code=200)
|
| 32 |
+
|
| 33 |
+
@app.post("/mcp")
async def mcp_endpoint(prompt: str = Form(...), file: UploadFile = File(None)):
    """Route a user prompt (and optional upload) to the matching tool.

    Args:
        prompt: Free-form user request from the chat form.
        file: Optional uploaded file; only PDFs are supported.

    Returns:
        JSONResponse of the form {"response": <str>}.
    """
    if file:
        # Use only the basename so a crafted filename such as "../../x"
        # cannot write outside the working directory (path traversal).
        safe_name = os.path.basename(file.filename or "upload")
        file_path = f"temp_{safe_name}"
        with open(file_path, "wb") as buffer:
            buffer.write(await file.read())

        try:
            # content_type may be None for some clients; guard before "in"
            # to avoid a TypeError on None.
            if file.content_type and "pdf" in file.content_type:
                response = pdf_qa.answer_pdf_question(file_path, prompt, model)
            else:
                response = "Unsupported file type."
        finally:
            # Always remove the temp file, even if the tool raises.
            os.remove(file_path)

        return JSONResponse(content={"response": response})

    # NOTE: the original loaded prompts/tool_prompts.json here via
    # json.load(open(...)) but never used the result and leaked the file
    # handle; the dead load has been removed.
    lowered = prompt.lower()

    # Keyword -> calculator operation dispatch; first match wins, matching
    # the original if/elif ordering.
    for keyword in ("sum", "multiply", "subtract", "divide"):
        if keyword in lowered:
            response = calculator.calculate(prompt, keyword)
            break
    else:
        if "draft an email" in lowered:
            response = email_drafter.draft_email(prompt, model)
        elif "check code" in lowered:
            response = code_inspector.check_code(prompt, model)
        else:
            response = "I'm sorry, I don't understand that request."

    return JSONResponse(content={"response": response})
|
| 66 |
+
|
| 67 |
+
if __name__ == "__main__":
    # Local development entry point; serves on http://127.0.0.1:7860.
    uvicorn.run(app, host="127.0.0.1", port=7860)
|
prompts/tool_prompts.json
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"calculator": {
|
| 3 |
+
"sum": "To calculate the sum of two numbers, please say something like 'sum 5 and 3'.",
|
| 4 |
+
"multiply": "To multiply two numbers, please say something like 'multiply 5 by 3'.",
|
| 5 |
+
"subtract": "To subtract two numbers, please say something like 'subtract 5 from 3'.",
|
| 6 |
+
"divide": "To divide two numbers, please say something like 'divide 5 by 3'."
|
| 7 |
+
},
|
| 8 |
+
"email_drafter": {
|
| 9 |
+
"draft": "To draft an email, please say something like 'draft an email to [recipient] about [subject and body]'."
|
| 10 |
+
},
|
| 11 |
+
"pdf_qa": {
|
| 12 |
+
"answer": "To answer questions about a PDF, please upload a PDF file and ask your question."
|
| 13 |
+
},
|
| 14 |
+
"code_inspector": {
|
| 15 |
+
"check": "To check Python code for syntax and indentation errors, please say 'check code: [your python code]'."
|
| 16 |
+
}
|
| 17 |
+
}
|
requirements.txt
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn
|
| 3 |
+
python-multipart
|
| 4 |
+
Jinja2
|
| 5 |
+
PyPDF2
|
| 6 |
+
google-generativeai
|
| 7 |
+
qdrant-client
|
| 8 |
+
sentence-transformers
|
| 9 |
+
python-dotenv
|
static/script.js
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Handle chat form submission: POST the prompt (and optional file) to /mcp,
// echoing the user's message and the server's reply into the chat box.
document.getElementById('mcp-form').addEventListener('submit', async (event) => {
    event.preventDefault();

    const promptField = document.getElementById('prompt-input');
    const fileField = document.getElementById('file-input');
    const chatBox = document.getElementById('chat-box');

    const promptText = promptField.value;
    const chosenFile = fileField.files[0];

    const payload = new FormData();
    payload.append('prompt', promptText);
    if (chosenFile) {
        payload.append('file', chosenFile);
    }

    // Helper: append one chat bubble with the given CSS class.
    const appendBubble = (text, cssClass) => {
        const bubble = document.createElement('div');
        bubble.classList.add('message', cssClass);
        bubble.textContent = text;
        chatBox.appendChild(bubble);
    };

    // Echo the user's prompt, then clear both inputs for the next message.
    appendBubble(promptText, 'user-message');
    promptField.value = '';
    fileField.value = '';

    try {
        const reply = await fetch('/mcp', {
            method: 'POST',
            body: payload,
        });
        const data = await reply.json();

        appendBubble(data.response, 'mcp-message');
        chatBox.scrollTop = chatBox.scrollHeight; // keep newest message visible
    } catch (error) {
        console.error('Error:', error);
        appendBubble('Error: Could not connect to the server.', 'mcp-message');
        chatBox.scrollTop = chatBox.scrollHeight; // keep newest message visible
    }
});
|
static/style.css
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Page background: center the chat container both ways. */
body {
    font-family: Arial, sans-serif;
    display: flex;
    justify-content: center;
    align-items: center;
    min-height: 100vh;
    background-color: #f4f4f4;
    margin: 0;
}

/* White card holding the whole chat UI. */
.container {
    background-color: #fff;
    padding: 20px;
    border-radius: 8px;
    box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
    width: 90%;
    max-width: 600px;
}

h1 {
    text-align: center;
    color: #333;
}

/* Scrollable message history. */
.chat-box {
    border: 1px solid #ddd;
    padding: 10px;
    height: 300px;
    overflow-y: scroll;
    margin-bottom: 10px;
    border-radius: 4px;
    background-color: #e9e9e9;
}

/* Shared bubble styling; user/mcp variants below set color + alignment. */
.message {
    margin-bottom: 8px;
    padding: 5px 10px;
    border-radius: 4px;
}

.user-message {
    background-color: #dcf8c6;
    text-align: right;
}

.mcp-message {
    background-color: #c6e8f8;
    text-align: left;
}

/* Input row: text field, file picker, and send button side by side. */
form {
    display: flex;
    gap: 10px;
}

input[type="text"],
input[type="file"] {
    flex-grow: 1;
    padding: 10px;
    border: 1px solid #ddd;
    border-radius: 4px;
}

button {
    padding: 10px 15px;
    background-color: #007bff;
    color: white;
    border: none;
    border-radius: 4px;
    cursor: pointer;
}

button:hover {
    background-color: #0056b3;
}
|
templates/index.html
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>MCP Server</title>
    <link rel="stylesheet" href="/static/style.css">
</head>
<body>
    <div class="container">
        <h1>MCP Server Interaction</h1>
        <!-- Message history; filled dynamically by script.js. -->
        <div class="chat-box" id="chat-box"></div>
        <!-- Submission is intercepted in script.js and sent to POST /mcp. -->
        <form id="mcp-form">
            <input type="text" id="prompt-input" placeholder="Enter your prompt..." required>
            <input type="file" id="file-input">
            <button type="submit">Send</button>
        </form>
    </div>
    <script src="/static/script.js"></script>
</body>
</html>
|
tools/__pycache__/calculator.cpython-311.pyc
ADDED
|
Binary file (1.69 kB). View file
|
|
|
tools/__pycache__/code_inspector.cpython-311.pyc
ADDED
|
Binary file (1.27 kB). View file
|
|
|
tools/__pycache__/email_drafter.cpython-311.pyc
ADDED
|
Binary file (739 Bytes). View file
|
|
|
tools/__pycache__/pdf_qa.cpython-311.pyc
ADDED
|
Binary file (4.11 kB). View file
|
|
|
tools/calculator.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def calculate(prompt, operation):
    """Apply a basic arithmetic operation to the first two numbers in *prompt*.

    Args:
        prompt: Free-form text containing at least two numbers. Negative and
            decimal numbers are now accepted (the previous ``str.isdigit``
            filter silently dropped "-3" and "2.5").
        operation: One of "sum", "multiply", "subtract", "divide".

    Returns:
        A human-readable sentence with the result, or an error message.
    """
    import re  # local import keeps the module's public surface unchanged

    try:
        # Integers stay ints so existing integer-input outputs are unchanged.
        tokens = re.findall(r"-?\d+(?:\.\d+)?", prompt)
        numbers = [float(t) if "." in t else int(t) for t in tokens]
        if len(numbers) < 2:
            return "Please provide at least two numbers for calculation."

        num1, num2 = numbers[0], numbers[1]

        if operation == "sum":
            result = num1 + num2
            return f"The sum of {num1} and {num2} is {result}."
        elif operation == "multiply":
            result = num1 * num2
            return f"The product of {num1} and {num2} is {result}."
        elif operation == "subtract":
            result = num1 - num2
            return f"The difference between {num1} and {num2} is {result}."
        elif operation == "divide":
            if num2 == 0:
                return "Cannot divide by zero."
            result = num1 / num2
            return f"The division of {num1} by {num2} is {result}."
        else:
            return "Invalid operation."
    except Exception as e:
        return f"Error during calculation: {e}"
|
tools/code_inspector.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import google.generativeai as genai
|
| 2 |
+
|
| 3 |
+
def check_code(prompt, model):
    """Extract Python code after the 'check code:' marker and review it via Gemini.

    The marker is matched case-insensitively, but the code itself is sliced
    from the ORIGINAL prompt so its casing is preserved. (The previous version
    lowercased the whole prompt before extracting, which corrupted the user's
    code — e.g. ``True`` became ``true`` and class names lost their case.)

    Args:
        prompt: User text expected to contain 'check code: <python code>'.
        model: A Gemini model object with a ``generate_content`` method.

    Returns:
        The model's analysis text, or an error/usage message string.
    """
    code_prefix = "check code:"
    marker_at = prompt.lower().find(code_prefix)
    if marker_at == -1:
        return "Please provide the Python code after 'check code:'."
    # Slice from the original (un-lowercased) prompt to keep code casing.
    code = prompt[marker_at + len(code_prefix):].strip()

    try:
        response = model.generate_content(f"Analyze the following Python code for syntax errors, logical errors, and best practice violations. Provide detailed explanations and suggest fixes:\n\n```python\n{code}\n```")
        return response.text
    except Exception as e:
        return f"Error checking code with Gemini: {e}"
|
tools/email_drafter.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import google.generativeai as genai
|
| 2 |
+
|
| 3 |
+
def draft_email(prompt, model):
    """Ask Gemini to write an email for the user's request.

    Args:
        prompt: The user's free-form email request.
        model: A Gemini model object with a ``generate_content`` method.

    Returns:
        The drafted email text, or an error string if generation fails.
    """
    try:
        request_text = f"Draft an email based on the following request: {prompt}"
        return model.generate_content(request_text).text
    except Exception as e:
        return f"Error drafting email with Gemini: {e}"
|
tools/pdf_qa.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import PyPDF2
import google.generativeai as genai
from qdrant_client import QdrantClient, models
from sentence_transformers import SentenceTransformer
import os

# Initialize Qdrant client and Sentence Transformer model
# ":memory:" keeps all vectors in-process, so the index is lost on restart.
client = QdrantClient(":memory:")  # Use in-memory Qdrant for simplicity
encoder = SentenceTransformer('all-MiniLM-L6-v2')

# Single shared collection name for all uploaded PDFs.
COLLECTION_NAME = "pdf_documents"
|
| 12 |
+
|
| 13 |
+
def create_collection_if_not_exists():
    """Create the Qdrant collection on first use; no-op if it already exists.

    ``get_collection`` raises when the collection is missing, so the except
    branch performs the actual creation (EAFP style).
    """
    try:
        client.get_collection(collection_name=COLLECTION_NAME)
    except Exception:
        client.create_collection(
            collection_name=COLLECTION_NAME,
            # Vector size must match the sentence-transformer's embedding dim.
            vectors_config=models.VectorParams(size=encoder.get_sentence_embedding_dimension(), distance=models.Distance.COSINE),
        )

# Ensure the collection exists at import time.
create_collection_if_not_exists()
|
| 23 |
+
|
| 24 |
+
def answer_pdf_question(pdf_path, question, gemini_model):
    """Answer *question* about the PDF at *pdf_path* via RAG over Qdrant.

    Extracts the PDF's text, chunks and embeds it, indexes the chunks in the
    shared in-memory Qdrant collection, retrieves the 3 most relevant chunks
    for the question, and asks Gemini to answer grounded in that context.

    Args:
        pdf_path: Filesystem path to the uploaded PDF.
        question: The user's question about the document.
        gemini_model: A Gemini model object with ``generate_content``.

    Returns:
        The model's answer text, or an error string on failure.
    """
    try:
        # Extract text from every page of the PDF.
        # extract_text() may return None for image-only pages; treat as "".
        text = ""
        with open(pdf_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            for page in reader.pages:
                text += page.extract_text() or ""

        # Simple fixed-size chunking (500 chars per chunk).
        chunks = [text[i:i + 500] for i in range(0, len(text), 500)]
        points = [
            models.PointStruct(
                id=i,
                vector=encoder.encode(chunk).tolist(),
                payload={"text": chunk},
            )
            for i, chunk in enumerate(chunks)
        ]

        # BUG FIX: point ids restart at 0 for every upload, so chunks left
        # over from a previous, longer PDF would survive the overwrite and
        # pollute search results. Recreate the collection to start clean.
        try:
            client.delete_collection(collection_name=COLLECTION_NAME)
        except Exception:
            pass  # best-effort: collection may not exist yet
        create_collection_if_not_exists()

        client.upsert(
            collection_name=COLLECTION_NAME,
            points=points,
            wait=True
        )

        # Retrieve the 3 chunks most similar to the question.
        query_vector = encoder.encode(question).tolist()
        search_result = client.search(
            collection_name=COLLECTION_NAME,
            query_vector=query_vector,
            limit=3  # Retrieve top 3 most relevant chunks
        )

        context = " ".join(hit.payload['text'] for hit in search_result)

        # Use Gemini to answer the question based on context
        response = gemini_model.generate_content(f"Context: {context}\n\nQuestion: {question}\n\nAnswer:")
        return response.text

    except Exception as e:
        return f"Error processing PDF or answering question: {e}"
|