Spaces:
Runtime error
Runtime error
Upload 3 files
Browse files- rag/qdrant.js +64 -0
- rag/researchRag.js +52 -0
- rag/synthesisRag.js +50 -0
rag/qdrant.js
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
const { QdrantVectorStore } = require("@langchain/qdrant");
|
| 2 |
+
const { OpenAIEmbeddings } = require("@langchain/openai");
|
| 3 |
+
const { QdrantClient } = require("@qdrant/js-client-rest");
|
| 4 |
+
|
| 5 |
+
// Shared Qdrant client for this process. Remains null when QDRANT_URL is
// missing or malformed, in which case RAG functionality is disabled and
// getVectorStore() will throw.
let client = null;

try {
  const url = process.env.QDRANT_URL;
  const looksLikeHttpUrl =
    typeof url === 'string' &&
    (url.startsWith('http://') || url.startsWith('https://'));

  if (!looksLikeHttpUrl) {
    console.warn("Warning: Invalid or missing QDRANT_URL. RAG functionality will be disabled.");
  } else {
    client = new QdrantClient({
      url,
      apiKey: process.env.QDRANT_API_KEY,
    });
    console.log("Qdrant Client Initialized Successfully with URL:", url);
  }
} catch (e) {
  // Constructor failures (bad options, etc.) leave client null rather than
  // crashing module load.
  console.error("Failed to initialize Qdrant Client:", e.message);
}
|
| 21 |
+
|
| 22 |
+
/**
 * Returns a LangChain vector store backed by the given Qdrant collection.
 * If the collection does not exist yet, it is created with the dimensions of
 * OpenAI's text-embedding-3-small model (1536, cosine) and the lookup is
 * retried.
 *
 * @param {string} collectionName - Name of the Qdrant collection.
 * @returns {Promise<QdrantVectorStore>} Store bound to the collection.
 * @throws {Error} When the Qdrant client was never initialized
 *   (missing/invalid QDRANT_URL).
 */
async function getVectorStore(collectionName) {
  if (!client) {
    throw new Error("Qdrant client is not initialized. Check QDRANT_URL in .env");
  }

  const embeddings = new OpenAIEmbeddings({
    modelName: "text-embedding-3-small",
  });

  try {
    return await QdrantVectorStore.fromExistingCollection(embeddings, {
      client,
      collectionName,
    });
  } catch (e) {
    // Most likely the collection simply does not exist yet — try to create
    // it, then retry the proper fromExistingCollection path.
    try {
      const collections = await client.getCollections();
      const exists = collections.collections.some((c) => c.name === collectionName);

      if (!exists) {
        console.log(`Creating collection: ${collectionName}`);
        await client.createCollection(collectionName, {
          vectors: {
            size: 1536, // OpenAI text-embedding-3-small dimension
            distance: 'Cosine',
          },
        });
        // Retry now that the collection exists (the original code skipped
        // this retry and returned an unverified store).
        return await QdrantVectorStore.fromExistingCollection(embeddings, {
          client,
          collectionName,
        });
      }
    } catch (err) {
      console.warn(`Could not verify/create collection '${collectionName}':`, err.message);
    }

    // Last resort: hand back a store bound to the collection name. Surface
    // the original failure instead of silently swallowing it, so auth or
    // network problems stay visible in the logs.
    console.warn(`Falling back to direct store for '${collectionName}':`, e.message);
    return new QdrantVectorStore(embeddings, {
      client,
      collectionName,
    });
  }
}

module.exports = { getVectorStore, client };
|
rag/researchRag.js
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
const { getVectorStore } = require('./qdrant');
|
| 2 |
+
const { RecursiveCharacterTextSplitter } = require("@langchain/textsplitters");
|
| 3 |
+
const { model } = require('../model');
|
| 4 |
+
const { HumanMessage, SystemMessage } = require("@langchain/core/messages");
|
| 5 |
+
|
| 6 |
+
// Qdrant collection holding chunked research output for this module.
const COLLECTION_NAME = "research_collection";
|
| 7 |
+
|
| 8 |
+
/**
 * Splits research text into overlapping chunks and persists them in the
 * research collection. Best-effort: failures are logged, not thrown.
 *
 * @param {string} text - Full research text to index.
 * @returns {Promise<void>}
 */
async function storeResearch(text) {
  try {
    const vectorStore = await getVectorStore(COLLECTION_NAME);

    // 500-char chunks with 50-char overlap keep retrieval granular while
    // preserving context across boundaries.
    const chunker = new RecursiveCharacterTextSplitter({
      chunkSize: 500,
      chunkOverlap: 50,
    });
    const docs = await chunker.createDocuments([text]);

    await vectorStore.addDocuments(docs);
    console.log(`[Research RAG] Stored ${docs.length} chunks.`);
  } catch (error) {
    console.error("[Research RAG] Storage Error:", error);
  }
}
|
| 23 |
+
|
| 24 |
+
/**
 * Answers a question against the stored research chunks (RAG): retrieves the
 * top matching chunks, injects them as system context, replays prior turns,
 * then asks the model.
 *
 * @param {string} query - The user's question.
 * @param {Array<{role: string, content: string}>} [history] - Prior chat
 *   turns; entries with role 'user' become HumanMessage, all others are
 *   treated as assistant turns.
 * @returns {Promise<string>} The model's answer, or a fixed error string on
 *   failure.
 */
async function chatResearch(query, history = []) {
  try {
    // AIMessage comes from the same package the file already imports for
    // HumanMessage/SystemMessage.
    const { AIMessage } = require("@langchain/core/messages");

    const vectorStore = await getVectorStore(COLLECTION_NAME);
    const results = await vectorStore.similaritySearch(query, 3);
    const context = results.map(r => r.pageContent).join("\n\n");

    const systemPrompt = `You are a Research Assistant. Answer the user's question based strictly on the provided context.

Context:
${context}`;

    const messages = [
      new SystemMessage(systemPrompt),
      // Assistant turns must be AIMessage (not SystemMessage) so the model
      // sees the correct conversation roles.
      ...history.map(msg =>
        msg.role === 'user' ? new HumanMessage(msg.content) : new AIMessage(msg.content)
      ),
      new HumanMessage(query)
    ];

    const response = await model.invoke(messages);
    return response.content;
  } catch (error) {
    console.error("[Research RAG] Chat Error:", error);
    return "I encountered an error accessing the research data.";
  }
}

module.exports = { storeResearch, chatResearch };
|
rag/synthesisRag.js
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
const { getVectorStore } = require('./qdrant');
|
| 2 |
+
const { RecursiveCharacterTextSplitter } = require("@langchain/textsplitters");
|
| 3 |
+
const { model } = require('../model');
|
| 4 |
+
const { HumanMessage, SystemMessage } = require("@langchain/core/messages");
|
| 5 |
+
|
| 6 |
+
// Qdrant collection holding chunked synthesis/strategy insights for this module.
const COLLECTION_NAME = "synthesis_collection";
|
| 7 |
+
|
| 8 |
+
/**
 * Splits synthesized insight text into overlapping chunks and persists them
 * in the synthesis collection. Best-effort: failures are logged, not thrown.
 *
 * @param {string} text - Full insights text to index.
 * @returns {Promise<void>}
 */
async function storeInsights(text) {
  try {
    const vectorStore = await getVectorStore(COLLECTION_NAME);

    // 500-char chunks with 50-char overlap keep retrieval granular while
    // preserving context across boundaries.
    const chunker = new RecursiveCharacterTextSplitter({
      chunkSize: 500,
      chunkOverlap: 50,
    });
    const docs = await chunker.createDocuments([text]);

    await vectorStore.addDocuments(docs);
    console.log(`[Synthesis RAG] Stored ${docs.length} chunks.`);
  } catch (error) {
    console.error("[Synthesis RAG] Storage Error:", error);
  }
}
|
| 23 |
+
|
| 24 |
+
/**
 * Answers a question against the stored strategy insights (RAG): retrieves
 * the top matching chunks, injects them as system context, replays prior
 * turns, then asks the model.
 *
 * @param {string} query - The user's question.
 * @param {Array<{role: string, content: string}>} [history] - Prior chat
 *   turns; entries with role 'user' become HumanMessage, all others are
 *   treated as assistant turns.
 * @returns {Promise<string>} The model's answer, or a fixed error string on
 *   failure.
 */
async function chatInsights(query, history = []) {
  try {
    // AIMessage comes from the same package the file already imports for
    // HumanMessage/SystemMessage.
    const { AIMessage } = require("@langchain/core/messages");

    const vectorStore = await getVectorStore(COLLECTION_NAME);
    const results = await vectorStore.similaritySearch(query, 3);
    const context = results.map(r => r.pageContent).join("\n\n");

    const systemPrompt = `You are a Strategy Consultant. Answer the user's question based strictly on the provided context.

Context:
${context}`;

    const messages = [
      new SystemMessage(systemPrompt),
      // Assistant turns must be AIMessage (not SystemMessage) so the model
      // sees the correct conversation roles.
      ...history.map(msg =>
        msg.role === 'user' ? new HumanMessage(msg.content) : new AIMessage(msg.content)
      ),
      new HumanMessage(query)
    ];

    const response = await model.invoke(messages);
    return response.content;
  } catch (error) {
    console.error("[Synthesis RAG] Chat Error:", error);
    return "I encountered an error accessing the strategy insights.";
  }
}

module.exports = { storeInsights, chatInsights };
|