from pinecone.grpc import PineconeGRPC as Pinecone
from dotenv import load_dotenv
import os
from langchain_community.document_loaders import TextLoader
from langchain_google_genai import GoogleGenerativeAI
from langchain_core.prompts import PromptTemplate
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_pinecone import PineconeVectorStore
from langchain_community.embeddings import SentenceTransformerEmbeddings
from sentence_transformers import SentenceTransformer
load_dotenv()
PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')
PINECONE_API_ENV = os.getenv('PINECONE_API_ENV')
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
os.environ['PINECONE_API_ENV'] = PINECONE_API_ENV
os.environ['PINECONE_API_KEY'] = PINECONE_API_KEY
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
# Initialize Pinecone
pinecone = Pinecone(api_key=PINECONE_API_KEY)
def query_pinecone(index_name, query, embeddings):
    """Run a similarity search against one Pinecone index.

    Args:
        index_name: Name of the Pinecone index to search.
        query: Free-text query string.
        embeddings: Embedding model used to vectorize the query.

    Returns:
        The list of matching documents, or an empty list if anything
        goes wrong (the error is printed, not raised).
    """
    try:
        store = PineconeVectorStore(index_name=index_name, embedding=embeddings)
        return store.similarity_search(query)
    except Exception as e:
        print(f"Error querying Pinecone: {e}")
        return []
def get_context_from_pinecone(query):
    """Collect context text for *query* from two Pinecone indexes.

    Each index is queried with a different embedding model — presumably
    the one it was built with (the "googlembeddings" index with Google
    embeddings, the other with a SentenceTransformer); verify against
    the indexing pipeline.

    Args:
        query: Free-text question used for the similarity searches.

    Returns:
        The page contents of every match concatenated into one string
        (empty string when neither index returns results).
    """
    GOOGLE_INDEX = "cve-data-googlembeddings"
    MINILM_INDEX = "cve-data"

    minilm_embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
    google_embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

    matches = (
        query_pinecone(GOOGLE_INDEX, query, google_embeddings)
        + query_pinecone(MINILM_INDEX, query, minilm_embeddings)
    )

    # str.join avoids the quadratic cost of repeated `context += ...`
    # while producing the identical concatenation.
    return "".join(match.page_content for match in matches)
def get_chatbot_response(user_question):
    """Answer *user_question* with Gemini, grounded in Pinecone context.

    Args:
        user_question: The user's free-text question.

    Returns:
        The raw LLM output produced for a prompt combining the
        retrieved context with the question.
    """
    model = GoogleGenerativeAI(
        model="gemini-1.5-pro",
        google_api_key=os.getenv("GOOGLE_API_KEY"),
    )

    retrieved_context = get_context_from_pinecone(user_question)

    # Prompt layout kept byte-identical to the original template.
    template = (
        "Context: {context}\n"
        "Question: {user_question}\n"
        "Answer: Let's think step by step.\n"
    )
    chain = PromptTemplate.from_template(template) | model

    return chain.invoke({
        "context": retrieved_context,
        "user_question": user_question,
    })