from langchain_community.document_loaders import TextLoader

# %%
from langchain_text_splitters import CharacterTextSplitter
from langchain_openai import OpenAIEmbeddings

# %%
from langchain_chroma import Chroma

# %%
from dotenv import load_dotenv

load_dotenv()

# %% [markdown]
# This shows True when the OpenAI and Hugging Face keys are stored in the .env file; load_dotenv reads them and writes them into the environment.

# %%
import pandas as pd

books = pd.read_csv("books_cleaned.csv")

# %%
books.head(5)

# %%
books["tagged_description"]

# %% [markdown]
# We created the tagged description (the isbn13 prepended to each description) so that we can build our vector search, which requires a unique identifier for every document.

# %%
with open("tagged_description.txt", "w") as f:
    f.write(books["tagged_description"].str.cat(sep="\n"))

# %% [markdown]
# LangChain does not work with a pandas dataframe, so we save only the tagged descriptions to a text file.

# %% [markdown]
# We did not use string matching because it is inefficient and slow.

# %% [markdown]
# Ask Manshu to explain the code.

# %%
# Load the text file and split it so each tagged description becomes one document.
raw_documents = TextLoader("tagged_description.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1, chunk_overlap=0, separator="\n")
documents = text_splitter.split_documents(raw_documents)

# %% [markdown]
# The reason we set the chunk size this small is that the splitter first looks for the separator closest to the index indicated by the chunk size. If the chunk size is larger than one, there is a chance a chunk will not split on a new line and will be split by size instead. Keeping it minimal makes sure the splitter prioritizes splitting on the separator rather than on the chunk size.

# %% [markdown]
# chunk_size=0 did not work, but chunk_size=1 worked fine.

# %%
documents[0]

# %%
# Embed every tagged description with OpenAI and index the vectors in Chroma.
db_books = Chroma.from_documents(documents, embedding=OpenAIEmbeddings())

# %%
query = "A book to teach children about nature"
docs = db_books.similarity_search(query, k=10)
docs

# %%
# The first token of each returned document is the isbn13, so we can map a hit back to its row.
books[books["isbn13"] == int(docs[0].page_content.split()[0].strip())]

# %%
def retrieve_semantic_recommendations(
    query: str,
    top_k: int = 10,
) -> pd.DataFrame:
    # Over-fetch from the vector store, then map the isbn13 prefix of each hit back to the dataframe.
    recs = db_books.similarity_search(query, k=50)
    isbns = [int(rec.page_content.strip('"').split()[0]) for rec in recs]
    # Keep only the requested number of recommendations.
    return books[books["isbn13"].isin(isbns)].head(top_k)


retrieve_semantic_recommendations("A book to teach children about nature")
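# %% [markdown]
# For reference, a minimal sketch of how the tagged description can be rebuilt from its parts (the "description" column name is an assumption about books_cleaned.csv): prepending the isbn13 keeps the identifier attached to the text all the way through the vector store.

# %%
# Sketch: prepend the isbn13 so each document is uniquely identifiable.
# Assumes books_cleaned.csv has a "description" column.
books["tagged_description"] = books["isbn13"].astype(str) + " " + books["description"]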
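# %% [markdown]
# As a sketch, the Chroma index can also be persisted to disk so the OpenAI embeddings are not recomputed on every run; "chroma_books" is an assumed local directory name, not part of the project.

# %%
# Build the index once and write it to disk.
db_books_persistent = Chroma.from_documents(
    documents,
    embedding=OpenAIEmbeddings(),
    persist_directory="chroma_books",
)

# In a later session, reload the index without re-embedding.
db_books_reloaded = Chroma(
    persist_directory="chroma_books",
    embedding_function=OpenAIEmbeddings(),
)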
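# %% [markdown]
# One caveat: isin() filtering returns rows in dataframe order, not similarity order. Below is a sketch of a ranking-preserving variant (retrieve_ranked_recommendations is a hypothetical helper, not part of the original notebook) that reindexes the dataframe by the isbn13 order the search returned.

# %%
def retrieve_ranked_recommendations(query: str, top_k: int = 10) -> pd.DataFrame:
    # Sketch: preserve the vector store's similarity ordering in the result.
    recs = db_books.similarity_search(query, k=50)
    isbns = [int(rec.page_content.strip('"').split()[0]) for rec in recs]
    # Reindex by the retrieved isbn13 order, dropping any isbn13 not present in books.
    ranked = books.set_index("isbn13").reindex(isbns).dropna(how="all").reset_index()
    return ranked.head(top_k)


retrieve_ranked_recommendations("A book to teach children about nature")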