HQ_Project_EN / pages /Project_2.2_-_Langchain_VectorDB.py
1mpreccable's picture
added llm analyse for web page
e44b3be
raw
history blame
751 Bytes
"""Streamlit demo page: index a web page into a LangChain in-memory
vector store and answer questions about it through a retrieval graph."""
import os
from dotenv import load_dotenv
import streamlit as st
from src.functions_langchain import graph_init, initialize_inmemory_vector_store, load_and_split_documents_from_web

# Load API keys (e.g. for the embedding/LLM backend) from .env.
load_dotenv()

# Document to index: Pride and Prejudice from Project Gutenberg.
SOURCE_URL = "https://www.gutenberg.org/files/1342/1342-h/1342-h.htm"


@st.cache_resource(show_spinner="Indexing document...")
def _build_graph(url: str):
    """Build and cache the retrieval graph for *url*.

    Streamlit reruns the whole script on every widget interaction;
    without st.cache_resource the document would be re-downloaded,
    re-split, and re-embedded on every button click. Caching keys on
    *url*, so changing the source rebuilds the index.
    """
    vector_store = initialize_inmemory_vector_store()
    all_splits = load_and_split_documents_from_web(url)
    # Index chunks
    _ = vector_store.add_documents(documents=all_splits)
    return graph_init(vector_store)


st.title("Langchain VectorDB")
st.write("This is a simple demonstration of the Langchain VectorDB.")

graph = _build_graph(SOURCE_URL)

question = st.text_input("Enter a question:")
if st.button("Ask"):
    # Guard against an empty query before hitting the LLM backend.
    if not question.strip():
        st.warning("Please enter a question first.")
    else:
        st.write("Searching for an answer...")
        response = graph.invoke({"question": question})
        st.write(response["answer"])