import os

import chromadb
from llama_index.core import VectorStoreIndex
from llama_index.core.agent.workflow import ReActAgent
from llama_index.core.tools import QueryEngineTool
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_index.vector_stores.chroma import ChromaVectorStore


def initialize_web_agent(llm: HuggingFaceInferenceAPI) -> ReActAgent:
    """Build a ReAct agent backed by a persistent Chroma vector store.

    Opens (or creates) the local ``chat`` collection under ``./chat_db``,
    wraps it in a query-engine tool, and returns a ``ReActAgent`` that uses
    the supplied LLM both for retrieval synthesis and for agent reasoning.

    Args:
        llm: Hugging Face Inference-API LLM shared by the query engine and
            the agent itself.

    Returns:
        A configured ``ReActAgent`` exposing the vector-store query tool.
    """
    # Fix: the original also read HF_TOKEN into an unused local here;
    # the env variable was never passed to anything, so it is removed.

    # Persistent on-disk Chroma store; reuses the "chat" collection if it
    # already exists, otherwise creates an empty one.
    db = chromadb.PersistentClient(path="./chat_db")
    chroma_collection = db.get_or_create_collection("chat")
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

    # NOTE(review): assumes the collection was originally embedded with this
    # same model (BAAI/bge-small-en-v1.5) — confirm, otherwise similarity
    # search will be meaningless.
    embedding_model = HuggingFaceEmbedding(
        model_name="BAAI/bge-small-en-v1.5", device="cpu"
    )
    index = VectorStoreIndex.from_vector_store(
        vector_store, embed_model=embedding_model
    )

    # Retrieve the 3 most similar chunks per query; answers are synthesized
    # by the agent (return_direct=False) rather than returned verbatim.
    query_engine = index.as_query_engine(llm=llm, similarity_top_k=3)
    query_engine_tool = QueryEngineTool.from_defaults(
        query_engine=query_engine,
        name="my_query_engine",
        description="Query engine for the agent",
        return_direct=False,
    )

    # Fix: the original system prompt described a "calculator assistant"
    # (copy-paste leftover from a tutorial) and contradicted this agent's
    # actual retrieval purpose; replaced with a prompt matching the tool.
    return ReActAgent(
        name="query_engine",
        description="Query engine for the agent",
        tools=[query_engine_tool],
        system_prompt=(
            "You are a helpful assistant. Use the query engine tool to "
            "retrieve relevant information from the knowledge base when "
            "answering questions."
        ),
        llm=llm,
    )