zeusmadeit's picture
added requirements.txt file as well as performed some basic setup
a7e9858
raw
history blame
1.18 kB
import streamlit as st
from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from dotenv import load_dotenv
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import Settings
import os
import base64
# Pull environment variables (e.g. HF_TOKEN) in from a local .env file.
load_dotenv()

# Single repo id shared by the LLM and its tokenizer.
_LLM_REPO = "meta-llama/Meta-Llama-3-8B-Instruct"

# Global llama-index configuration: route generation through the
# Hugging Face Inference API with a low temperature for focused answers.
Settings.llm = HuggingFaceInferenceAPI(
    model_name=_LLM_REPO,
    tokenizer_name=_LLM_REPO,
    context_window=3900,
    token=os.getenv("HF_TOKEN"),
    max_new_tokens=1024,
    generate_kwargs={"temperature": 0.1},
)

# Embedding model used to vectorize documents for retrieval.
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-large-en-v1.5")
# Directories for raw input documents and the persisted vector index.
PERSIST_DIR = "./db"
DATA_DIR = "data"

# Create both directories up front; exist_ok makes reruns idempotent.
for _dir in (DATA_DIR, PERSIST_DIR):
    os.makedirs(_dir, exist_ok=True)
# Render the Streamlit page header: a title plus a short call-to-action.
_page_title = "Chat with your Doc - PDF Assistant 📄"
_page_tagline = "Get insights from your data – just chat!👇"
st.title(_page_title)
st.markdown(_page_tagline)