"""Streamlit MVP for GEO (Generative Engine Optimization): upload a .txt file,
index it with FAISS, and answer questions about it with a Groq-hosted model."""

import os

import streamlit as st
from groq import Groq
from langchain.document_loaders import TextLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
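
# Assumed install (package names only; pin versions as needed):
#   pip install streamlit groq langchain faiss-cpu sentence-transformers
# The langchain.* import paths above target an older langchain (<0.2); newer
# releases move these classes into langchain_community.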

# Read the Groq API key from the environment. The Groq client raises a clear
# error at construction time if the key is missing.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
groq_client = Groq(api_key=GROQ_API_KEY)
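
# Note: the groq SDK also reads GROQ_API_KEY from the environment on its own,
# so constructing Groq() with no arguments would behave the same way.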

st.set_page_config(page_title="GEO MVP - Generative Engine Optimization", layout="wide")
st.title("GEO: Generative Engine Optimization")

uploaded_file = st.file_uploader("Upload a .txt file", type=["txt"])

if uploaded_file:
    # Persist the upload to disk so TextLoader can read it from a path.
    with open("data.txt", "wb") as f:
        f.write(uploaded_file.read())

    # Load the file and split it into overlapping chunks for retrieval.
    loader = TextLoader("data.txt")
    documents = loader.load()
    splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    docs = splitter.split_documents(documents)
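
    # Sizing note: 500-character chunks with 50 of overlap is a common starting
    # point; larger chunks keep more context per hit, smaller ones raise precision.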

    st.info("Generating embeddings...")
    # HuggingFaceEmbeddings() uses its default sentence-transformers model
    # (all-MiniLM-L6-v2) to embed each chunk; FAISS indexes the vectors in memory.
    embeddings = HuggingFaceEmbeddings()
    vectorstore = FAISS.from_documents(docs, embeddings)

    retriever = vectorstore.as_retriever()
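
    # as_retriever() defaults to similarity search with k=4; since only the top 3
    # chunks are used below, search_kwargs={"k": 3} would be an equivalent tweak.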

    # Prompt that grounds the model's answer in the retrieved context.
    prompt_template = PromptTemplate.from_template(
        "You are an expert assistant. Use the following context to answer "
        "accurately:\n\n{context}\n\nQ: {question}\nA:"
    )

    st.success("Data embedded and ready.")

    user_query = st.text_input("Ask a question based on your uploaded file")

    if user_query:
        # Retrieve the most relevant chunks and join the top 3 into the context.
        results = retriever.get_relevant_documents(user_query)
        context = "\n\n".join(doc.page_content for doc in results[:3])
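
        # Note: newer LangChain releases deprecate get_relevant_documents() in
        # favor of retriever.invoke(user_query); the call above works on the
        # versions this script's import paths target.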

        # Fill in the prompt and send it to Groq's chat completions API.
        prompt = prompt_template.format(context=context, question=user_query)
        response = groq_client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            # Model name kept from the original; Groq retires models over time,
            # so check the current model list if this one is unavailable.
            model="mixtral-8x7b-32768",
        )

        answer = response.choices[0].message.content
        st.markdown("### Answer")
        st.write(answer)
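
# To run (assuming this file is saved as app.py and GROQ_API_KEY is exported):
#   streamlit run app.py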