# NOTE(review): removed non-Python scrape artifacts (file-size header, blame
# hashes, line-number run) that made this file syntactically invalid.
import os
import streamlit as st
from groq import Groq
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from huggingface_hub import hf_hub_download
# API key from the environment (e.g. Hugging Face Spaces / deployment secrets).
GROQ_API_KEY = os.getenv("GROQ_API_KEY")

# --- UI setup ---
# set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="GEO MVP - Generative Engine Optimization", layout="wide")
st.title("🔍 GEO: Generative Engine Optimization")

# Fail fast with a clear message when the key is missing; otherwise the Groq
# client constructor / first API call fails later with a confusing auth error.
if not GROQ_API_KEY:
    st.error("GROQ_API_KEY is not set. Add it to your environment/secrets and restart.")
    st.stop()

# Init Groq client (only after the key has been validated above).
groq_client = Groq(api_key=GROQ_API_KEY)

# Upload document (plain-text only; it is written to disk for the loader below).
uploaded_file = st.file_uploader("📄 Upload a .txt file", type=["txt"])
if uploaded_file:
    # Persist the upload to disk so LangChain's TextLoader can read it back.
    # getvalue() is safe across Streamlit reruns, unlike read(), which can
    # return b"" once the underlying stream has already been consumed.
    with open("data.txt", "wb") as f:
        f.write(uploaded_file.getvalue())

    # Streamlit re-executes this whole script on every widget interaction, so
    # cache the expensive embed-and-index build in session_state and rebuild
    # only when a different file is uploaded.
    file_key = (uploaded_file.name, uploaded_file.size)
    if st.session_state.get("geo_file_key") != file_key:
        # Load and split into overlapping chunks for retrieval.
        loader = TextLoader("data.txt")
        documents = loader.load()
        splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
        docs = splitter.split_documents(documents)

        # Embed the chunks and build an in-memory FAISS index.
        st.info("🔎 Generating embeddings...")
        embeddings = HuggingFaceEmbeddings()
        vectorstore = FAISS.from_documents(docs, embeddings)

        st.session_state["geo_file_key"] = file_key
        st.session_state["geo_retriever"] = vectorstore.as_retriever()

    retriever = st.session_state["geo_retriever"]

    # Prompt setup: retrieved context + the user's question.
    prompt_template = PromptTemplate.from_template(
        "You are an expert assistant. Use the following context to answer accurately:\n\n{context}\n\nQ: {question}\nA:"
    )
    st.success("✅ Data embedded and ready.")

    # Query box
    user_query = st.text_input("💬 Ask a question based on your uploaded file")
    if user_query:
        # Retrieve and keep the top-3 most relevant chunks as grounding context.
        results = retriever.get_relevant_documents(user_query)
        context = "\n\n".join(doc.page_content for doc in results[:3])

        # Call Groq with the grounded prompt.
        prompt = prompt_template.format(context=context, question=user_query)
        response = groq_client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            # NOTE(review): mixtral-8x7b-32768 has been deprecated on Groq —
            # verify against Groq's current model list and swap if needed.
            model="mixtral-8x7b-32768",
        )
        answer = response.choices[0].message.content

        st.markdown("### 📥 Answer")
        st.write(answer)
|