MBilal-72 committed
Commit 736448d · verified · 1 Parent(s): e26e11c

Update app.py

Files changed (1)
  app.py +58 -53
app.py CHANGED
```diff
@@ -1,63 +1,68 @@
+import os
 import streamlit as st
 from groq import Groq
-from sentence_transformers import SentenceTransformer
-import faiss
-import numpy as np
-import os
-from dotenv import load_dotenv
+from langchain.vectorstores import FAISS
+from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.chains import RetrievalQA
+from langchain.prompts import PromptTemplate
+from langchain.document_loaders import TextLoader
+from langchain.text_splitter import CharacterTextSplitter
+from huggingface_hub import hf_hub_download
 
-load_dotenv()
-
-# Load API key from .env or Hugging Face secret
+# API key from Hugging Face secrets
 GROQ_API_KEY = os.getenv("GROQ_API_KEY")
 
-# Initialize Groq client
+# Init Groq client
 groq_client = Groq(api_key=GROQ_API_KEY)
 
-# Sample knowledge base
-docs = [
-    "Generative Engine Optimization improves AI answers for SEO.",
-    "RAG combines retrieval with generation for accurate responses.",
-    "Groq provides ultra-fast inference for LLMs.",
-    "Streamlit is great for building quick ML apps.",
-    "Hugging Face offers powerful transformer models and APIs."
-]
-
-# Load embedding model
-embed_model = SentenceTransformer("all-MiniLM-L6-v2")
-doc_embeddings = embed_model.encode(docs)
-
-# Create FAISS index
-index = faiss.IndexFlatL2(doc_embeddings.shape[1])
-index.add(np.array(doc_embeddings))
-
-# Streamlit UI
-st.set_page_config(page_title="GEO Optimizer MVP", layout="centered")
-st.title("🔍 GEO Optimization Assistant")
-
-query = st.text_input("Ask a question or enter a topic:")
-if st.button("Generate Answer") and query:
-    query_embedding = embed_model.encode([query])
-    _, I = index.search(np.array(query_embedding), k=2)
-
-    context = "\n".join([docs[i] for i in I[0]])
-
-    prompt = f"""You are a helpful assistant. Use the following context to answer the question.
-
-Context:
-{context}
-
-Question: {query}
-
-Answer:"""
-
-    try:
+# UI setup
+st.set_page_config(page_title="GEO MVP - Generative Engine Optimization", layout="wide")
+st.title("🔍 GEO: Generative Engine Optimization")
+
+# Upload document
+uploaded_file = st.file_uploader("📄 Upload a .txt file", type=["txt"])
+
+if uploaded_file:
+    # Save file
+    with open("data.txt", "wb") as f:
+        f.write(uploaded_file.read())
+
+    # Load and split
+    loader = TextLoader("data.txt")
+    documents = loader.load()
+    splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
+    docs = splitter.split_documents(documents)
+
+    # Embed
+    st.info("🔎 Generating embeddings...")
+    embeddings = HuggingFaceEmbeddings()
+    vectorstore = FAISS.from_documents(docs, embeddings)
+
+    # Build retriever
+    retriever = vectorstore.as_retriever()
+
+    # Prompt setup
+    prompt_template = PromptTemplate.from_template(
+        "You are an expert assistant. Use the following context to answer accurately:\n\n{context}\n\nQ: {question}\nA:"
+    )
+
+    st.success("✅ Data embedded and ready.")
+
+    # Query box
+    user_query = st.text_input("💬 Ask a question based on your uploaded file")
+
+    if user_query:
+        # Retrieve
+        results = retriever.get_relevant_documents(user_query)
+        context = "\n\n".join([doc.page_content for doc in results[:3]])
+
+        # Call Groq
+        prompt = prompt_template.format(context=context, question=user_query)
         response = groq_client.chat.completions.create(
-            model="llama3-8b-8192",  # or whatever is available
-            messages=[{"role": "user", "content": prompt}]
+            messages=[{"role": "user", "content": prompt}],
+            model="mixtral-8x7b-32768",  # Or another Groq-supported model
         )
+
         answer = response.choices[0].message.content
-        st.markdown("### ✅ Answer")
-        st.success(answer)
-    except Exception as e:
-        st.error(f"Error: {str(e)}")
+        st.markdown("### 📥 Answer")
+        st.write(answer)
```
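
One practical caveat with the new flow: Streamlit re-runs the whole script on every widget interaction, so the upload branch re-splits and re-embeds the document each time a question is asked. Below is a minimal sketch of memoizing that work with `st.cache_resource`, assuming the same LangChain classes the commit imports; `build_retriever` is a hypothetical helper, not part of the commit.

```python
# A minimal caching sketch (not part of the commit): build_retriever is a
# hypothetical helper. st.cache_resource memoizes on the bytes argument,
# so the split/embed/index work runs once per uploaded file instead of on
# every Streamlit rerun.
import streamlit as st
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter

@st.cache_resource
def build_retriever(file_bytes: bytes):
    # Persist the upload so TextLoader can read it from disk.
    with open("data.txt", "wb") as f:
        f.write(file_bytes)
    documents = TextLoader("data.txt").load()
    splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    docs = splitter.split_documents(documents)
    vectorstore = FAISS.from_documents(docs, HuggingFaceEmbeddings())
    return vectorstore.as_retriever()
```

In the app above, `retriever = build_retriever(uploaded_file.getvalue())` would replace the save/load/split/embed block, and repeated questions against the same file would then reuse the cached FAISS index.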