Delete utils.py
utils.py (DELETED)
@@ -1,124 +0,0 @@
import os, tempfile, streamlit as st
from typing import List, IO, Tuple
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from docx import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document as LangchainDocument
from langchain_community.vectorstores import FAISS
from cerebras.cloud.sdk import Cerebras  # <-- NEW
from langchain.prompts import PromptTemplate
from langchain_together.embeddings import TogetherEmbeddings

load_dotenv()

# ---------- Helpers ---------------------------------------------------------
def get_cerebras_api_key() -> str:
    key = os.environ.get("CEREBRAS_API_KEY") or st.secrets.get("CEREBRAS_API_KEY", None)
    if not key:
        raise EnvironmentError("CEREBRAS_API_KEY not found in env or Streamlit secrets.")
    return key

def get_together_api_key() -> str:
    # NOTE: the file called get_together_api_key() below without defining it;
    # this definition, mirroring the Cerebras helper, is an assumed fix.
    key = os.environ.get("TOGETHER_API_KEY") or st.secrets.get("TOGETHER_API_KEY", None)
    if not key:
        raise EnvironmentError("TOGETHER_API_KEY not found in env or Streamlit secrets.")
    return key

# ---------- File-reading utilities -----------------------------------------
def get_pdf_text(pdf_docs: List[IO[bytes]]) -> str:
    txt = ""
    for pdf in pdf_docs:
        for page in PdfReader(pdf).pages:
            if (t := page.extract_text()):
                txt += t + "\n"
    return txt

def get_docx_text(docx_docs: List[IO[bytes]]) -> str:
    txt = ""
    for d in docx_docs:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".docx") as tmp:
            tmp.write(d.getvalue()); tmp.flush()
        try:
            doc = Document(tmp.name)
            txt += "\n".join(p.text for p in doc.paragraphs) + "\n"
        finally:
            os.unlink(tmp.name)
    return txt

def get_text_chunks(text: str) -> List[str]:
    return RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_text(text)

# ---------- Vector-store build & save --------------------------------------
def get_vector_store(text_chunks: List[str]) -> None:
    api_key = get_together_api_key()
    embeddings = TogetherEmbeddings(model="BAAI/bge-base-en-v1.5", api_key=api_key)
    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
    vector_store.save_local("faiss_index")

# ---------- QA chain helpers ----------------------------------------------
def get_conversational_chain() -> Tuple[Cerebras, PromptTemplate]:
    # Cerebras client is instantiated here, prompt template unchanged
    client = Cerebras(api_key=get_cerebras_api_key())
    prompt = PromptTemplate(
        template=(
            "As a professional assistant, provide a detailed and formally written "
            "answer to the question using the provided context.\n\nContext:\n{context}\n\n"
            "Question:\n{question}\n\nAnswer:"
        ),
        input_variables=["context", "question"]
    )
    return client, prompt

def self_assess(question: str) -> str:
    client = Cerebras(api_key=get_cerebras_api_key())
    msgs = [
        {"role": "system", "content": "You are an expert assistant…"},
        {"role": "user", "content": (
            "If you can confidently answer the following question from your own "
            "knowledge, do so; otherwise reply with 'NEED_RETRIEVAL'.\n\n"
            f"Question: {question}"
        )}
    ]
    result = client.chat.completions.create(
        messages=msgs,
        model="llama-3.3-70b",
        max_completion_tokens=1024,
        temperature=0.2,
        top_p=1,
        stream=False
    )
    return result.choices[0].message.content.strip()

def process_docs_for_query(docs: List[LangchainDocument], question: str) -> str:
    if not docs:
        return "Sorry, I couldn't find relevant info in the documents."
    ctx = "\n\n".join(d.page_content for d in docs)
    client, prompt = get_conversational_chain()
    prompt_text = prompt.format(context=ctx, question=question)
    result = client.chat.completions.create(
        messages=[{"role": "user", "content": prompt_text}],
        model="llama-3.3-70b",
        max_completion_tokens=1024,
        temperature=0.2,
        top_p=1,
        stream=False
    )
    return result.choices[0].message.content


# ---------- Main user-query orchestrator -----------------------------------
def user_input(user_question: str) -> None:
    assessment = self_assess(user_question)
    need_retrieval = assessment.upper() == "NEED_RETRIEVAL"
    st.info("🔍 Searching documents…" if need_retrieval else "💡 Using model knowledge…")

    try:
        if need_retrieval:
            # Embeddings must match those used in get_vector_store; replace
            # TogetherEmbeddings here and there together if you switch providers
            # (e.g. to a Cerebras embedding alternative, if one is supported).
            embeddings = TogetherEmbeddings(
                model="BAAI/bge-base-en-v1.5", api_key=get_together_api_key()
            )
            vs = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
            docs = vs.similarity_search(user_question)
            answer = process_docs_for_query(docs, user_question)
        else:
            answer = assessment
        st.markdown("### Answer")
        st.markdown(answer)
    except Exception as e:
        st.error(f"⚠️ Error: {e}")
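For context, a minimal sketch of how a Streamlit front end (for example an app.py in this Space) could have driven these helpers before the deletion; the uploader labels, button flow, and file-name filtering here are assumptions for illustration, not taken from this commit.

# app.py (hypothetical) — one way the deleted utils.py might have been consumed
import streamlit as st
from utils import (
    get_pdf_text, get_docx_text, get_text_chunks,
    get_vector_store, user_input,
)

st.set_page_config(page_title="Doc Q&A")
st.title("Chat with your documents")

uploaded = st.file_uploader(
    "Upload PDF or DOCX files", type=["pdf", "docx"], accept_multiple_files=True
)

if uploaded and st.button("Process"):
    pdfs = [f for f in uploaded if f.name.lower().endswith(".pdf")]
    docxs = [f for f in uploaded if f.name.lower().endswith(".docx")]
    raw_text = get_pdf_text(pdfs) + get_docx_text(docxs)
    chunks = get_text_chunks(raw_text)
    get_vector_store(chunks)   # writes the FAISS index to ./faiss_index
    st.success("Index built.")

question = st.text_input("Ask a question about the uploaded documents")
if question:
    user_input(question)       # renders the answer via st.markdown

The flow matches the utilities' contract: get_vector_store persists the index to disk, and user_input later reloads it only when self_assess decides retrieval is needed.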