File size: 2,945 Bytes
a2bcf25
2cee6d0
 
a2bcf25
 
 
4581856
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a2bcf25
 
4581856
a2bcf25
 
4581856
 
 
 
a2bcf25
 
 
 
 
 
4581856
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a2bcf25
 
 
 
 
 
 
 
4581856
a2bcf25
 
 
 
4581856
a2bcf25
 
4581856
 
 
a2bcf25
 
4581856
a2bcf25
 
 
 
4581856
 
 
 
a2bcf25
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import streamlit as st
st.set_page_config(page_title="KizDar Festival AI", layout="centered")

from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from transformers import pipeline
import datetime, os, requests, zipfile

# ----------------------------
# Setup
# ----------------------------
DB_URL = "https://huggingface.co/Bur3hani/kizdarFestival_Assistant/resolve/main/chroma_db.zip"
DB_DIR = "chroma_db"
ZIP_PATH = "chroma_db.zip"

# Download and extract the pre-built Chroma index on first run.
# Fixes over the original: the request now has a timeout (previously it
# could hang the app forever), the HTTP status is checked (previously a
# 404/500 error page would be silently saved and unzipped), the body is
# streamed to disk instead of buffered fully in memory, and the archive
# is removed after extraction.
if not os.path.exists(DB_DIR):
    with st.spinner("πŸ”„ Downloading knowledge base..."):
        with requests.get(DB_URL, stream=True, timeout=60) as r:
            r.raise_for_status()
            with open(ZIP_PATH, "wb") as f:
                for chunk in r.iter_content(chunk_size=1 << 20):
                    f.write(chunk)
        with zipfile.ZipFile(ZIP_PATH, "r") as zip_ref:
            zip_ref.extractall(DB_DIR)
        os.remove(ZIP_PATH)  # archive no longer needed once extracted

# Retrieval side: sentence embeddings + persisted Chroma vector store.
embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
db = Chroma(persist_directory=DB_DIR, embedding_function=embedding)
# Generation side: seq2seq model that produces the final answer text.
qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-base")

# ----------------------------
# Streamlit Config
# ----------------------------

# Per-session stores: `history` keeps full (question, answer) pairs for
# display; `log` keeps compact timestamped usage lines.  Initializing
# only when absent preserves them across Streamlit reruns.
for _key in ("history", "log"):
    if _key not in st.session_state:
        st.session_state[_key] = []

# ----------------------------
# UI Header
# ----------------------------
st.image("https://placehold.it/150x150", width=150)
st.title("KizDar Festival AI Assistant")
st.caption("Ask about the schedule, DJs, dress code, venues, volunteering, and more.")

# ----------------------------
# Question Box
# ----------------------------
question = st.text_input("🎀 Ask your question:")
# ----------------------------
# Get Response Logic
# ----------------------------
def get_response(question):
    """Answer a festival question via retrieval-augmented generation.

    Looks up the 4 most similar document chunks in the vector store,
    stuffs them into a prompt as context, and runs the text2text model.

    Args:
        question: The user's natural-language query.

    Returns:
        The model's generated answer, stripped of surrounding whitespace.
    """
    matches = db.similarity_search(question, k=4)
    snippets = [m.page_content for m in matches]
    context = "\n".join(snippets)
    prompt = f"Answer this question clearly and fully:\nQuestion: {question}\nContext: {context}"
    # Greedy decoding (do_sample=False) keeps answers deterministic.
    outputs = qa_pipeline(prompt, max_length=512, do_sample=False)
    return outputs[0]['generated_text'].strip()

if question:
    with st.spinner("πŸ€– Generating answer..."):
        answer = get_response(question)
    # Record the exchange: full pair for display, truncated line for the log.
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    st.session_state.history.append((question, answer))
    st.session_state.log.append(f"{stamp} | Q: {question} | A: {answer[:80]}...")
    st.markdown("### βœ… Answer:")
    st.success(answer)

# ----------------------------
# Previous Questions
# ----------------------------
if st.session_state.history:
    st.markdown("---")
    st.markdown("### πŸ•“ Recent Q&A:")
    # Show up to the 5 most recent exchanges, newest first.
    recent = st.session_state.history[-5:][::-1]
    for rank, (asked, replied) in enumerate(recent, start=1):
        st.markdown(f"**{rank}. {asked}**")
        st.markdown(f"*{replied}*")

# ----------------------------
# Log View
# ----------------------------
with st.expander("πŸ“œ View Usage Log"):
    # Surface only the 10 newest log lines.
    for line in st.session_state.log[-10:]:
        st.code(line)