|
|
import os
import random
import tempfile
import time

import streamlit as st
from gtts import gTTS
from transformers import pipeline
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Page chrome: wide layout with dove branding; must be the first
# Streamlit call in the script.
st.set_page_config(page_title="ZealAI", page_icon="🕊️", layout="wide")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Hide Streamlit's default chrome (hamburger menu, footer) and narrow
# the content column for a chat-like feel.
_HIDE_CHROME_CSS = """
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
.block-container {max-width: 980px; padding-top: 1.5rem;}
"""
st.markdown("<style>" + _HIDE_CHROME_CSS + "</style>", unsafe_allow_html=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@st.cache_resource(show_spinner=True)
def load_model():
    """Build and cache the text-generation pipeline (one per server process).

    Returns:
        A transformers ``text-generation`` pipeline for the RedPajama
        7B chat model, sharded across available devices.
    """
    return pipeline(
        "text-generation",
        model="togethercomputer/RedPajama-INCITE-7B-Chat",
        device_map="auto",
        # do_sample is required for temperature to have any effect;
        # without it generation is greedy and temperature is ignored.
        do_sample=True,
        temperature=0.7,
        max_new_tokens=300,
    )


chatbot = load_model()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Per-session conversation log: list of {"role": ..., "content": ...} dicts.
st.session_state.setdefault("chat_history", [])

# Canned trivia served when the user asks for a "bible fact".
BIBLE_FACTS = [
    "The Bible has around 611,000 words.",
    "Psalm 119 is the longest chapter with 176 verses!",
    "The shortest verse is John 11:35: 'Jesus wept.'",
    "The word 'Christian' appears only three times.",
    "Job is believed to be the oldest book.",
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_ai_response(user_text):
    """Produce a reply to *user_text* and record the turn in history.

    Trivia requests get a canned fact; everything else goes to the LLM
    with a rolling five-message context window.

    Args:
        user_text: The raw message typed by the user.

    Returns:
        The assistant's reply as a plain string.

    Side effects:
        Appends both the user message and the reply to
        ``st.session_state.chat_history``.
    """
    lowered = user_text.lower()
    if "bible fact" in lowered or "tell me something cool" in lowered:
        reply = random.choice(BIBLE_FACTS)
    else:
        # Rebuild a short transcript prompt from the last 5 turns.
        prompt = "You are ZEAL AI, a Bible-based assistant.\n"
        for msg in st.session_state.chat_history[-5:]:
            role = "User" if msg["role"] == "user" else "ZEAL AI"
            prompt += f"{role}: {msg['content']}\n"
        prompt += f"User: {user_text}\nZEAL AI:"

        full_text = chatbot(prompt)[0]["generated_text"]
        # The pipeline echoes the prompt; slice it off rather than
        # splitting on "ZEAL AI:", which truncates the reply whenever
        # the model emits that marker again in its continuation.
        if full_text.startswith(prompt):
            continuation = full_text[len(prompt):]
        else:
            continuation = full_text.split("ZEAL AI:")[-1]
        # The model often keeps role-playing both sides; cut at the
        # first hallucinated "User:" turn and never return "".
        reply = continuation.split("User:")[0].strip()
        if not reply:
            reply = "I'm not sure how to answer that — could you rephrase?"

    st.session_state.chat_history.append({"role": "user", "content": user_text})
    st.session_state.chat_history.append({"role": "assistant", "content": reply})
    return reply
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def text_to_speech(text):
    """Synthesize *text* to speech with gTTS and return the MP3 path.

    Writes to a unique temporary file: the previous fixed
    ``response.mp3`` in the working directory was clobbered by every
    concurrent Streamlit session.

    Args:
        text: The reply to vocalize.

    Returns:
        Filesystem path of the generated MP3 file.
    """
    tts = gTTS(text=text, lang="en")
    fd, path = tempfile.mkstemp(suffix=".mp3")
    os.close(fd)  # gTTS reopens the path itself; don't leak the fd
    tts.save(path)
    return path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
st.title("🕊️ ZealAI - Bible-Based Assistant")

# Streamlit reruns the script top-to-bottom on every interaction, so
# replay the stored conversation before handling new input.
for msg in st.session_state.chat_history:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

user_input = st.chat_input("Message ZEAL AI…")

if user_input:
    with st.chat_message("user"):
        st.markdown(user_input)

    reply = get_ai_response(user_input)

    with st.chat_message("assistant"):
        st.markdown(reply)

    # gTTS hits a remote endpoint; a network hiccup there must not
    # crash the whole chat turn — degrade to text-only instead.
    try:
        audio_file = text_to_speech(reply)
        st.audio(audio_file, format="audio/mp3")
    except Exception:
        st.caption("Audio unavailable.")
|
|
|