Spaces:
Sleeping
Sleeping
Vlad Bastina
committed on
Commit
·
b5d2247
0
Parent(s):
first commit
Browse files- .gitattributes +3 -0
- .gitignore +2 -0
- .streamlit/config.toml +0 -0
- Files/ZEGA AI Capabilities Overview for Clients (1).docx +3 -0
- Files/ZEGA AI Document Capabilities Overview for Clients (1).docx +3 -0
- app.py +80 -0
- query_chat.py +73 -0
- zega_logo.PNG +3 -0
.gitattributes
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.PNG filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.docx filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.streamlit/secrets.toml
|
| 2 |
+
__pycache__
|
.streamlit/config.toml
ADDED
|
File without changes
|
Files/ZEGA AI Capabilities Overview for Clients (1).docx
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3e9846b0d831376616c6c92b7e65455a5e930e5a6b4126d36746e9876ffb2d24
|
| 3 |
+
size 199448
|
Files/ZEGA AI Document Capabilities Overview for Clients (1).docx
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7651fbd13a38467e77598d01b7e0c893ff243810f1c958e6f6163029d34ded94
|
| 3 |
+
size 9748
|
app.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import os
|
| 3 |
+
from query_chat import GeminiQanA
|
| 4 |
+
from docx import Document
|
| 5 |
+
|
| 6 |
+
# Function to read text from a Word document
|
| 7 |
+
def extract_text_from_docx(file_path):
    """Read *file_path* as a Word document and return its paragraph text.

    Whitespace-only paragraphs are dropped; the remaining paragraphs are
    stripped and joined into a single space-separated string.
    """
    stripped = (para.text.strip() for para in Document(file_path).paragraphs)
    return " ".join(text for text in stripped if text)
| 10 |
+
|
| 11 |
+
# Streamlit App Configuration
st.set_page_config(page_title="Zega AI Sales Agent", page_icon="🤖", layout="centered")

# Sidebar with branding
st.sidebar.image("zega_logo.PNG", width=300)
st.sidebar.markdown("### Welcome to Zega AI Sales Agent!")
st.sidebar.markdown("Ask anything about our team's capabilities and projects.")

# Load API key safely. Without a key the chatbot cannot work, so halt the
# script here instead of failing later inside GeminiQanA with an obscure
# error (bugfix: previously execution continued past st.error).
if "GOOGLE_API_KEY" in st.secrets:
    os.environ["GOOGLE_API_KEY"] = st.secrets["GOOGLE_API_KEY"]
else:
    st.error("API key missing! Please set up your Google API key in Streamlit secrets.")
    st.stop()

# Load brochure documents that ground the chatbot's answers
with st.spinner("Loading project information..."):
    doc1_text = extract_text_from_docx("Files/ZEGA AI Capabilities Overview for Clients (1).docx")
    doc2_text = extract_text_from_docx("Files/ZEGA AI Document Capabilities Overview for Clients (1).docx")

# Initialize chatbot (re-created on every Streamlit rerun; the brochure
# texts are passed in as the model's system-instruction context)
chatbot = GeminiQanA(doc1_text, doc2_text)

# Initialize chat session history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Chat UI
st.title("📄 Zega AI Sales Agent")

# Display chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# User Input
question = st.text_area("Ask a question about Zega AI:", height=100)

# Chat Actions
col1, col2 = st.columns([3, 1])
with col1:
    ask_button = st.button("💬 Ask AI")
with col2:
    clear_button = st.button("🗑️ Clear Chat")

# Clear chat history
if clear_button:
    st.session_state.messages = []
    st.rerun()

# Handle user input (only when the button was pressed AND text was entered)
if ask_button and question:
    # Append and display the user question
    st.session_state.messages.append({"role": "user", "content": question})
    with st.chat_message("user"):
        st.markdown(question)

    # Generate AI response
    with st.spinner("💡 Thinking..."):
        answer = chatbot.answer_question(question)

    # Append and display the AI response
    st.session_state.messages.append({"role": "assistant", "content": answer})
    with st.chat_message("assistant"):
        st.markdown(answer)

    # Rerun so the new exchange renders via the history loop above
    st.rerun()
query_chat.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import google.generativeai as genai
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
class GeminiQanA:
    """Question-answering chatbot backed by Google's Gemini model.

    The model is primed with a sales-agent system instruction built from
    two brochure texts; it then answers free-form customer questions
    grounded only in those brochures.
    """

    def __init__(self, text1: str = '', text2: str = ''):
        """Initializes the Gemini question answerer by loading the model.

        Args:
            text1: Plain text of the first brochure (may be empty).
            text2: Plain text of the second brochure (may be empty).
        """
        # NOTE(review): if GOOGLE_API_KEY is unset, None is passed to
        # genai.configure and requests fail later with an unclear error —
        # confirm the caller (app.py) always sets the variable first.
        self.api_key = os.getenv("GOOGLE_API_KEY")
        genai.configure(api_key=self.api_key)
        self.model = self._load_model(text1, text2)

    def _load_model(self, text1: str, text2: str):
        """Loads the generative AI model with a system instruction.

        The brochure texts are interpolated into the sales-agent prompt,
        which is attached to the model as its system instruction.
        """
        final_prompt = f'''Role:
You are a sales agent responsible for assisting customers by answering questions about our team’s capabilities and the projects we offer. You have access to two brochures that detail the available projects and their features. Your goal is to provide accurate and honest responses based solely on the information within these brochures.

Guidelines for Responses:
1. Accuracy & Honesty
-Only provide responses based on the brochures.
-Do not overstate or exaggerate the capabilities of the team.
-If information is not available in the brochures, do not speculate—politely inform the customer that the requested details are not available.
2. Answering Questions About the Team’s Capabilities & Projects
-When a customer asks about what our team can do, provide information only from the brochures.
-If asked about past projects, refer only to those explicitly mentioned in the brochures.
-If the customer asks for additional details not found in the brochures, politely inform them that you can only share the information available.
3. Providing Solutions to Customer Problems
-If a customer presents a problem, check if a project in the brochures provides a direct solution.
-If a matching project exists, explain how it can address their problem.
-If an alternative but related project exists, suggest it as a partial solution, explaining its limitations.
-If no project can help, politely state that no suitable solution is available.
4. What Not to Do
-Do not create new information or assume additional capabilities.
-Do not make guarantees beyond what is stated in the brochures.
-Do not offer speculative solutions that are not explicitly supported by the documents.
Example Interactions:
Scenario 1: Customer Asks About a Team Capability
✅ Customer: "Does your team specialize in AI-powered automation?"
✅ Agent: "According to our brochure, our team specializes in [list relevant capabilities]. While AI-powered automation is not specifically mentioned, we do offer [related project] which may align with your needs."

Scenario 2: Customer Has a Specific Problem
✅ Customer: "I need a system to manage logistics for my e-commerce business. Do you have a solution?"
✅ Agent: "Yes, we offer [Project Name], which is designed for logistics management. It provides [brief relevant details from the brochure]. Would you like more information on its features?"

Scenario 3: No Matching Solution Available
✅ Customer: "Do you have a tool for automating customer sentiment analysis?"
✅ Agent: "I'm sorry, but our current projects do not include a tool specifically for sentiment analysis. However, we do have [Project Name], which provides [related functionality]. Would that be of interest?"

✅ Customer: "I need a blockchain-based security system. Can your team provide one?"
✅ Agent: "Unfortunately, we do not have a blockchain-based security system in our current offerings. I'm happy to help with any other inquiries regarding our available solutions."

Tone & Style:
Maintain a professional, helpful, and customer-focused tone.
Keep responses concise yet informative based on brochure content.
If a solution exists, explain how it meets the customer's needs without overselling.
If no solution exists, remain polite and transparent.

First Brochure:
{text1}

Second Brochure:
{text2}
'''

        return genai.GenerativeModel("gemini-1.5-pro", system_instruction=final_prompt)

    def answer_question(self, question: str) -> str:
        """Send *question* to Gemini and return the model's text reply.

        Fixed docstring: the previous one incorrectly described this as a
        sentiment-analysis call (copy-paste leftover); it is a plain
        question-answering request against the system-instructed model.
        """
        response = self.model.generate_content(question)
        return response.text
| 69 |
+
|
| 70 |
+
if __name__ == "__main__":
    # Manual smoke test: build a chatbot with empty brochures (requires
    # GOOGLE_API_KEY in the environment) and print a single reply.
    print(GeminiQanA().answer_question("Hello,how are you?"))
zega_logo.PNG
ADDED
|
|
Git LFS Details
|