# NOTE(review): the original capture began with "Spaces:" / "Build error" lines —
# Hugging Face Spaces build-log artifacts, not part of the program. Converted to
# comments so the module parses.
import json
import logging
import os
import smtplib
from datetime import datetime
from email.mime.multipart import MIMEMultipart

import openai
import pandas as pd
import requests
import serpapi
import streamlit as st
import streamlit.components.v1 as components
from dotenv import load_dotenv
from langchain.agents import tool, AgentExecutor
from langchain.agents.format_scratchpad.openai_tools import format_to_openai_tool_messages
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_core.messages import AIMessage, HumanMessage
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
# Logging first, then environment: .env values must be loaded before the
# API keys are read below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
load_dotenv()

# Both keys are mandatory; fail fast at startup rather than mid-request.
openai_api_key = os.getenv("OPENAI_API_KEY")
serper_api_key = os.getenv("SERPER_API_KEY")
if not (openai_api_key and serper_api_key):
    logger.error("API keys are not set properly.")
    raise ValueError("API keys for OpenAI and SERPER must be set in the .env file.")
openai.api_key = openai_api_key
# Global CSS override: shrink any image wrapped in a .custom-image container.
st.markdown(
    """
<style>
.custom-image img {
    width: 100px; /* Set the width to make the image smaller */
    height: auto; /* Keep the aspect ratio */
}
</style>
""",
    unsafe_allow_html=True,
)

# First-run flag; flips to True once the user sends their first message.
st.session_state.setdefault("chat_started", False)
def copy_to_clipboard(text):
    """Render a small copy-to-clipboard button for *text* below a chat message.

    The text is embedded into an inline <script>, so it must be a valid
    JavaScript string literal. The original hand-rolled escaping
    (``replace('\\n', ...)`` then ``replace('"', ...)``) missed backslashes,
    carriage returns and ``</script>`` sequences, so such content produced
    broken or injectable markup. ``json.dumps`` performs full JS-compatible
    escaping; ``</`` is additionally escaped so the payload can never
    terminate the enclosing <script> element early.

    Args:
        text: The plain text to copy when the button is clicked.
    """
    # json.dumps yields a double-quoted, fully escaped JS string literal.
    js_text = json.dumps(text).replace("</", "<\\/")
    copy_icon_html = f"""
    <style>
    .copy-container {{
        position: relative;
        margin-top: 10px;
        padding-bottom: 30px; /* Space for the button */
        font-size: 0; /* Hide extra space */
    }}
    .copy-button {{
        background: none;
        border: none;
        color: #808080; /* Grey color */
        cursor: pointer;
        font-size: 18px; /* Adjust icon size */
        position: absolute;
        bottom: 0;
        right: 0;
    }}
    .copy-button:hover {{
        color: #606060; /* Darker grey on hover */
    }}
    .copy-message {{
        font-size: 12px;
        color: #4CAF50;
        margin-left: 10px;
        display: none;
    }}
    </style>
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
    <div class="copy-container">
        <button class="copy-button" onclick="copyToClipboard()">
            <i class="fas fa-copy"></i>
        </button>
        <span class="copy-message" id="copy_message">Copied!</span>
    </div>
    <script>
    function copyToClipboard() {{
        var textArea = document.createElement("textarea");
        textArea.value = {js_text};
        document.body.appendChild(textArea);
        textArea.select();
        document.execCommand("copy");
        document.body.removeChild(textArea);
        var copyMessage = document.getElementById("copy_message");
        copyMessage.style.display = "inline";
        setTimeout(function() {{
            copyMessage.style.display = "none";
        }}, 2000);
    }}
    </script>
    """
    # components.html renders in an iframe, so the injected CSS/JS stays local.
    components.html(copy_icon_html, height=60)
def _write_fresh_feedback_file(df, path, success_message):
    """Write *df* to *path* as a brand-new Excel file and report success.

    Surfaces any write failure in the UI instead of raising.
    """
    try:
        df.to_excel(path, index=False, engine='openpyxl')
        st.success(success_message)
    except Exception as e:
        st.error(f"Error saving feedback to Excel: {e}")


def save_feedback_to_excel(name, email, feedback):
    """Append one feedback entry (timestamp, name, email, text) to feedbacks.xlsx.

    If the file does not exist, or exists but cannot be read (corrupted or not
    actually an Excel file), a fresh file containing only the new entry is
    written instead, so feedback is never lost to a bad file.

    Args:
        name: Submitter's name.
        email: Submitter's email address.
        feedback: Free-text feedback.
    """
    feedback_file = 'feedbacks.xlsx'
    # Single-row frame for the new entry.
    df = pd.DataFrame({
        'Timestamp': [datetime.now()],
        'Name': [name],
        'Email': [email],
        'Feedback': [feedback]
    })

    if not os.path.exists(feedback_file):
        # First ever submission: just create the file.
        _write_fresh_feedback_file(df, feedback_file, "Feedback file created and feedback saved!")
        return

    try:
        # Load the existing sheet and append the new entry.
        existing_df = pd.read_excel(feedback_file, engine='openpyxl')
        df = pd.concat([existing_df, df], ignore_index=True)
    except ValueError as e:
        # openpyxl signals non-Excel/corrupted content as ValueError.
        st.error(f"ValueError: {e} - The file might be corrupted or not an Excel file.")
        _write_fresh_feedback_file(df, feedback_file, "New feedback file created and feedback saved!")
        return
    except Exception as e:
        st.error(f"Error reading existing feedback file: {e}")
        _write_fresh_feedback_file(df, feedback_file, "New feedback file created and feedback saved!")
        return

    try:
        # Save the combined history back with the openpyxl engine.
        df.to_excel(feedback_file, index=False, engine='openpyxl')
        st.success("Feedback saved successfully!")
    except Exception as e:
        st.error(f"Error saving feedback to Excel: {e}")
def side():
    """Render the sidebar: logo, TrustLogic explainer, the six colour-coded
    Trust Buckets, and a feedback form persisted via save_feedback_to_excel.
    """
    with st.sidebar.form(key='feedback_form'):
        st.image("Trust Logic_Wheel_RGB_Standard.png")
        st.header("Let's create something great.")
        st.markdown("Our minds assess trust through Six Buckets of Trust® and determine their importance and order in a given situation. We then evaluate why we can or can’t trust someone in these Buckets. TrustAI®, trained on 20 years of TrustLogic® application, helps you identify reasons why your audience can trust you in each Bucket and create trust-optimised solutions. It’s copy AI with substance.")
        # Colour-coded headings for the six Trust Buckets.
        st.markdown("""
<style>
.stability { color: rgb(7, 55, 99); font-size: 24px; font-weight: bold; }
.development { color: rgb(241, 194, 50); font-size: 24px; font-weight: bold; }
.relationship { color: rgb(204, 0, 0); font-size: 24px; font-weight: bold; }
.benefit { color: rgb(56, 118, 29); font-size: 24px; font-weight: bold; }
.vision { color: rgb(255, 153, 0); font-size: 24px; font-weight: bold; }
.competence { color: rgb(111, 168, 220); font-size: 24px; font-weight: bold; }
</style>
<h3 class="stability">Stability Trust:</h3>
<p>Why can I trust you to have built a strong and stable foundation?</p>
<h3 class="development">Development Trust:</h3>
<p>Why can I trust you to develop well in the future?</p>
<h3 class="relationship">Relationship Trust:</h3>
<p>What appealing relationship qualities can I trust you for?</p>
<h3 class="benefit">Benefit Trust:</h3>
<p>What benefits can I trust you for?</p>
<h3 class="vision">Vision Trust:</h3>
<p>What Vision and Values can I trust you for?</p>
<h3 class="competence">Competence Trust:</h3>
<p>What competencies can I trust you for?</p>
""", unsafe_allow_html=True)
        st.markdown("For detailed descriptions, visit [Academy](https://www.trustlogic.info/academy)")
        feedback_name = st.text_input("Name")
        feedback_email_input = st.text_input("Email")
        feedback_text = st.text_area("Feedback")
        # Submit button within the form
        submit_button = st.form_submit_button("Submit Feedback")
        if submit_button:
            if feedback_name and feedback_email_input and feedback_text:
                # BUGFIX: spinner previously said 'Sending email', but no email
                # is sent — feedback is written to an Excel file.
                with st.spinner('Saving feedback'):
                    save_feedback_to_excel(feedback_name, feedback_email_input, feedback_text)
                st.success("Thank you for your feedback!")
            else:
                st.error("Please fill in all fields.")


side()
# Load knowledge base
def load_knowledge_base():
    """Load the TrustLogic book and split it into ~1000-character chunks.

    Returns:
        list: Document chunks suitable for embedding into the vector store.

    Raises:
        Exception: Re-raised (after logging) when the source file is missing
        or unreadable, so app startup fails loudly instead of silently.
    """
    try:
        loader = TextLoader("./data_source/time_to_rethink_trust_book.md")
        documents = loader.load()
        text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
        return text_splitter.split_documents(documents)
    except Exception as e:
        logger.error(f"Error loading knowledge base: {e}")
        raise  # bare raise preserves the original exception and traceback
# Build the retrieval index at import time so search is ready before the UI runs.
knowledge_base = load_knowledge_base()
# Initialize embeddings and FAISS index
embeddings = OpenAIEmbeddings()  # reads OPENAI_API_KEY from the environment
# In-memory FAISS store over the book chunks.
# NOTE(review): Streamlit reruns this module on every interaction, so the
# documents appear to be re-embedded each rerun — consider @st.cache_resource.
db = FAISS.from_documents(knowledge_base, embeddings)
# Define search functions
def search_knowledge_base(query):
    """Similarity-search the FAISS index for *query*.

    Returns:
        list: Matching Document objects; an empty list on failure.

    BUGFIX: the original returned ``["Error occurred..."]`` (a list of
    strings) on failure, which crashed rag_response's ``doc.page_content``
    access with AttributeError. An empty list keeps the caller working and
    simply yields an empty context.
    """
    try:
        return db.similarity_search(query)
    except Exception as e:
        logger.error(f"Error searching knowledge base: {e}")
        return []
def google_search(query):
    """Run a Google search and return the organic-result snippets.

    NOTE(review): the key comes from SERPER_API_KEY but is passed to
    ``serpapi.Client`` — Serper.dev and SerpApi are different services;
    confirm which provider this deployment actually uses.

    Returns:
        list[str]: Snippet strings, or a one-element error message on failure.
    """
    try:
        search_client = serpapi.Client(api_key=serper_api_key)
        results = search_client.search({"engine": "google", "q": query})
        # Some organic results (ads, rich cards) carry no "snippet"; skip them
        # instead of raising KeyError as the original did.
        organic = results.get("organic_results", [])
        return [r["snippet"] for r in organic if "snippet" in r]
    except Exception as e:
        logger.error(f"Error in Google search: {e}")
        return ["Error occurred during Google search"]
# RAG response function
def rag_response(query):
    """Answer *query* with gpt-4o, grounded in knowledge-base chunks.

    Retrieves similar chunks, stitches them into a context block, and asks
    the model to answer from that context. Returns an error string (never
    raises) so callers can render the result directly.
    """
    try:
        docs = search_knowledge_base(query)
        context_text = "\n".join(d.page_content for d in docs)
        llm = ChatOpenAI(model="gpt-4o", temperature=0.5, api_key=openai_api_key)
        answer = llm.invoke(f"Context:\n{context_text}\n\nQuestion: {query}\nAnswer:")
        return answer.content
    except Exception as e:
        logger.error(f"Error generating RAG response: {e}")
        return "Error occurred during RAG response generation"
# Define tools
# BUGFIX: the @tool decorator is imported (langchain.agents) but was never
# applied, so plain functions were handed to AgentExecutor/bind_tools, which
# expect BaseTool instances. The docstrings below double as the tools'
# descriptions for the model.
@tool
def knowledge_base_tool(query: str):
    """Query the knowledge base and retrieve a response."""
    return rag_response(query)


@tool
def google_search_tool(query: str):
    """Perform a Google search using the SERPER API."""
    return google_search(query)


tools = [knowledge_base_tool, google_search_tool]
# Create the prompt template
# System instructions for the copywriting agent: classify the query as
# general vs trust-based and follow the matching rules.
# FIX: dropped the needless f-prefix — the string contains no placeholders,
# and a plain literal avoids accidental brace interpolation later.
prompt_message = """ You are a expert copywriter capable of generating responses based on general queries as well as trust-based queries. Your response will depend on the classification of the user's input.
Classification:
General Query: If the user asks for general information (e.g., blogs, posts, articles, reports etc), classify it as a general query.
Trust-Based Query: If the user seeks proof points or compelling content related to particular trust bucket , classify it under a trust bucket.
If user input is a General Query regarding blog/post/report/article etc. follow the instructions below :
**Instructions:**
*For general queries, you need to generate a response in natural language it should properly follow format of blogs , reports and articles for any organization. No need to literally follow the knowledge base here
*Strictly DO NOT FOLLOW a specific trust bucket or proof points . follow the idea format of blog and article .
*Strictly do not mention name of any trust bucket [ stability, development , relationship , competence , benefit , vision] in your response .
If user input is Trust-Based or proof points or compelling copy Query :
If the query is related to particular trust buckets or proof points in that case only refer to knowledge base :[knowledge base] and strictly follow the steps below:
**Instructions :**
*Identify Trust Bucket: Determine the relevant trust bucket from the user's query. Do not explicitly mention the trust bucket in your response.
*Response Length: Generate a detailed, compelling copy between 1000-2000 words focused exclusively on the identified trust bucket.
*Trust Statements/TrustBuilders: Use these terms interchangeably in place of "proof points" throughout your response. Do not overuse one term.
*Proof Points: Use guiding principles or objectives from the knowledge base to support your points.
*Sub-points : Strictly add sub-points in this case only
*Facts and figures : Strictly be Specific with names, dates, locations, numbers, Dollars , currency , figures etc. in proof points .
*Active Tonality: Write in an active, dynamic, and enthusiastic tone.
*Source URL : With every point of information generated by you , always return correct source URL of that information in bracket with that particular point.
**Heuristics and Creative Techniques:**
When generating responses, incorporate the following heuristics and creative techniques as appropriate to enhance the quality and impact of the copy:
Storytelling: Weave narratives or anecdotes to illustrate key points and engage the reader emotionally.
Problem-Agitation-Solution: Present a problem, emphasize its impact, then offer a solution.
Metaphors and Analogies: Use comparisons to explain complex concepts in relatable terms.
Social Proof: Incorporate testimonials, case studies, or statistics to build credibility.
Scarcity and Urgency: Create a sense of limited availability or time pressure when appropriate.
Benefit-Focused Language: Emphasize "what's in it for the reader" rather than just features.
Sensory Language: Use vivid, descriptive words that appeal to the five senses.
Pattern Interrupts: Use unexpected elements or formatting to maintain reader interest.
Future Pacing: Help readers envision positive future outcomes.
Repetition with Variation: Reinforce key messages using different phrasings or contexts.
Rhetorical Questions: Engage readers by prompting them to think actively about the topic.
Power Words: Incorporate emotionally charged words to evoke specific feelings or reactions.
Before-After-Bridge: Describe the current situation, the desired outcome, and how to get there.
Bucket Brigades: Use short phrases or questions to maintain momentum and encourage continued reading.
AIDA Framework: Structure content to grab Attention, generate Interest, create Desire, and prompt Action.
[Always consider which of these techniques would be most appropriate and effective for the specific query and context when crafting your response]
"""
# Chat prompt layout: system instructions, running conversation history,
# the new user turn, then the agent's tool-call scratchpad (populated by
# AgentExecutor between tool invocations).
prompt_template = ChatPromptTemplate.from_messages([
    ("system", prompt_message),
    MessagesPlaceholder(variable_name="chat_history"),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])
# Create Langchain Agent
# NOTE(review): no explicit api_key here — relies on OPENAI_API_KEY being in
# the environment (set via load_dotenv above).
llm = ChatOpenAI(model="gpt-4o", temperature=0.5)
llm_with_tools = llm.bind_tools(tools)
# Define the agent pipeline (LCEL): map the executor's input dict into the
# prompt variables, render the prompt, call the tool-bound model, and parse
# its output into either tool calls or a final answer.
agent = (
    {
        "input": lambda x: x["input"],
        # Convert (action, observation) pairs into tool-call messages.
        "agent_scratchpad": lambda x: format_to_openai_tool_messages(x["intermediate_steps"]),
        "chat_history": lambda x: x["chat_history"],
    }
    | prompt_template
    | llm_with_tools
    | OpenAIToolsAgentOutputParser()
)
# Instantiate an AgentExecutor
# verbose=True logs each reasoning/tool step to stdout.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
# Streamlit app
# Ensure the conversation history exists, then replay it so earlier turns
# survive Streamlit's full-script rerun on each interaction.
st.session_state.setdefault('chat_history', [])
for past_message in st.session_state.chat_history:
    with st.chat_message(past_message["role"]):
        st.markdown(past_message["content"])
# Chat input
# Landing screen: shown only until the first message is sent. Combines an
# inline <script> (NOTE(review): scripts inside st.markdown are typically
# sanitized/not executed — verify this SVG-hiding hack actually runs), CSS
# overrides keyed to brittle st-emotion-cache class names (break on Streamlit
# upgrades), and three intro cards.
if not st.session_state.get("chat_started", False):
    st.markdown("""
<script>
document.addEventListener('DOMContentLoaded', (event) => {
const svgs = document.querySelectorAll('svg');
svgs.forEach(svg => {
if (svg.getAttribute('xmlns') === 'http://www.w3.org/2000/svg' && svg.getAttribute('width') === '18' && svg.getAttribute('height') === '18') {
svg.style.display = 'none';
}
});
});
</script>
<style>
/* Hide all <a> elements inside elements with block-container and st-emotion-cache-1eo1tir ea3mdgi5 classes */
.block-container.st-emotion-cache-1eo1tir.ea3mdgi5 a {
display: none !important;
}
/* Ensure links in the sidebar are visible and underlined */
.stSidebar a {
display: inline !important;
text-decoration: underline !important;
color: inherit !important;
}
/* Additional styles */
.section-container {
display: flex;
justify-content: center;
align-items: stretch;
flex-wrap: wrap;
gap: 4px;
}
.section {
flex: 1;
min-width: 150px;
max-width: 90px;
min-height: 150px;
border: 1px solid #afafaf;
border-radius: 10px;
padding: 5px;
background-color: transparent;
margin: 3px;
text-align: center;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
box-sizing: border-box;
font-size: 12px;
transition: background-color 0.3s ease;
}
.section h2 {
color: #afafaf;
font-size: 14px;
margin-bottom: 8px;
border-bottom: 1px solid #afafaf;
padding-bottom: 4px;
text-align: center; /* Center headings */
}
.section p {
color: #afafaf;
font-size: 11px;
margin: 5px 0;
line-height: 1.4;
}
@media (max-width: 100px) {
.section {
min-width: 90%;
max-width: 90%;
}
}
</style>
<h1 style="text-align: center; background: #528186; -webkit-background-clip: text; color: transparent;">How can I help you today?</h1>
<div class="section-container">
<div class="section">
<h2>Find</h2>
<p>Discover all your great TrustBuilders®. <br> Example: Find Development Trust Builders® for World Vision
</div>
<div class="section">
<h2>Create</h2>
<p>Generate trust-optimised solutions : <br>Example: Find World Vision development TrustBuilders®. Then use them to write a 200-word annual report article. Enthusiastic tone.</p>
</div>
<div class="section">
<h2>Trust-optimise</h2>
<p>Paste your LinkedIn profile, EDM or blog and ask TrustAI® to improve it using specific Trust Buckets® and add your specific TrustBuilders® as examples.</p>
</div>
</div>
<div style="height: 50px;"></div> <!-- Adds a gap of 50px after the section containers -->
""", unsafe_allow_html=True)
# Chat box at the bottom of the page (empty placeholder label); must stay at
# top level so `prompt` is defined even after the welcome screen disappears.
prompt = st.chat_input("")
# Main chat handler: runs once per submitted message (the whole script reruns
# on every Streamlit interaction).
if prompt :
    st.session_state["chat_started"] = True
    # Add user message to chat history
    # NOTE(review): role/content dicts are passed to MessagesPlaceholder —
    # LangChain appears to coerce these; confirm against the installed version.
    st.session_state.chat_history.append({"role": "user", "content": prompt})
    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)
    # Generate AI response
    with st.chat_message("assistant"):
        full_response = ""
        try:
            # Generate response using the agent executor.
            # NOTE(review): steering instructions are concatenated directly onto
            # the user's text with no separator, so the model sees one message.
            output = agent_executor.invoke({
                "input": f"{prompt}Be specific with numbers, dates, people, and dollar amount.Search and Provide correct sources of all the results . Respond directly to the query without any introductory phrases or meta-commentary. Your response should be natural and read as if it's addressing the query immediately, without any preamble. Create Creative Headlines using main Trust Bucket targeted always use active language like 'Lets dicover'or 'Watch it grow' etc. ",
                "chat_history": st.session_state.chat_history
            })
            full_response = output["output"]
            st.write(full_response, unsafe_allow_html=True)
            # Display the response
            #st.write(full_response)
            # Second full agent run just to extract follow-up questions — this
            # roughly doubles per-message latency/cost (NOTE(review): a plain
            # llm.invoke would suffice here).
            follow_up_output = agent_executor.invoke({
                "input": f"Extract 1-2 follow-up questions from the following text: {full_response}",
                "chat_history": st.session_state.chat_history
            })
            follow_up_questions = follow_up_output["output"]
            if follow_up_questions:
                st.markdown(f"Follow-up questions:\n{follow_up_questions}")
        except Exception as e:
            # On failure, show (and record below) an apology instead of crashing.
            logger.error(f"Error generating response: {e}")
            full_response = "I apologize, but an error occurred while generating the response. Please try again."
            st.write(full_response)
    # Add AI response to chat history
    st.session_state.chat_history.append({"role": "assistant", "content": full_response})
    # Copy-to-clipboard widget is rendered even for the apology message.
    copy_to_clipboard(full_response)