| # import os | |
| # import streamlit as st | |
| # from dotenv import load_dotenv | |
| # import google.generativeai as gen_ai | |
| # | |
| # | |
| # # Load environment variables | |
| # load_dotenv() | |
| # | |
| # # Configure Streamlit page settings | |
| # st.set_page_config( | |
| # page_title="Chat with Gemini-Pro!", | |
| # page_icon=":brain:", # Favicon emoji | |
| # layout="centered", # Page layout option | |
| # ) | |
| # | |
| # Google_API_Key = os.getenv("Google_API_Key") | |
| # | |
| # # Set up Google Gemini-Pro AI Model | |
| # gen_ai.configure(api_key=Google_API_Key) | |
| # model = gen_ai.GenerativeModel('gemini-pro') | |
| # | |
| # # Function to translate roles between Gemini-Pro and streamlit terminology | |
| # | |
| # def translate_role_for_streamlit(user_role): | |
| # if user_role == 'model': | |
| # return 'assistant' | |
| # else: return user_role | |
| # | |
| # # Initialize chat session in streamlit if not already present | |
| # if "chat_session" not in st.session_state: | |
| # st.session_state.chat_session = model.start_chat(history=[]) | |
| # | |
| # # Display the chatbot's title on the page | |
| # st.title("π€ Gemini Pro - ChatBot") | |
| # | |
| # # Display the chat history | |
| # for message in st.session_state.chat_session.history: | |
| # with st.chat_message(translate_role_for_streamlit(message.role)): | |
| # st.markdown(message.parts[0].text) | |
| # | |
| # | |
| # # Input field for user's message | |
| # user_prompt = st.chat_input("Ask Gemini Pro...") | |
| # | |
| # if user_prompt: | |
| # # Add user's message to chat and display it | |
| # st.chat_message("user").markdown(user_prompt) | |
| # | |
| # # Send user's message to chat and display it | |
| # gemini_response = st.session_state.chat_session.send_message(user_prompt) | |
| # | |
| # # Display Gemini-Pro's response | |
| # with st.chat_message('assistant'): | |
| # st.markdown(gemini_response.text) | |
| # | |
| # import os | |
| # import json | |
| # import streamlit as st | |
| # import google.generativeai as gen_ai | |
| # from dotenv import load_dotenv | |
| # import speech_recognition as sr | |
| # import pyttsx3 | |
| # | |
| # # Load environment variables | |
| # load_dotenv() | |
| # Google_API_Key = os.getenv("Google_API_Key") | |
| # | |
| # # Configure Streamlit page settings | |
| # st.set_page_config( | |
| # page_title="Chat with Gemini-Pro!", | |
| # page_icon="π€", | |
| # layout="wide", | |
| # ) | |
| # | |
| # # Set up Google Gemini-Pro AI Model | |
| # gen_ai.configure(api_key=Google_API_Key) | |
| # model = gen_ai.GenerativeModel('gemini-pro') | |
| # | |
| # # Initialize chatbot memory | |
| # if "chat_history" not in st.session_state: | |
| # try: | |
| # with open("chat_history.json", "r") as f: | |
| # st.session_state.chat_history = json.load(f) | |
| # except FileNotFoundError: | |
| # st.session_state.chat_history = [] | |
| # | |
| # | |
| # # Save chat history | |
| # def save_chat_history(): | |
| # with open("chat_history.json", "w") as f: | |
| # json.dump(st.session_state.chat_history, f) | |
| # | |
| # | |
| # # Sidebar settings | |
| # with st.sidebar: | |
| # st.subheader("βοΈ Settings") | |
| # | |
| # # Theme selection | |
| # theme = st.radio("Select Theme", ["π Light", "π Dark"]) | |
| # | |
| # # Clear chat history button | |
| # if st.button("ποΈ Clear Chat History"): | |
| # st.session_state.chat_history = [] | |
| # save_chat_history() | |
| # st.experimental_rerun() | |
| # | |
| # # Apply dark mode styling | |
| # if theme == "π Dark": | |
| # st.markdown( | |
| # """ | |
| # <style> | |
| # body {background-color: #333; color: white;} | |
| # .stChatMessage {background-color: #444; color: white;} | |
| # </style> | |
| # """, | |
| # unsafe_allow_html=True | |
| # ) | |
| # | |
| # # Display chatbot title | |
| # st.title("π€ Gemini Pro - ChatBot") | |
| # | |
| # # Display chat history | |
| # for message in st.session_state.chat_history: | |
| # with st.chat_message(message["role"]): | |
| # st.markdown(message["content"]) | |
| # | |
| # # Speech recognition & text-to-speech setup | |
| # recognizer = sr.Recognizer() | |
| # engine = pyttsx3.init() | |
| # | |
| # # Voice input button | |
| # if st.button("π€ Speak"): | |
| # with sr.Microphone() as source: | |
| # st.write("Listening...") | |
| # audio = recognizer.listen(source) | |
| # try: | |
| # user_prompt = recognizer.recognize_google(audio) | |
| # st.chat_message("user").markdown(user_prompt) | |
| # except: | |
| # st.error("Could not understand. Try again.") | |
| # | |
| # # Text input | |
| # user_prompt = st.chat_input("Ask Gemini Pro...") | |
| # | |
| # if user_prompt: | |
| # # Add user message to chat history | |
| # st.session_state.chat_history.append({"role": "user", "content": user_prompt}) | |
| # | |
| # # Display user message | |
| # st.chat_message("user").markdown(user_prompt) | |
| # | |
| # # Get response from Gemini-Pro | |
| # gemini_response = model.generate_content(user_prompt) | |
| # | |
| # # Add chatbot response to history | |
| # st.session_state.chat_history.append({"role": "assistant", "content": gemini_response.text}) | |
| # | |
| # # Display chatbot response | |
| # with st.chat_message("assistant"): | |
| # st.markdown(gemini_response.text) | |
| # | |
| # # Save chat history | |
| # save_chat_history() | |
| # | |
| # # Text-to-Speech response | |
| # engine.say(gemini_response.text) | |
| # engine.runAndWait() | |
| # import os | |
| # import json | |
| # import streamlit as st | |
| # import google.generativeai as gen_ai | |
| # from dotenv import load_dotenv | |
| # import speech_recognition as sr | |
| # import pyttsx3 | |
| # | |
| # # Load environment variables | |
| # load_dotenv() | |
| # Google_API_Key = os.getenv("Google_API_Key") | |
| # | |
| # # Configure Streamlit page settings | |
| # st.set_page_config( | |
| # page_title="Chat with Gemini-Pro!", | |
| # page_icon="π€", | |
| # layout="wide", | |
| # ) | |
| # | |
| # # Apply custom CSS for UI/UX improvements | |
| # st.markdown( | |
| # """ | |
| # <style> | |
| # .stChatMessage { padding: 10px; border-radius: 10px; margin: 5px 0; } | |
| # .user { background-color: #DCF8C6; } | |
| # .assistant { background-color: #E3E3E3; } | |
| # .sidebar .sidebar-content { background-color: #f0f2f6; } | |
| # .chat-container { max-width: 700px; margin: auto; } | |
| # </style> | |
| # """, | |
| # unsafe_allow_html=True | |
| # ) | |
| # | |
| # # Set up Google Gemini-Pro AI Model | |
| # gen_ai.configure(api_key=Google_API_Key) | |
| # model = gen_ai.GenerativeModel('gemini-pro') | |
| # | |
| # # Initialize chatbot memory | |
| # if "chat_history" not in st.session_state: | |
| # try: | |
| # with open("chat_history.json", "r") as f: | |
| # st.session_state.chat_history = json.load(f) | |
| # except (FileNotFoundError, json.JSONDecodeError): | |
| # st.session_state.chat_history = [] | |
| # | |
| # | |
| # # Save chat history | |
| # def save_chat_history(): | |
| # with open("chat_history.json", "w") as f: | |
| # json.dump(st.session_state.chat_history, f) | |
| # | |
| # | |
| # # Sidebar settings | |
| # with st.sidebar: | |
| # st.subheader("βοΈ Settings") | |
| # | |
| # # Theme selection | |
| # theme = st.radio("Select Theme", ["π Light", "π Dark"]) | |
| # | |
| # # Clear chat history button | |
| # if st.button("ποΈ Clear Chat History"): | |
| # st.session_state.chat_history = [] | |
| # save_chat_history() | |
| # st.experimental_rerun() | |
| # | |
| # # Apply dark mode styling | |
| # if theme == "π Dark": | |
| # st.markdown( | |
| # """ | |
| # <style> | |
| # body {background-color: #1e1e1e; color: white;} | |
| # .stChatMessage {background-color: #444; color: white;} | |
| # .sidebar .sidebar-content {background-color: #333;} | |
| # .stTextInput, .stButton, .stRadio {background-color: #333; color: white;} | |
| # </style> | |
| # """, | |
| # unsafe_allow_html=True | |
| # ) | |
| # | |
| # # Display chatbot title | |
| # st.title("π€ Gemini Pro - AI ChatBot") | |
| # | |
| # # Chat container | |
| # st.markdown('<div class="chat-container">', unsafe_allow_html=True) | |
| # | |
| # # Display chat history | |
| # for message in st.session_state.chat_history: | |
| # role = message["role"] | |
| # avatar = "π€" if role == "user" else "π€" | |
| # bg_color = "user" if role == "user" else "assistant" | |
| # | |
| # with st.chat_message(role): | |
| # st.markdown(f'<div class="{bg_color} stChatMessage">{avatar} {message["content"]}</div>', | |
| # unsafe_allow_html=True) | |
| # | |
| # # Speech recognition & text-to-speech setup | |
| # recognizer = sr.Recognizer() | |
| # # engine = pyttsx3.init() | |
| # engine = pyttsx3.init() | |
| # engine.setProperty('rate', 150) # Adjust speech rate | |
| # engine.setProperty('voice', engine.getProperty('voices')[0].id) # Set a specific voice | |
| # | |
| # # Voice input button | |
| # if st.button("π€ Speak"): | |
| # with sr.Microphone() as source: | |
| # st.write("Listening...") | |
| # audio = recognizer.listen(source) | |
| # try: | |
| # user_prompt = recognizer.recognize_google(audio) | |
| # st.session_state.chat_history.append({"role": "user", "content": user_prompt}) | |
| # st.chat_message("user").markdown(user_prompt) | |
| # # Trigger chatbot response | |
| # gemini_response = model.generate_content(user_prompt) | |
| # st.session_state.chat_history.append({"role": "assistant", "content": gemini_response.text}) | |
| # st.chat_message("assistant").markdown(gemini_response.text) | |
| # save_chat_history() | |
| # engine.say(gemini_response.text) | |
| # engine.runAndWait() | |
| # except sr.UnknownValueError: | |
| # st.error("Sorry, I could not understand the audio.") | |
| # except sr.RequestError: | |
| # st.error("Could not request results from the speech recognition service.") | |
| # # Text input | |
| # user_prompt = st.chat_input("Ask Gemini Pro...") | |
| # | |
| # if user_prompt: | |
| # # Add user message to chat history | |
| # st.session_state.chat_history.append({"role": "user", "content": user_prompt}) | |
| # | |
| # # Display user message | |
| # st.chat_message("user").markdown(user_prompt) | |
| # | |
| # # Get response from Gemini-Pro | |
| # # gemini_response = model.generate_content(user_prompt) | |
| # try: | |
| # gemini_response = model.generate_content(user_prompt) | |
| # except Exception as e: | |
| # st.error(f"An error occurred: {e}") | |
| # gemini_response = type('Object', (), {'text': 'Sorry, I could not generate a response.'}) | |
| # | |
| # # Add chatbot response to history | |
| # st.session_state.chat_history.append({"role": "assistant", "content": gemini_response.text}) | |
| # | |
| # # Display chatbot response | |
| # with st.chat_message("assistant"): | |
| # st.markdown(gemini_response.text) | |
| # | |
| # # Save chat history | |
| # save_chat_history() | |
| # | |
| # # Text-to-Speech response | |
| # engine.say(gemini_response.text) | |
| # engine.runAndWait() | |
| # | |
| # st.markdown('</div>', unsafe_allow_html=True) | |
| # | |
| # import os | |
| # import streamlit as st | |
| # import google.generativeai as gen_ai | |
| # import pyttsx3 | |
| # import threading | |
| # from dotenv import load_dotenv | |
| # | |
| # # Load environment variables | |
| # load_dotenv() | |
| # | |
| # # Configure Streamlit page settings | |
| # st.set_page_config( | |
| # page_title="Gemini-Pro ChatBot", | |
| # page_icon="π€", # Favicon emoji | |
| # layout="centered", # Page layout option | |
| # ) | |
| # | |
| # # Retrieve Google API Key | |
| # Google_API_Key = os.getenv("Google_API_Key") | |
| # | |
| # # Set up Google Gemini-Pro AI Model | |
| # gen_ai.configure(api_key=Google_API_Key) | |
| # model = gen_ai.GenerativeModel('gemini-pro') | |
| # | |
| # # Function to translate roles between Gemini-Pro and Streamlit terminology | |
| # def translate_role_for_streamlit(user_role): | |
| # return "assistant" if user_role == "model" else user_role | |
| # | |
| # # Function to handle text-to-speech (TTS) in a separate thread | |
| # def speak_text(text): | |
| # engine = pyttsx3.init() | |
| # engine.say(text) | |
| # engine.runAndWait() | |
| # | |
| # # Initialize chat session in Streamlit if not already present | |
| # if "chat_session" not in st.session_state: | |
| # st.session_state.chat_session = model.start_chat(history=[]) | |
| # | |
| # # Display chatbot title and description | |
| # st.markdown("<h1 style='text-align: center; color: #4A90E2;'>π€ Gemini-Pro ChatBot</h1>", unsafe_allow_html=True) | |
| # st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True) | |
| # | |
| # # Display chat history | |
| # for message in st.session_state.chat_session.history: | |
| # with st.chat_message(translate_role_for_streamlit(message.role)): | |
| # st.markdown(message.parts[0].text) | |
| # | |
| # # User input field | |
| # user_prompt = st.chat_input("Ask Gemini Pro...") | |
| # | |
| # # If user enters a prompt | |
| # if user_prompt: | |
| # # Display user's message | |
| # st.chat_message("user").markdown(user_prompt) | |
| # | |
| # # Show a loading indicator while waiting for a response | |
| # with st.spinner("Thinking..."): | |
| # gemini_response = st.session_state.chat_session.send_message(user_prompt) | |
| # | |
| # # Display Gemini-Pro's response | |
| # with st.chat_message("assistant"): | |
| # st.markdown(gemini_response.text) | |
| # | |
| # # Run text-to-speech in the background | |
| # threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start() | |
| # | |
| # | |
| # | |
| # | |
| # | |
| # | |
| # | |
| # | |
| # | |
| # | |
| # import os | |
| # import streamlit as st | |
| # import google.generativeai as gen_ai | |
| # import pyttsx3 | |
| # import threading | |
| # from dotenv import load_dotenv | |
| # | |
| # # Load environment variables | |
| # load_dotenv() | |
| # | |
| # # Configure Streamlit page settings | |
| # st.set_page_config( | |
| # page_title="Gemini-Pro ChatBot", | |
| # page_icon="π€", | |
| # layout="centered", | |
| # ) | |
| # | |
| # # Retrieve Google API Key | |
| # Google_API_Key = os.getenv("Google_API_Key") | |
| # | |
| # # Set up Google Gemini-Pro AI Model | |
| # gen_ai.configure(api_key=Google_API_Key) | |
| # model = gen_ai.GenerativeModel('gemini-pro') | |
| # | |
| # # Function to translate roles between Gemini-Pro and Streamlit terminology | |
| # def translate_role_for_streamlit(user_role): | |
| # return "assistant" if user_role == "model" else user_role | |
| # | |
| # # Initialize text-to-speech engine | |
| # if "tts_engine" not in st.session_state: | |
| # st.session_state.tts_engine = pyttsx3.init() | |
| # | |
| # def stop_speech(): | |
| # """Stop the current speech if running.""" | |
| # st.session_state.tts_engine.stop() | |
| # | |
| # def speak_text(text): | |
| # """Stop previous speech and start speaking new text.""" | |
| # stop_speech() # Stop any ongoing speech | |
| # st.session_state.tts_engine.say(text) | |
| # st.session_state.tts_engine.runAndWait() | |
| # | |
| # # Initialize chat session in Streamlit if not already present | |
| # if "chat_session" not in st.session_state: | |
| # st.session_state.chat_session = model.start_chat(history=[]) | |
| # | |
| # # Display chatbot title and description | |
| # st.markdown("<h1 style='text-align: center; color: #4A90E2;'>π€ Gemini-Pro ChatBot</h1>", unsafe_allow_html=True) | |
| # st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True) | |
| # | |
| # # Display chat history | |
| # for message in st.session_state.chat_session.history: | |
| # with st.chat_message(translate_role_for_streamlit(message.role)): | |
| # st.markdown(message.parts[0].text) | |
| # | |
| # # User input field | |
| # user_prompt = st.chat_input("Ask Gemini Pro...") | |
| # | |
| # # If user enters a prompt | |
| # if user_prompt: | |
| # # Display user's message | |
| # st.chat_message("user").markdown(user_prompt) | |
| # | |
| # # Show a loading indicator while waiting for a response | |
| # with st.spinner("Thinking..."): | |
| # gemini_response = st.session_state.chat_session.send_message(user_prompt) | |
| # | |
| # # Display Gemini-Pro's response | |
| # with st.chat_message("assistant"): | |
| # st.markdown(gemini_response.text) | |
| # | |
| # # Run text-to-speech in the background (stopping previous speech first) | |
| # threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start() | |
| # import os | |
| # import streamlit as st | |
| # import google.generativeai as gen_ai | |
| # import pyttsx3 | |
| # import threading | |
| # from dotenv import load_dotenv | |
| # | |
| # # Load environment variables | |
| # load_dotenv() | |
| # | |
| # # Configure Streamlit page settings | |
| # st.set_page_config( | |
| # page_title="Gemini-Pro ChatBot", | |
| # page_icon="π€", | |
| # layout="centered", | |
| # ) | |
| # | |
| # # Retrieve Google API Key | |
| # Google_API_Key = os.getenv("Google_API_Key") | |
| # | |
| # # Set up Google Gemini-Pro AI Model | |
| # gen_ai.configure(api_key=Google_API_Key) | |
| # model = gen_ai.GenerativeModel('gemini-pro') | |
| # | |
| # # Function to translate roles between Gemini-Pro and Streamlit terminology | |
| # def translate_role_for_streamlit(user_role): | |
| # return "assistant" if user_role == "model" else user_role | |
| # | |
| # # Initialize text-to-speech engine | |
| # if "tts_engine" not in st.session_state: | |
| # st.session_state.tts_engine = pyttsx3.init() | |
| # | |
| # # Initialize threading event for speech control | |
| # if "speech_event" not in st.session_state: | |
| # st.session_state.speech_event = threading.Event() | |
| # | |
| # def stop_speech(): | |
| # """Stop the current speech if running.""" | |
| # st.session_state.speech_event.set() # Set the event to stop speech | |
| # st.session_state.tts_engine.stop() | |
| # | |
| # def speak_text(text): | |
| # """Stop previous speech and start speaking new text.""" | |
| # stop_speech() # Stop any ongoing speech | |
| # st.session_state.speech_event.clear() # Clear the event for new speech | |
| # st.session_state.tts_engine.say(text) | |
| # st.session_state.tts_engine.runAndWait() | |
| # | |
| # # Initialize chat session in Streamlit if not already present | |
| # if "chat_session" not in st.session_state: | |
| # st.session_state.chat_session = model.start_chat(history=[]) | |
| # | |
| # # Display chatbot title and description | |
| # st.markdown("<h1 style='text-align: center; color: #4A90E2;'>π€ Gemini-Pro ChatBot</h1>", unsafe_allow_html=True) | |
| # st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True) | |
| # | |
| # # Display chat history | |
| # for message in st.session_state.chat_session.history: | |
| # with st.chat_message(translate_role_for_streamlit(message.role)): | |
| # st.markdown(message.parts[0].text) | |
| # | |
| # # User input field | |
| # user_prompt = st.chat_input("Ask Gemini Pro...") | |
| # | |
| # # If user enters a prompt | |
| # if user_prompt: | |
| # # Display user's message | |
| # st.chat_message("user").markdown(user_prompt) | |
| # | |
| # # Show a loading indicator while waiting for a response | |
| # with st.spinner("Thinking..."): | |
| # gemini_response = st.session_state.chat_session.send_message(user_prompt) | |
| # | |
| # # Display Gemini-Pro's response | |
| # with st.chat_message("assistant"): | |
| # st.markdown(gemini_response.text) | |
| # | |
| # # Run text-to-speech in the background (stopping previous speech first) | |
| # threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start() | |
| # | |
| # import os | |
| # import streamlit as st | |
| # import google.generativeai as gen_ai | |
| # import pyttsx3 | |
| # import threading | |
| # from dotenv import load_dotenv | |
| # | |
| # # Load environment variables | |
| # load_dotenv() | |
| # | |
| # # Configure Streamlit page settings | |
| # st.set_page_config( | |
| # page_title="Gemini-Pro ChatBot", | |
| # page_icon="π€", | |
| # layout="centered", | |
| # ) | |
| # | |
| # # Retrieve Google API Key | |
| # Google_API_Key = os.getenv("Google_API_Key") | |
| # | |
| # # Set up Google Gemini-Pro AI Model | |
| # gen_ai.configure(api_key=Google_API_Key) | |
| # model = gen_ai.GenerativeModel('gemini-pro') | |
| # | |
| # # Function to translate roles between Gemini-Pro and Streamlit terminology | |
| # def translate_role_for_streamlit(user_role): | |
| # return "assistant" if user_role == "model" else user_role | |
| # | |
| # # Initialize text-to-speech engine | |
| # if "tts_engine" not in st.session_state: | |
| # st.session_state.tts_engine = pyttsx3.init() | |
| # | |
| # def stop_speech(): | |
| # """Stop the current speech if running.""" | |
| # st.session_state.tts_engine.stop() | |
| # | |
| # def speak_text(text): | |
| # """Stop previous speech and start speaking new text.""" | |
| # stop_speech() # Stop any ongoing speech | |
| # st.session_state.tts_engine.say(text) | |
| # st.session_state.tts_engine.runAndWait() | |
| # | |
| # # Initialize chat session in Streamlit if not already present | |
| # if "chat_session" not in st.session_state: | |
| # st.session_state.chat_session = model.start_chat(history=[]) | |
| # | |
| # # Display chatbot title and description | |
| # st.markdown("<h1 style='text-align: center; color: #4A90E2;'>π€ Gemini-Pro ChatBot</h1>", unsafe_allow_html=True) | |
| # st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True) | |
| # | |
| # # Display chat history | |
| # for message in st.session_state.chat_session.history: | |
| # with st.chat_message(translate_role_for_streamlit(message.role)): | |
| # st.markdown(message.parts[0].text) | |
| # | |
| # # User input field | |
| # user_prompt = st.chat_input("Ask Gemini Pro...") | |
| # | |
| # # If user enters a prompt | |
| # if user_prompt: | |
| # # Display user's message | |
| # st.chat_message("user").markdown(user_prompt) | |
| # | |
| # # Show a loading indicator while waiting for a response | |
| # with st.spinner("Thinking..."): | |
| # gemini_response = st.session_state.chat_session.send_message(user_prompt) | |
| # | |
| # # Display Gemini-Pro's response | |
| # with st.chat_message("assistant"): | |
| # st.markdown(gemini_response.text) | |
| # | |
| # # Run text-to-speech in the background (stopping previous speech first) | |
| # threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start() | |
| # | |
| # | |
| # | |
| # | |
| # | |
| # | |
| # import os | |
| # import streamlit as st | |
| # import google.generativeai as gen_ai | |
| # import pyttsx3 | |
| # import threading | |
| # from dotenv import load_dotenv | |
| # import speech_recognition as sr | |
| # | |
| # # Load environment variables | |
| # load_dotenv() | |
| # | |
| # # Configure Streamlit page settings | |
| # st.set_page_config( | |
| # page_title="Gemini-Pro ChatBot", | |
| # page_icon="π€", # Favicon emoji | |
| # layout="centered", # Page layout option | |
| # ) | |
| # | |
| # # Retrieve Google API Key | |
| # Google_API_Key = os.getenv("Google_API_Key") | |
| # | |
| # # Set up Google Gemini-Pro AI Model | |
| # gen_ai.configure(api_key=Google_API_Key) | |
| # model = gen_ai.GenerativeModel('gemini-pro') | |
| # | |
| # # Initialize text-to-speech engine | |
| # if "tts_engine" not in st.session_state: | |
| # st.session_state.tts_engine = pyttsx3.init() | |
| # | |
| # # Speech-to-text function | |
| # def listen_for_input(): | |
| # recognizer = sr.Recognizer() | |
| # with sr.Microphone() as source: | |
| # print("Listening...") | |
| # audio = recognizer.listen(source) | |
| # try: | |
| # return recognizer.recognize_google(audio) | |
| # except sr.UnknownValueError: | |
| # return "Sorry, I did not catch that." | |
| # except sr.RequestError: | |
| # return "Sorry, there was an error with the speech recognition service." | |
| # | |
| # # Stop previous speech and speak the new text | |
| # def speak_text(text): | |
| # stop_speech() # Stop any ongoing speech | |
| # st.session_state.tts_engine.say(text) | |
| # st.session_state.tts_engine.runAndWait() | |
| # | |
| # # Stop ongoing speech | |
| # def stop_speech(): | |
| # st.session_state.tts_engine.stop() | |
| # | |
| # # Function to translate roles between Gemini-Pro and Streamlit terminology | |
| # def translate_role_for_streamlit(user_role): | |
| # return "assistant" if user_role == "model" else user_role | |
| # | |
| # # Initialize chat session in Streamlit if not already present | |
| # if "chat_session" not in st.session_state: | |
| # st.session_state.chat_session = model.start_chat(history=[]) | |
| # | |
| # # Display chatbot title and description | |
| # st.markdown("<h1 style='text-align: center; color: #4A90E2;'>π€ Gemini-Pro ChatBot</h1>", unsafe_allow_html=True) | |
| # st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True) | |
| # | |
| # # Display chat history | |
| # for message in st.session_state.chat_session.history: | |
| # with st.chat_message(translate_role_for_streamlit(message.role)): | |
| # st.markdown(message.parts[0].text) | |
| # | |
| # # User input field (with optional speech-to-text input) | |
| # user_prompt = st.chat_input("Ask Gemini Pro...") | |
| # | |
| # if st.button("Use Voice Input"): | |
| # user_prompt = listen_for_input() | |
| # | |
| # # If user enters a prompt | |
| # if user_prompt: | |
| # # Display user's message | |
| # st.chat_message("user").markdown(user_prompt) | |
| # | |
| # # Show a loading indicator while waiting for a response | |
| # with st.spinner("Thinking..."): | |
| # gemini_response = st.session_state.chat_session.send_message(user_prompt) | |
| # | |
| # # Display Gemini-Pro's response | |
| # with st.chat_message("assistant"): | |
| # st.markdown(gemini_response.text) | |
| # | |
| # # Run text-to-speech in the background | |
| # threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start() | |
| import os | |
| import streamlit as st | |
| import google.generativeai as gen_ai | |
| import pyttsx3 | |
| import threading | |
| from dotenv import load_dotenv | |
| # Load environment variables | |
| load_dotenv() | |
| # Configure Streamlit page settings | |
| st.set_page_config( | |
| page_title="Gemini-Pro ChatBot", | |
| page_icon="π€", # Favicon emoji | |
| layout="centered", # Page layout option | |
| ) | |
| # Retrieve Google API Key | |
| Google_API_Key = os.getenv("Google_API_Key") | |
| # Set up Google Gemini-Pro AI Model | |
| gen_ai.configure(api_key=Google_API_Key) | |
| model = gen_ai.GenerativeModel('gemini-pro') | |
# Map Gemini history role names onto the names Streamlit's chat UI expects.
def translate_role_for_streamlit(user_role):
    """Return the Streamlit chat role for a Gemini history role.

    Gemini labels its own turns "model"; Streamlit renders them under the
    "assistant" role. Every other role (e.g. "user") passes through unchanged.
    """
    if user_role == "model":
        return "assistant"
    return user_role
# Create the text-to-speech engine once per session and cache it in
# session state so Streamlit reruns reuse the same engine instance.
if "tts_engine" not in st.session_state:
    st.session_state["tts_engine"] = pyttsx3.init()
# Interrupt whatever the session TTS engine is currently saying.
def stop_speech():
    """Stop the session's text-to-speech engine, if one has been created."""
    if "tts_engine" in st.session_state:
        st.session_state["tts_engine"].stop()
# Speak `text` aloud; designed to run on a background (non-Streamlit) thread.
def speak_text(text):
    """Cancel any ongoing speech, then speak `text` via the session TTS engine.

    This function is launched on a daemon thread (see the caller below), so it
    must not issue Streamlit commands: calls such as `st.error` from a thread
    without a ScriptRunContext are dropped and never render. Errors are
    therefore reported to the console instead.
    """
    try:
        stop_speech()  # Stop any ongoing speech before starting the new one
        st.session_state.tts_engine.say(text)
        st.session_state.tts_engine.runAndWait()  # Blocks this thread until speech finishes
    except Exception as e:
        # NOTE(review): pyttsx3 raises if runAndWait() is re-entered while a
        # previous loop is still running — confirm overlapping replies are rare.
        print(f"Error in TTS: {e}")
# Start the Gemini chat session once per browser session; reruns reuse it,
# which is what preserves the conversation history across interactions.
if "chat_session" not in st.session_state:
    st.session_state["chat_session"] = model.start_chat(history=[])
# Display chatbot title and description.
# Fix: the header emoji was mojibake ("π€") from an encoding mishap; restored to 🤖.
st.markdown("<h1 style='text-align: center; color: #4A90E2;'>🤖 Gemini-Pro ChatBot</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True)
# Replay the conversation so far, so the transcript survives Streamlit reruns.
for msg in st.session_state.chat_session.history:
    role = translate_role_for_streamlit(msg.role)
    with st.chat_message(role):
        st.markdown(msg.parts[0].text)
# Chat input box pinned to the bottom of the page; returns None until submitted.
user_prompt = st.chat_input("Ask Gemini Pro...")

if user_prompt:
    # Echo the user's message into the transcript.
    with st.chat_message("user"):
        st.markdown(user_prompt)

    # Ask Gemini for a reply, showing a spinner while we wait on the network.
    with st.spinner("Thinking..."):
        gemini_response = st.session_state.chat_session.send_message(user_prompt)

    # Render the assistant's reply.
    with st.chat_message("assistant"):
        st.markdown(gemini_response.text)

    # Speak the reply on a daemon thread so the UI is not blocked while talking.
    threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start()