# Hugging Face Spaces chatbot app.
# (Removed scraped Spaces status header "Spaces: Sleeping" — not valid Python.)
# NOTE: An earlier implementation (chat_message UI with chain.stream() and
# st.write_stream() against a distilgpt2 test endpoint) previously lived here
# as commented-out dead code. It has been removed; recover it from version
# control history if needed.
import os

import streamlit as st
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEndpoint
from streamlit_chat import message

# --- 1. SETUP ---
# Read the Hugging Face API token injected via the Space's secrets.
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Streamlit page configuration (must be the first st.* call on the page).
st.set_page_config(page_title="AI Mentor")
st.title("AI Mentor")  # fixed: was "AI Mentor's", inconsistent with page_title

# Fail fast with guidance if the token secret is missing.
if not hf_token:
    st.error("Hugging Face API token not set. Please add it to your Space's secrets.")
    st.info("Go to your Space's 'Settings' > 'Secrets' and add a secret named 'HUGGINGFACEHUB_API_TOKEN'.")
    st.stop()
# --- 2. SESSION STATE INITIALIZATION ---
# Seed each per-session key exactly once; later reruns keep existing values.
_defaults = {
    'entered_prompt': "",
    'generated': [],
    'past': [],
    'conversation_history': "",
}
for _key, _value in _defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _value
# --- 3. MODEL AND CHAIN SETUP ---
# System + user prompt template; {question} is filled in per request.
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful AI assistant named AI Mentor. Be polite and concise in your responses."),
    ("user", "{question}"),
])

try:
    # Hosted inference endpoint; the first call may hit a model cold start.
    llm = HuggingFaceEndpoint(
        repo_id="google/gemma-1.1-7b-it",
        huggingfacehub_api_token=hf_token,
        temperature=0.7,
        max_new_tokens=512,
    )
    output_parser = StrOutputParser()
    # LCEL pipeline: template -> model -> plain-string output.
    chain = prompt | llm | output_parser
except Exception as e:
    st.error(f"Failed to initialize the AI model. Error: {e}")
    st.stop()
# --- 4. FUNCTIONS ---
def submit():
    """Move the text-input value into entered_prompt and clear the widget."""
    typed = st.session_state.prompt_input
    st.session_state.entered_prompt = typed
    st.session_state.prompt_input = ""
def generate_response(user_query):
    """Ask the model for a reply to *user_query*, carrying the running history.

    Returns the model's answer as a plain string. On any API failure the
    error is surfaced in the UI and a fallback message is returned instead
    of raising.
    """
    try:
        history = st.session_state.conversation_history
        full_prompt = f"{history}\nUser: {user_query}\nAI:"
        # Endpoint errors (including StopIteration-style ones) surface here.
        answer = chain.invoke({"question": full_prompt})
        # Treat an empty or whitespace-only reply as a failure too.
        if not answer or not answer.strip():
            return "I'm sorry, I received an empty response from the model. Please try again."
        answer = answer.strip()
        st.session_state.conversation_history += f"\nUser: {user_query}\nAI: {answer}"
        return answer
    except Exception as e:
        # UI boundary: report the problem instead of crashing the app.
        error_message = f"An API error occurred: {e}. This might be due to an invalid API token or a model cold start. Please check your token and try again in a moment."
        st.error(error_message)
        return "Sorry, I couldn't get a response. Please see the error above."
# --- 5. APP LAYOUT AND LOGIC ---
st.text_input('YOU: ', key='prompt_input', on_change=submit)

if st.session_state.entered_prompt:
    user_query = st.session_state.entered_prompt
    # Fix: clear the flag immediately so unrelated reruns (any other widget
    # interaction) do not re-submit the same prompt and duplicate messages.
    st.session_state.entered_prompt = ""
    st.session_state.past.append(user_query)
    with st.spinner("Thinking..."):
        output = generate_response(user_query)
    st.session_state.generated.append(output)
# Render the conversation, newest exchange first.
if st.session_state['generated']:
    for i in reversed(range(len(st.session_state['generated']))):
        message(st.session_state["generated"][i], key=str(i))
        message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')