Spaces:
Sleeping
Sleeping
# LC_Streaming.py
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
import streamlit as st
import os
# from langchain.prompts.chat import (
#     ChatPromptTemplate,
#     SystemMessagePromptTemplate,
#     HumanMessagePromptTemplate,
# )
class StreamHandler(BaseCallbackHandler):
    """LangChain callback that streams LLM tokens into a Streamlit container.

    Each incoming token is appended to an internal buffer, and the whole
    buffer is re-rendered via the container method named by
    ``display_method`` (e.g. ``markdown`` or ``write``).
    """

    def __init__(self, container, initial_text="", display_method='markdown'):
        self.container = container          # Streamlit placeholder (e.g. st.empty())
        self.text = initial_text            # accumulated response text so far
        self.display_method = display_method  # name of the container render method

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        """Append ``token`` to the buffer and re-render the full text."""
        self.text += token
        render = getattr(self.container, self.display_method, None)
        if render is None:
            # The container has no method by that name — misconfiguration.
            raise ValueError(f"Invalid display_method: {self.display_method}")
        render(self.text)
def check_password():
    """Returns `True` if the user had the correct password."""

    def password_entered():
        """Checks whether a password entered by the user is correct."""
        if st.session_state["password"] == os.environ["USER_PWORD"]:
            st.session_state["password_correct"] = True
            # Remove the plaintext password from session state once validated.
            del st.session_state["password"]
        else:
            st.session_state["password_correct"] = False

    if "password_correct" not in st.session_state:
        # First run: show the password prompt only.
        st.text_input(
            "Password", type="password", on_change=password_entered, key="password"
        )
        return False

    if not st.session_state["password_correct"]:
        # Previous attempt failed: prompt again and show an error.
        st.text_input(
            "Password", type="password", on_change=password_entered, key="password"
        )
        st.error("๐ Password incorrect")
        return False

    return True
if check_password():
    # Authenticated: render the feedback UI.
    st.markdown("Get instant feedback on your research question")
    query = st.text_input(
        "Input your research question",
        value="How do biases in AI student evaluations compare to documented biases in human evaluations?",
    )
    ask_button = st.button("ask")

    st.markdown("### GPT-3.5 response")
    chat_box = st.empty()
    # Stream tokens into the placeholder via its `write` method.
    stream_handler = StreamHandler(chat_box, display_method='write')
    chat = ChatOpenAI(streaming=True, callbacks=[stream_handler])

    # Wrap the user's question in delimiters so the model sees it verbatim.
    query_combined = (
        "Please provide constructive feedback in English on this proposed research "
        "question, suggesting how it might be improved: <research_question>"
        + query
        + "</research_question>."
    )

    messages = [
        SystemMessage(
            content=(
                "You are a helpful research assistant that provides feedback to university students and "
                "researchers on their ideas for a research question, in particular for Masters students planning to write a Master's Thesis. "
                "A good research question should be narrow enough that it can be well-addressed in a 20-30 page "
                "literature review, and provides guidance to focus the literature review, and points towards specific areas "
                "of scientific literature that should be included. A good research question also provides guidance on the "
                "methodological (empirical) approach needed to answer the question. "
            )
        ),
        HumanMessage(content=query_combined),
    ]

    # Only invoke the model once a question is present and "ask" was clicked.
    if query and ask_button:
        response = chat(messages)