Spaces:
Running
Running
omrihaber
committed on
Commit
·
155af2c
1
Parent(s):
1a9ccf2
wip
Browse files- app.py +114 -0
- requirements.txt +2 -0
app.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import random
|
| 3 |
+
import time
|
| 4 |
+
|
| 5 |
+
from openai import OpenAI
|
| 6 |
+
import pandas as pd
|
| 7 |
+
|
| 8 |
+
import elemeta.nlp.runners.metafeature_extractors_runner as metafeature_extractors_runner
|
| 9 |
+
|
| 10 |
+
from elemeta.nlp.runners.metafeature_extractors_runner import MetafeatureExtractorsRunner
|
| 11 |
+
from elemeta.nlp.extractors.high_level.text_length import TextLength
|
| 12 |
+
from elemeta.nlp.extractors.high_level.text_complexity import TextComplexity
|
| 13 |
+
from elemeta.nlp.extractors.high_level.word_count import WordCount
|
| 14 |
+
from elemeta.nlp.extractors.high_level.detect_language_langdetect import DetectLanguage
|
| 15 |
+
from elemeta.nlp.extractors.high_level.sentiment_polarity import SentimentPolarity
|
| 16 |
+
from elemeta.nlp.extractors.high_level.toxicity_extractor import ToxicityExtractor
|
| 17 |
+
# One runner applied to every prompt/response string; each extractor
# contributes one metafeature column to the dashboards below.
runner = MetafeatureExtractorsRunner(
    metafeature_extractors=[
        TextLength(),
        WordCount(),
        DetectLanguage(),
        SentimentPolarity(),
        TextComplexity(),
        ToxicityExtractor(),
    ]
)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def ask_gpt(messages, model="gpt-4"):
    """Send *messages* to the OpenAI chat API and return the reply text.

    Uses the module-level ``client``; *messages* is the usual list of
    ``{"role": ..., "content": ...}`` dicts.
    """
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
    )
    first_choice = completion.choices[0]
    return first_choice.message.content
|
| 26 |
+
|
| 27 |
+
client = OpenAI()

st.title("GPT-4 Chatbot")
system_prompt = st.text_input("Enter your system's prompt", value="Translate the following into russian")
user_prompt = st.text_input("Enter your user's prompt", value="Hello, how are you?")

messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": user_prompt},
]

# Streamlit reruns this whole script on every widget interaction (including
# the chat widget further down).  Cache the completion keyed on the prompt
# pair so an unchanged prompt does not trigger a fresh (billed) API call.
if st.session_state.get("_oneshot_key") != (system_prompt, user_prompt):
    st.session_state["_oneshot_key"] = (system_prompt, user_prompt)
    st.session_state["_oneshot_output"] = ask_gpt(messages)
output = st.session_state["_oneshot_output"]

st.header("Output")
st.write(output)

st.header("Metafeatures")

# One metafeature row per text, indexed by which prompt it came from.
df = pd.DataFrame(
    [
        runner.run(system_prompt),
        runner.run(user_prompt),
        runner.run(output),
    ]
)
df["prompt"] = ["system", "user", "output"]
df = df.set_index("prompt")

st.dataframe(df)
|
| 56 |
+
|
| 57 |
+
st.header("Chat Monitoring")

st.subheader("Chat")
# Initialize chat history (survives Streamlit reruns).
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored conversation on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input.
if prompt := st.chat_input("What is up?"):
    # Record and display the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Display the assistant response in its own chat container.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        assistant_response = ask_gpt(messages=st.session_state.messages)
        # Simulate a typing stream with a small per-word delay.
        shown = ""
        for chunk in assistant_response.split():
            shown += chunk + " "
            time.sleep(0.05)
            # Blinking cursor while "typing".
            message_placeholder.markdown(shown + "▌")
        # Render and store the model's exact reply.  The word-split loop
        # above collapses newlines/whitespace and leaves a trailing space,
        # so using it for the final render and the history would corrupt
        # the assistant text fed back into later API calls.
        message_placeholder.markdown(assistant_response)
    st.session_state.messages.append({"role": "assistant", "content": assistant_response})
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
st.subheader("Chat Logs")


def _role_metafeatures(history, role, prefix):
    """Metafeature DataFrame for every *role* message in *history*, with every
    column (including the raw text column ``prompt``) prefixed by *prefix*."""
    texts = [message["content"] for message in history if message["role"] == role]
    frame = pd.DataFrame([runner.run(text) for text in texts])
    frame["prompt"] = texts
    frame.columns = prefix + frame.columns.values
    return frame


user_df = _role_metafeatures(st.session_state.messages, "user", "user_")
assistant_df = _role_metafeatures(st.session_state.messages, "assistant", "assistant_")

st.subheader("Logs Metafeatures")
# NOTE(review): axis=1 concat pairs user/assistant rows by position; if a
# turn ever produces a user message without an assistant reply (or vice
# versa) the trailing rows show NaN on one side — confirm that is acceptable.
st.dataframe(pd.concat([user_df, assistant_df], axis=1))
|
requirements.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
elemeta
|
| 2 |
+
openai
|