Spaces:
Sleeping
Sleeping
Upload app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""app.ipynb
|
| 3 |
+
|
| 4 |
+
Automatically generated by Colab.
|
| 5 |
+
|
| 6 |
+
Original file is located at
|
| 7 |
+
https://colab.research.google.com/drive/1eWv35WwsifbbT9K-FV8p8S_FAEekBnLz
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import os
import re

import pandas as pd
import streamlit as st
from together import Together

from utils.helper import *
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# --- Page setup -------------------------------------------------------------
# NOTE(review): the llama emoji was mojibake ("π¦") in the uploaded file;
# restored to 🦙 (Llama3 branding). The manual-section emoji was likewise
# garbled — 📖 assumed for "Instruction Manual"; confirm against the original.
st.set_page_config(layout="wide")
st.title("Duel Agent Simulation 🦙🦙")


with st.sidebar:
    # Collapsible user manual so the sidebar stays compact by default.
    with st.expander("Instruction Manual"):
        st.markdown("""
        # 🦙🦙 Duel Agent Simulation Streamlit App

        ## Overview

        Welcome to the **Duel Agent Simulation** app! This Streamlit application allows you to chat with Meta's Llama3 model in a unique interview simulation. The app features two agents in an interview scenario, with a judge providing feedback. The best part? You simply provide a topic, and the simulation runs itself!

        ## Features

        ### 📖 Instruction Manual

        **Meta Llama3 🦙 Chatbot**

        This application lets you interact with Meta's Llama3 model through a fun interview-style chat.

        **How to Use:**
        1. **Input:** Type a topic into the input box labeled "Enter a topic".
        2. **Submit:** Press the "Submit" button to start the simulation.
        3. **Chat History:** View the previous conversations as the simulation unfolds.

        **Credits:**
        - **Developer:** Yiqiao Yin
        - [Site](https://www.y-yin.io/)
        - [LinkedIn](https://www.linkedin.com/in/yiqiaoyin/)
        - [YouTube](https://youtube.com/YiqiaoYin/)
        """)
|
| 49 |
+
|
| 50 |
+
# --- User controls ----------------------------------------------------------

# Topic the interview simulation revolves around.
user_topic = st.text_input("Enter a topic", "Data Science")

# Kicks off the self-running simulation loop further below.
submit_button = st.button("Run Simulation!")

# Wipe the stored conversation and redraw the page from scratch.
if st.button("Clear Session"):
    st.session_state.messages = []
    # st.experimental_rerun() was deprecated and removed in Streamlit >= 1.30;
    # st.rerun() is the supported replacement.
    st.rerun()


# Initialize chat history (survives reruns via session_state).
if "messages" not in st.session_state:
    st.session_state.messages = []


# Replay chat messages from history on every app rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
# --- Agents -----------------------------------------------------------------
# All three roles are backed by the same LLM helper from utils.helper; the
# behavioral differences come entirely from the prompts each role receives.
interviewer = call_llama
interviewee = call_llama
judge = call_llama

# Safety cap: the loop's only exit is an LLM-issued passing grade, so without
# a bound it could run (and bill API calls) forever.
MAX_ROUNDS = 25

# Warm-up rounds during which the interviewee answers without seeing the
# judge's feedback (matches the original iter < 5 threshold).
WARMUP_ROUNDS = 5


def _judge_passed(feedback: str) -> bool:
    """Return True when the judge's feedback awards a rating of 8 or higher.

    The judge is prompted to rate answers on a 1-10 scale. We look for a
    standalone number in the 1-10 range instead of the raw substring '8',
    which previously missed ratings of 9 and 10 and false-matched strings
    like '18', '3/8', or an incidental '8' anywhere in the prose.
    """
    ratings = [int(m) for m in re.findall(r"\b(?:10|[1-9])\b", feedback)]
    return any(score >= 8 for score in ratings)


# Per-round transcript, accumulated so the summary table shows full history.
round_idx = 0  # renamed from `iter`, which shadowed the builtin
list_of_iters = []
list_of_questions = []
list_of_answers = []
list_of_judge_comments = []
list_of_passes = []

if submit_button:

    # Seed the conversation: ask the interviewer agent for an opening
    # question about the user-supplied topic.
    prompt = f"Ask a question about this topic: {user_topic}"

    # Display convention: user = interviewee, assistant = interviewer.
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Defined up front so the post-warm-up branch can never reference an
    # unbound name, regardless of how the thresholds are tuned.
    judge_comments = ""

    while round_idx < MAX_ROUNDS:

        # Interviewer asks a question.
        question = interviewer(prompt)
        st.chat_message("assistant").markdown(question)
        st.session_state.messages.append({"role": "assistant", "content": question})

        # Interviewee attempts an answer; after the warm-up rounds it also
        # receives the judge's latest feedback to learn from.
        if round_idx < WARMUP_ROUNDS:
            answer = interviewee(
                f"""
                Answer the question: {question} in a mediocre way
                Because you are an inexperienced interviewee.
                """
            )
        else:
            answer = interviewee(
                f"""
                Answer the question: {question} in a mediocre way
                Because you are an inexperienced interviewee but you really want to learn,
                so you learn from the judge comments: {judge_comments}
                """
            )
        st.chat_message("user").markdown(answer)
        st.session_state.messages.append({"role": "user", "content": answer})

        # Judge scores the answer; feedback appears only in the summary
        # table below, not in the chat stream.
        judge_comments = judge(
            f"""
            The question is: {question}
            The answer is: {answer}
            Provide feedback and rate the answer from 1 to 10 while 10 being the best and 1 is the worst.
            """
        )

        # Record this round. One pass/fail decision drives both the table's
        # "Passed" column and the stopping rule, so they can never disagree.
        passed = _judge_passed(judge_comments)
        list_of_iters.append(round_idx)
        list_of_questions.append(question)
        list_of_answers.append(answer)
        list_of_judge_comments.append(judge_comments)
        list_of_passes.append(1 if passed else 0)
        results_tab = pd.DataFrame({
            "Iter.": list_of_iters,
            "Questions": list_of_questions,
            "Answers": list_of_answers,
            "Judge Comments": list_of_judge_comments,
            "Passed": list_of_passes,
        })

        with st.expander("See explanation"):
            st.table(results_tab)

        # Stopping rule: the judge awarded a passing grade (>= 8 out of 10).
        if passed:
            break

        round_idx += 1
|