freddieyeo committed on
Commit
bdb858e
·
2 Parent(s): 805c6e4 9ce7596

Merge branch 'main' of https://huggingface.co/spaces/Yeocoders/Guesssing_game

Browse files
Files changed (1) hide show
  1. app.py +34 -5
app.py CHANGED
@@ -34,18 +34,41 @@ if "messages" not in st.session_state:
34
  st.session_state.messages = []
35
  st.session_state.conversation_id = None
36
  st.session_state.history = []
37
-
38
 
39
  if clear_chat:
40
- st.session_state.messages = []
41
- st.session_state.conversation_id = None
42
- st.session_state.history = []
 
43
 
44
 
45
  def random_response(
46
  message,
 
47
  ):
48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  params = {
50
  'question': message[-1]['content'],
51
  # 'conversation_id': Optional[str] = None,
@@ -54,6 +77,8 @@ def random_response(
54
 
55
  question = {
56
  'message': message[-1]['content'],
 
 
57
  }
58
 
59
  request_session = requests.Session()
@@ -92,14 +117,17 @@ if len(st.session_state.messages) > 0:
92
  for response in random_response(
93
  # model=st.session_state["openai_model"],
94
  message=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages],
 
95
  # conversation_id=st.session_state.conversation_id,
96
  # user=user,
97
  # chat_type=chat_options[chat_option],
98
  # stream=True,
99
  ):
100
- if 'answer' in response:
101
  full_response += response['answer']
102
  message_placeholder.markdown(full_response + "▌")
 
 
103
  # if 'id' in response:
104
  # st.session_state.conversation_id=response["id"]
105
  # # if 'type' in response and response['type'] == 'source':
@@ -120,6 +148,7 @@ if len(st.session_state.messages) > 0:
120
  {
121
  "role": "assistant",
122
  "content": full_response,
 
123
  "prompt": prompt,
124
  "history": st.session_state.history,
125
  # "next_questions": next_questions,
 
34
  st.session_state.messages = []
35
  st.session_state.conversation_id = None
36
  st.session_state.history = []
37
+ st.session_state.answer = None
38
 
39
  if clear_chat:
40
+ st.session_state.messages = []
41
+ st.session_state.conversation_id = None
42
+ st.session_state.history = []
43
+ st.session_state.answer = None
44
 
45
 
46
  def random_response(
47
  message,
48
+ answer: str = None,
49
  ):
50
 
51
+ all_history = []
52
+
53
+ for single_message in message[:-1]:
54
+
55
+ if single_message['role'] == 'user':
56
+ all_history.append( single_message["content"])
57
+ if single_message['role'] == 'assistant':
58
+ all_history.append(single_message["content"])
59
+
60
+ count = 0
61
+ history_single = {}
62
+ history_list = []
63
+ for item in all_history:
64
+ count += 1
65
+ if count % 2 != 0:
66
+ history_single["prompt"] = item
67
+ else:
68
+ history_single["response"] = item
69
+ history_list.append(history_single)
70
+ history_single = {}
71
+
72
  params = {
73
  'question': message[-1]['content'],
74
  # 'conversation_id': Optional[str] = None,
 
77
 
78
  question = {
79
  'message': message[-1]['content'],
80
+ 'history': history_list,
81
+ 'question': question,
82
  }
83
 
84
  request_session = requests.Session()
 
117
  for response in random_response(
118
  # model=st.session_state["openai_model"],
119
  message=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages],
120
+ answer=st.session_state.answer,
121
  # conversation_id=st.session_state.conversation_id,
122
  # user=user,
123
  # chat_type=chat_options[chat_option],
124
  # stream=True,
125
  ):
126
+ if 'questionAnswer' in response:
127
  full_response += response['answer']
128
  message_placeholder.markdown(full_response + "▌")
129
+ if 'answer' in response:
130
+ answer = response['answer']
131
  # if 'id' in response:
132
  # st.session_state.conversation_id=response["id"]
133
  # # if 'type' in response and response['type'] == 'source':
 
148
  {
149
  "role": "assistant",
150
  "content": full_response,
151
+ "answer": answer,
152
  "prompt": prompt,
153
  "history": st.session_state.history,
154
  # "next_questions": next_questions,