pratikshahp committed on
Commit
b5a980a
·
verified ·
1 Parent(s): 97f28fb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -11
app.py CHANGED
@@ -156,6 +156,8 @@ def main_loop1(message, history):
156
  return run_action(message, history)
157
 
158
 
 
 
159
  # Function to process user input and actions
160
  def run_action(message, history):
161
  global game_state, game_running # Access the global game state and game status
@@ -166,24 +168,28 @@ def run_action(message, history):
166
  # Handle exit and restart logic
167
  if not game_running:
168
  response = "The game has ended. Type 'restart the game' to play again."
169
- history.append((message, response))
 
170
  return history
171
 
172
  if message.lower() == "start game":
173
  response = game_state["start"]
174
- history.append((message, response))
 
175
  return history
176
 
177
  if message.lower() == "restart the game":
178
  game_state = initialize_game_state()
179
  response = "Game restarted! " + game_state["start"]
180
- history.append((message, response))
 
181
  return history
182
 
183
  if message.lower() == "exit":
184
  game_running = False
185
  response = "The game has ended. Type 'restart the game' to play again."
186
- history.append((message, response))
 
187
  return history
188
 
189
  # Prepare the system prompt for the AI model
@@ -201,12 +207,13 @@ def run_action(message, history):
201
  Town: {game_state['town']}
202
  Your Character: {game_state['character']}"""
203
 
204
- # Convert history to messages for the AI model
205
  messages = [
206
  {"role": "system", "content": system_prompt},
207
  {"role": "user", "content": world_info},
208
  ]
209
 
 
210
  for user_input, assistant_response in history:
211
  messages.append({"role": "user", "content": user_input})
212
  messages.append({"role": "assistant", "content": assistant_response})
@@ -214,7 +221,7 @@ def run_action(message, history):
214
  # Add the user's current action
215
  messages.append({"role": "user", "content": message})
216
 
217
- # Get the model's response
218
  model_output = client.chat.completions.create(
219
  model="meta-llama/Llama-3-70b-chat-hf",
220
  messages=messages,
@@ -222,16 +229,45 @@ def run_action(message, history):
222
 
223
  response = model_output.choices[0].message.content
224
 
225
- # Append the user's input and assistant's response to history
226
- history.append((message, response))
 
227
 
228
  return history
229
 
230
- # Main loop to process the conversation
 
231
  def main_loop(message, history):
232
- history = run_action(message, history)
233
- return history, history
 
 
 
 
 
 
234
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
235
 
236
 
237
 
 
156
  return run_action(message, history)
157
 
158
 
159
+ # Function to process user input and actions
160
+
161
  # Function to process user input and actions
162
  def run_action(message, history):
163
  global game_state, game_running # Access the global game state and game status
 
168
  # Handle exit and restart logic
169
  if not game_running:
170
  response = "The game has ended. Type 'restart the game' to play again."
171
+ history.append({"role": "user", "content": message})
172
+ history.append({"role": "assistant", "content": response})
173
  return history
174
 
175
  if message.lower() == "start game":
176
  response = game_state["start"]
177
+ history.append({"role": "user", "content": message})
178
+ history.append({"role": "assistant", "content": response})
179
  return history
180
 
181
  if message.lower() == "restart the game":
182
  game_state = initialize_game_state()
183
  response = "Game restarted! " + game_state["start"]
184
+ history.append({"role": "user", "content": message})
185
+ history.append({"role": "assistant", "content": response})
186
  return history
187
 
188
  if message.lower() == "exit":
189
  game_running = False
190
  response = "The game has ended. Type 'restart the game' to play again."
191
+ history.append({"role": "user", "content": message})
192
+ history.append({"role": "assistant", "content": response})
193
  return history
194
 
195
  # Prepare the system prompt for the AI model
 
207
  Town: {game_state['town']}
208
  Your Character: {game_state['character']}"""
209
 
210
+ # Prepare the messages for the AI model
211
  messages = [
212
  {"role": "system", "content": system_prompt},
213
  {"role": "user", "content": world_info},
214
  ]
215
 
216
+ # Convert Gradio's history format (list of tuples) to the required AI model format
217
  for user_input, assistant_response in history:
218
  messages.append({"role": "user", "content": user_input})
219
  messages.append({"role": "assistant", "content": assistant_response})
 
221
  # Add the user's current action
222
  messages.append({"role": "user", "content": message})
223
 
224
+ # Call the AI model to get the response
225
  model_output = client.chat.completions.create(
226
  model="meta-llama/Llama-3-70b-chat-hf",
227
  messages=messages,
 
229
 
230
  response = model_output.choices[0].message.content
231
 
232
+ # Append the user input and the AI response to history
233
+ history.append({"role": "user", "content": message})
234
+ history.append({"role": "assistant", "content": response})
235
 
236
  return history
237
 
238
+
# Main loop to process user input and maintain history
def main_loop(message, history):
    """Handle one chat turn and return the assistant's reply.

    Parameters:
        message: the user's latest input string.
        history: chat history as a flat list of {"role": ..., "content": ...}
            dicts (the format run_action appends to), or None/empty on the
            first turn.

    Returns:
        The assistant's response text for this turn.
    """
    history = run_action(message, history or [])
    # run_action appends the user message followed by the assistant reply,
    # so the newest assistant turn is always the last entry.
    # Fix: the previous version returned a (display_pairs, history) 2-tuple,
    # but gr.ChatInterface expects its fn to return a single message; the
    # tuple-pair display list also conflicted with the chatbot's
    # type="messages" setting. Return only the reply text.
    return history[-1]["content"] if history else ""
248
+
249
 
250
# Build and launch the Gradio chat UI.
# The chat panel; type="messages" renders history as role/content dicts.
_chatbot = gr.Chatbot(
    height=300,
    placeholder="Type 'start game' to begin, 'restart the game' to restart, or 'exit' to end the game.",
    type="messages",  # Ensures proper rendering
)

# The input box shown beneath the chat panel.
_textbox = gr.Textbox(
    placeholder="What do you do next?",
    container=False,
    scale=7,
)

# Gradio ChatInterface wired to the game's main loop.
demo = gr.ChatInterface(
    main_loop,
    chatbot=_chatbot,
    textbox=_textbox,
    title="AI RPG",
    theme="Monochrome",
    examples=["Look around", "Continue the story"],
    cache_examples=False,
)

# Launch the Gradio app
demo.launch(share=True, server_name="0.0.0.0")
271
 
272
 
273