Seth0330 committed on
Commit
5b72a23
·
verified ·
1 Parent(s): 67ac695

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -17
app.py CHANGED
@@ -4,11 +4,9 @@ import json
4
  import requests
5
  import traceback
6
 
7
- # --- Page config
8
  st.set_page_config(page_title="JSON-Backed AI Chat Agent", layout="wide")
9
  st.title("JSON-Backed AI Chat Agent")
10
 
11
- # --- Load API key
12
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
13
  if not OPENAI_API_KEY:
14
  st.error("❌ OPENAI_API_KEY not set in Settings → Secrets.")
@@ -19,13 +17,11 @@ HEADERS = {
19
  "Content-Type": "application/json",
20
  }
21
 
22
- # --- Sidebar: Multiple JSON upload & preview
23
  st.sidebar.header("Upload Multiple JSON Files")
24
  uploaded_files = st.sidebar.file_uploader(
25
  "Choose one or more JSON files", type="json", accept_multiple_files=True
26
  )
27
 
28
- # --- Session State for data and chat
29
  if "json_data" not in st.session_state:
30
  st.session_state.json_data = {}
31
  if "messages" not in st.session_state:
@@ -33,7 +29,6 @@ if "messages" not in st.session_state:
33
  if "temp_input" not in st.session_state:
34
  st.session_state.temp_input = ""
35
 
36
- # --- Load all JSON files
37
  if uploaded_files:
38
  st.session_state.json_data.clear()
39
  file_summaries = []
@@ -53,7 +48,6 @@ if uploaded_files:
53
  except Exception as e:
54
  st.sidebar.error(f"Error reading {f.name}: {e}")
55
 
56
- # Compose system prompt for the LLM
57
  system_message = {
58
  "role": "system",
59
  "content": (
@@ -69,7 +63,6 @@ if uploaded_files:
69
  else:
70
  st.session_state.json_data.clear()
71
 
72
- # --- Functions for querying JSON files
73
  def search_json(file_name, key, value):
74
  try:
75
  data = st.session_state.json_data[file_name]
@@ -110,7 +103,6 @@ def count_key_occurrences(file_name, key):
110
  except Exception as e:
111
  return {"error": str(e)}
112
 
113
- # --- Function schemas for OpenAI
114
  function_schema = [
115
  {
116
  "name": "search_json",
@@ -150,13 +142,16 @@ function_schema = [
150
  },
151
  ]
152
 
153
- # --- Chat interface
154
  st.markdown("### Conversation")
155
- for i, msg in enumerate(st.session_state.messages[1:]): # Skip system message for display
156
  if msg["role"] == "user":
157
  st.markdown(f"<div style='color: #4F8BF9;'><b>User:</b> {msg['content']}</div>", unsafe_allow_html=True)
158
  elif msg["role"] == "assistant":
159
- st.markdown(f"<div style='color: #1C6E4C;'><b>Agent:</b> {msg['content']}</div>", unsafe_allow_html=True)
 
 
 
 
160
  elif msg["role"] == "function":
161
  try:
162
  result = json.loads(msg["content"])
@@ -164,7 +159,6 @@ for i, msg in enumerate(st.session_state.messages[1:]): # Skip system message f
164
  except Exception:
165
  st.markdown(f"<b>Function '{msg['name']}' output:</b> {msg['content']}", unsafe_allow_html=True)
166
 
167
- # --- Sending user input and OpenAI call logic using a callback
168
  def send_message():
169
  user_input = st.session_state.temp_input
170
  if user_input and user_input.strip():
@@ -175,12 +169,11 @@ def send_message():
175
  else:
176
  chat_messages = chat_messages.copy()
177
  try:
178
- # OpenAI call
179
  chat_resp = requests.post(
180
  "https://api.openai.com/v1/chat/completions",
181
  headers=HEADERS,
182
  json={
183
- "model": "gpt-4.1",
184
  "messages": chat_messages,
185
  "functions": function_schema,
186
  "function_call": "auto",
@@ -193,7 +186,6 @@ def send_message():
193
  response_json = chat_resp.json()
194
  msg = response_json["choices"][0]["message"]
195
 
196
- # If OpenAI requests a function call
197
  if msg.get("function_call"):
198
  func_name = msg["function_call"]["name"]
199
  args_json = msg["function_call"]["arguments"]
@@ -215,7 +207,6 @@ def send_message():
215
  "name": func_name,
216
  "content": json.dumps(result),
217
  })
218
- # Second call to OpenAI for the final answer
219
  followup_messages = st.session_state.messages
220
  if len(followup_messages) > 12:
221
  followup_messages = [followup_messages[0]] + followup_messages[-11:]
@@ -225,7 +216,7 @@ def send_message():
225
  "https://api.openai.com/v1/chat/completions",
226
  headers=HEADERS,
227
  json={
228
- "model": "gpt-4.1",
229
  "messages": followup_messages,
230
  "temperature": 0,
231
  "max_tokens": 1500,
 
4
  import requests
5
  import traceback
6
 
 
7
  st.set_page_config(page_title="JSON-Backed AI Chat Agent", layout="wide")
8
  st.title("JSON-Backed AI Chat Agent")
9
 
 
10
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
11
  if not OPENAI_API_KEY:
12
  st.error("❌ OPENAI_API_KEY not set in Settings → Secrets.")
 
17
  "Content-Type": "application/json",
18
  }
19
 
 
20
  st.sidebar.header("Upload Multiple JSON Files")
21
  uploaded_files = st.sidebar.file_uploader(
22
  "Choose one or more JSON files", type="json", accept_multiple_files=True
23
  )
24
 
 
25
  if "json_data" not in st.session_state:
26
  st.session_state.json_data = {}
27
  if "messages" not in st.session_state:
 
29
  if "temp_input" not in st.session_state:
30
  st.session_state.temp_input = ""
31
 
 
32
  if uploaded_files:
33
  st.session_state.json_data.clear()
34
  file_summaries = []
 
48
  except Exception as e:
49
  st.sidebar.error(f"Error reading {f.name}: {e}")
50
 
 
51
  system_message = {
52
  "role": "system",
53
  "content": (
 
63
  else:
64
  st.session_state.json_data.clear()
65
 
 
66
  def search_json(file_name, key, value):
67
  try:
68
  data = st.session_state.json_data[file_name]
 
103
  except Exception as e:
104
  return {"error": str(e)}
105
 
 
106
  function_schema = [
107
  {
108
  "name": "search_json",
 
142
  },
143
  ]
144
 
 
145
  st.markdown("### Conversation")
146
+ for i, msg in enumerate(st.session_state.messages[1:]):
147
  if msg["role"] == "user":
148
  st.markdown(f"<div style='color: #4F8BF9;'><b>User:</b> {msg['content']}</div>", unsafe_allow_html=True)
149
  elif msg["role"] == "assistant":
150
+ content = msg.get("content", "")
151
+ if content.strip():
152
+ st.markdown(f"<div style='color: #1C6E4C;'><b>Agent:</b> {content}</div>", unsafe_allow_html=True)
153
+ else:
154
+ st.markdown(f"<div style='color: #DC143C;'><b>Agent:</b> [No response generated]</div>", unsafe_allow_html=True)
155
  elif msg["role"] == "function":
156
  try:
157
  result = json.loads(msg["content"])
 
159
  except Exception:
160
  st.markdown(f"<b>Function '{msg['name']}' output:</b> {msg['content']}", unsafe_allow_html=True)
161
 
 
162
  def send_message():
163
  user_input = st.session_state.temp_input
164
  if user_input and user_input.strip():
 
169
  else:
170
  chat_messages = chat_messages.copy()
171
  try:
 
172
  chat_resp = requests.post(
173
  "https://api.openai.com/v1/chat/completions",
174
  headers=HEADERS,
175
  json={
176
+ "model": "gpt-4o",
177
  "messages": chat_messages,
178
  "functions": function_schema,
179
  "function_call": "auto",
 
186
  response_json = chat_resp.json()
187
  msg = response_json["choices"][0]["message"]
188
 
 
189
  if msg.get("function_call"):
190
  func_name = msg["function_call"]["name"]
191
  args_json = msg["function_call"]["arguments"]
 
207
  "name": func_name,
208
  "content": json.dumps(result),
209
  })
 
210
  followup_messages = st.session_state.messages
211
  if len(followup_messages) > 12:
212
  followup_messages = [followup_messages[0]] + followup_messages[-11:]
 
216
  "https://api.openai.com/v1/chat/completions",
217
  headers=HEADERS,
218
  json={
219
+ "model": "gpt-4o",
220
  "messages": followup_messages,
221
  "temperature": 0,
222
  "max_tokens": 1500,