DovieUU commited on
Commit
e04a51e
·
unverified ·
1 Parent(s): 6aeb4f0

Fix Responses API issue

Browse files
README.md CHANGED
@@ -15,10 +15,51 @@ license: gpl-3.0
15
 
16
  An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
17
 
18
- # development
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  ```bash
20
- cd ~/repos/huggingface
21
- code .
22
- source .env
23
- hf upload bandh-webops/demo ./ --exclude '.*'
 
24
  ```
 
15
 
16
  An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
17
 
18
+ ## Run locally
19
+
20
+ Environment variables:
21
+
22
+ - `OPENAI_API_KEY` (required for comparisons)
23
+ - `LOGIN_PASSWORD` (required to access the UI; username is `bnh`)
24
+ - `OVERRIDE_HEADER` (optional header for B&H API calls)
25
+ - `LOG_LEVEL` (optional; default `INFO`, set to `DEBUG` for verbose logs)
26
+
27
+ Commands:
28
+
29
+ ```bash
30
+ cd ~/repos/demo
31
+ python -m venv .venv
32
+ source .venv/bin/activate
33
+ pip install -r requirements.txt
34
+
35
+ # Set your environment variables
36
+ export OPENAI_API_KEY=sk-...
37
+ export LOGIN_PASSWORD=your-password
38
+ export LOG_LEVEL=DEBUG
39
+
40
+ # Run the app
41
+ python app.py
42
+ ```
43
+
44
+ The app will start at `http://127.0.0.1:7860`. Log in with username `bnh` and your `LOGIN_PASSWORD`.
45
+
46
+ ## Troubleshooting
47
+
48
+ - Full tracebacks are surfaced in the UI if an error occurs during comparison.
49
+ - The console also shows rich tracebacks and structured logs. Increase verbosity with `LOG_LEVEL=DEBUG`.
50
+ - Common issues:
51
+ - Missing `OPENAI_API_KEY` → the app will raise a clear error indicating it is not set.
52
+ - Invalid SKU input → the app requires numeric SKUs and will show a validation error.
53
+ - Network issues to B&H or OpenAI → retries are limited; check connectivity and try again.
54
+
55
+ ## Dev helpers
56
+
57
+ Quick smoke tests without launching the server:
58
+
59
  ```bash
60
+ # Validate OpenAI missing-key error path
61
+ python scripts/test_openai_error.py
62
+
63
+ # Show UI-layer traceback formatting using an invalid SKU
64
+ python scripts/smoke_test.py
65
  ```
app.py CHANGED
@@ -1,14 +1,28 @@
1
  import os
2
  import json
3
  import http.client
 
4
  import traceback
5
- import re
6
  import gradio as gr
7
 
8
  from dotenv import load_dotenv
9
  from openai import OpenAI
10
- from pydantic import BaseModel, Field
 
11
  from typing import List, Literal, Dict, Any
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
 
14
  # Define the Pydantic model for camera specifications with free-form specs
@@ -25,7 +39,7 @@ class SimpleComparisonSummary(BaseModel):
25
  # Initialize the OpenAI client
26
  # Make sure you have the OPENAI_API_KEY environment variable set or pass it directly
27
  load_dotenv()
28
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
29
  override_header = os.getenv("OVERRIDE_HEADER")
30
  login_password = os.getenv("LOGIN_PASSWORD")
31
 
@@ -47,9 +61,9 @@ def compare_cameras(model_name: str, prompt: str, camera1_specs: CameraSpecs, ca
47
 
48
  # Map verbosity and reasoning level to Responses API parameters
49
  verbosity_map = {
50
- "Low": (600, "Keep the response concise and focused on essentials."),
51
- "Medium": (1200, "Provide a balanced level of detail."),
52
- "High": (2000, "Be thorough and include more detail and nuance."),
53
  }
54
  max_tokens, verbosity_hint = verbosity_map.get(verbosity, (600, "Keep the response concise."))
55
 
@@ -57,101 +71,182 @@ def compare_cameras(model_name: str, prompt: str, camera1_specs: CameraSpecs, ca
57
  reasoning_effort = reasoning_map.get(reasoning_level, "low")
58
 
59
  try:
60
- response = client.responses.create(
61
- model=model_name,
62
- input=[
63
- {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant that compares camera specifications and outputs JSON. " + verbosity_hint}]},
64
- {"role": "user", "content": [{"type": "text", "text": prompt}]},
65
- ],
66
- response_format={
67
- "type": "json_schema",
68
- "json_schema": {
69
- "name": "SimpleComparisonSummary",
70
- "schema": SimpleComparisonSummary.model_json_schema(),
71
- "strict": True,
72
- },
73
- },
74
- max_output_tokens=max_tokens,
75
- reasoning={"effort": reasoning_effort},
76
- )
77
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  try:
79
- json_text = response.output_text # type: ignore
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  except Exception:
81
- # Fallback to explicit path
82
- json_text = response.output[0].content[0].text # type: ignore
83
-
84
- summary_data = json.loads(json_text)
85
- return SimpleComparisonSummary(**summary_data)
86
- except Exception as e:
87
- # Let the GUI render the detailed error, but keep a concise log here
88
- print(f"OpenAI comparison error: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
89
  raise
90
 
91
- def _normalize_sku(raw: str) -> str:
92
- """Extract only digits from a SKU like '1394217-REG' -> '1394217'."""
93
- sku = re.sub(r"\D", "", str(raw or "").strip())
94
- if not sku:
95
- raise ValueError("SKU must contain digits; e.g., '1394217' or '1394217-REG'.")
96
- return sku
97
-
98
-
99
- def load_specs_data(sku_no):
100
- conn = http.client.HTTPSConnection("www.bhphotovideo.com")
101
 
102
- # Many users paste SKUs like '1394217-REG'. Keep only digits and send numeric skuNo.
103
- normalized = _normalize_sku(sku_no)
104
- payload_obj = {
105
- "params": {
106
- "itemList": [{"skuNo": int(normalized), "itemSource": "REG"}],
107
- "channels": ["specifications", "priceInfo"],
108
- }
109
- }
110
- payload = json.dumps(payload_obj)
111
 
112
- headers = {
113
- 'accept': "application/json, text/plain, */*",
114
- 'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
115
- 'Content-Type': "application/json",
116
- 'x-waf-known': override_header
117
- }
118
 
119
- conn.request("POST", "/api/item/p/product-details?from=cli&aperture=1&cliReqId=d8b489d2-125a-46e0-94c9-0d731d9601fd-cli-11", payload, headers)
120
- res = conn.getresponse()
121
- data = res.read()
122
- if res.status != 200:
123
- # Surface HTTP errors to the UI with a helpful snippet of the body
124
- try:
125
- snippet = data.decode("utf-8", "ignore")[:500]
126
- except Exception:
127
- snippet = "<binary body>"
128
- raise RuntimeError(f"B&H API request failed: HTTP {res.status} {res.reason}. Body: {snippet}")
129
- return data
130
-
131
- def extract_group_details(data):
132
- data_dict = json.loads(data)
133
-
134
- groups_data = data_dict.get('data', [{}])[0].get('specifications', {}).get('items', [{}])[0].get('groups', [])
135
- short_description = data_dict.get('data', [{}])[0].get('specifications', {}).get('items', [{}])[0].get('item', {}).get('core', {}).get('shortDescription')
136
- details_url = data_dict.get('data', [{}])[0].get('specifications', {}).get('items', [{}])[0].get('item', {}).get('core', {}).get('detailsUrl')
137
- main_image_url = data_dict.get('data', [{}])[0].get('specifications', {}).get('items', [{}])[0].get('item', {}).get('mainImage', {}).get('listing', {}).get('url')
138
- price = data_dict.get('data', [{}])[0].get('priceInfo', {}).get('items', [{}])[0].get('price', {}).get('price')
139
-
140
- groups_data.append({"price" : price});
141
-
142
-
143
- # Create a dictionary with the extracted data
144
- extracted_data = {
145
- "model": short_description,
146
- "price": price,
147
- "details_url": details_url,
148
- "main_image_url": main_image_url,
149
- "details_url": details_url,
150
- "main_image_url": main_image_url,
151
- "specs": groups_data
152
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
153
 
154
- return extracted_data
155
 
156
 
157
  model_list = ["gpt-5-nano", "gpt-5-mini", "gpt-5"]
@@ -173,9 +268,9 @@ Maintain the prescribed section order: Which Is Right for You, Main Differences,
173
  def compare_cameras_gui(model_name, verbosity, reasoning_level, prompt, sku1, sku2):
174
  """
175
  Gradio interface function to compare cameras based on SKU numbers and model name.
176
- Shows detailed error output (message + traceback) in the Markdown output on failure.
177
  """
178
  try:
 
179
  data_a = load_specs_data(sku1)
180
  data_b = load_specs_data(sku2)
181
 
@@ -187,34 +282,20 @@ def compare_cameras_gui(model_name, verbosity, reasoning_level, prompt, sku1, sk
187
 
188
  summary = compare_cameras(model_name, prompt, camera_a, camera_b, verbosity=verbosity, reasoning_level=reasoning_level)
189
 
190
- # If we got here, summary should be a model object
191
  output_text = f"""
192
  ## Product Comparison Summary
193
  {summary.summary}
194
  """
195
  return output_text
196
-
197
- except Exception as e:
198
- # Return rich debugging information directly to the Markdown output
199
- tb = traceback.format_exc()
200
- try:
201
- s1 = _normalize_sku(sku1)
202
- except Exception:
203
- s1 = str(sku1)
204
- try:
205
- s2 = _normalize_sku(sku2)
206
- except Exception:
207
- s2 = str(sku2)
208
- helpful_hint = (
209
- "Tip: Enter SKUs with or without '-REG' (e.g., '1394217-REG' or '1394217'). "
210
- "We automatically normalize to digits: "
211
- f"'{s1}' and '{s2}'."
212
- )
213
  return (
214
- f"### Error\n"
215
- f"- Message: {e}\n\n"
216
- f"{helpful_hint}\n\n"
217
- f"<details><summary>Traceback</summary>\n\n````text\n{tb}\n````\n\n</details>"
218
  )
219
 
220
 
@@ -270,5 +351,4 @@ if __name__ == "__main__":
270
  # Launch the interface
271
  # Disable Gradio's experimental SSR to avoid svelte-i18n initial locale errors in server-side rendering
272
  # Also hide the "Use via API" link explicitly
273
- # show_error=True will also display a toast with the message, in addition to our Markdown output.
274
- demo.launch(auth=authenticate, share=False, ssr_mode=False, show_api=False, show_error=True)
 
1
  import os
2
  import json
3
  import http.client
4
+ import logging
5
  import traceback
 
6
  import gradio as gr
7
 
8
  from dotenv import load_dotenv
9
  from openai import OpenAI
10
+ import openai
11
+ from pydantic import BaseModel, Field, ValidationError
12
  from typing import List, Literal, Dict, Any
13
+ from rich.traceback import install as install_rich_traceback
14
+
15
+
16
+ # Enable rich tracebacks in the console for better debugging
17
+ install_rich_traceback(show_locals=True)
18
+
19
+ # Basic logging configuration
20
+ LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
21
+ logging.basicConfig(
22
+ level=getattr(logging, LOG_LEVEL, logging.INFO),
23
+ format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
24
+ )
25
+ logger = logging.getLogger(__name__)
26
 
27
 
28
  # Define the Pydantic model for camera specifications with free-form specs
 
39
  # Initialize the OpenAI client
40
  # Make sure you have the OPENAI_API_KEY environment variable set or pass it directly
41
  load_dotenv()
42
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"), timeout=60.0, max_retries=2)
43
  override_header = os.getenv("OVERRIDE_HEADER")
44
  login_password = os.getenv("LOGIN_PASSWORD")
45
 
 
61
 
62
  # Map verbosity and reasoning level to Responses API parameters
63
  verbosity_map = {
64
+ "Low": (1200, "Keep the response concise and focused on essentials."),
65
+ "Medium": (2400, "Provide a balanced level of detail."),
66
+ "High": (3600, "Be thorough and include more detail and nuance."),
67
  }
68
  max_tokens, verbosity_hint = verbosity_map.get(verbosity, (600, "Keep the response concise."))
69
 
 
71
  reasoning_effort = reasoning_map.get(reasoning_level, "low")
72
 
73
  try:
74
+ if not os.getenv("OPENAI_API_KEY"):
75
+ raise RuntimeError("OPENAI_API_KEY is not set. Please set it in your environment or .env file.")
76
+ logger.debug("Preparing request to OpenAI: model=%s, verbosity=%s, reasoning=%s", model_name, verbosity, reasoning_level)
77
+
78
+ # Responses API - use parse helper with Pydantic response_format (modern SDKs)
79
+ def supports_reasoning_param(model: str) -> bool:
80
+ name = (model or "").lower()
81
+ # Heuristic: known reasoning families and names often disallow temperature but allow `reasoning`
82
+ return any(key in name for key in ["o3", "o4", "reasoning", "gpt-5"]) # keep heuristic broad
83
+
84
+ def supports_temperature_param(model: str) -> bool:
85
+ name = (model or "").lower()
86
+ # Heuristic: avoid temperature for explicit reasoning models
87
+ if supports_reasoning_param(name):
88
+ return False
89
+ # Otherwise allow temperature by default
90
+ return True
91
+
92
+ def build_params(max_toks: int) -> Dict[str, Any]:
93
+ params: Dict[str, Any] = {
94
+ "model": model_name,
95
+ "instructions": "You are a helpful assistant that compares camera specifications and outputs JSON. " + verbosity_hint,
96
+ "input": [
97
+ {"role": "user", "content": [{"type": "input_text", "text": prompt}]},
98
+ ],
99
+ "text_format": SimpleComparisonSummary,
100
+ "max_output_tokens": max_toks,
101
+ }
102
+ if supports_reasoning_param(model_name):
103
+ params["reasoning"] = {"effort": reasoning_effort}
104
+ if supports_temperature_param(model_name):
105
+ params["temperature"] = 0.0
106
+ return params
107
+
108
+ def do_parse(max_toks: int):
109
+ params = build_params(max_toks)
110
+ try:
111
+ return client.responses.parse(**params)
112
+ except openai.APIStatusError as e:
113
+ # Retry automatically if server reports unsupported parameters
114
+ msg = (getattr(e, "response", None) and getattr(e.response, "json", lambda: {})()) or {}
115
+ err_msg = None
116
+ try:
117
+ err_msg = (msg or {}).get("error", {}).get("message")
118
+ except Exception:
119
+ err_msg = None
120
+
121
+ # Fallback: inspect exception text
122
+ err_text = str(e)
123
+ def mentions(word: str) -> bool:
124
+ return (err_msg and word in err_msg) or (word in err_text)
125
+
126
+ mutated = False
127
+ if "temperature" in params and (mentions("temperature") and mentions("Unsupported parameter")):
128
+ logger.info("Model %s does not support temperature; retrying without it", model_name)
129
+ params.pop("temperature", None)
130
+ mutated = True
131
+ if "reasoning" in params and (mentions("reasoning") and mentions("Unsupported parameter")):
132
+ logger.info("Model %s does not support reasoning param; retrying without it", model_name)
133
+ params.pop("reasoning", None)
134
+ mutated = True
135
+
136
+ if mutated:
137
+ return client.responses.parse(**params)
138
+ raise
139
+
140
+ # First attempt
141
  try:
142
+ response = do_parse(max_tokens)
143
+ except ValidationError:
144
+ # Likely truncated/invalid JSON; retry once with a higher token cap
145
+ logger.warning("ValidationError parsing JSON; retrying with higher max_output_tokens")
146
+ response = do_parse(max(4096, max_tokens * 2))
147
+
148
+ logger.debug("Received response from OpenAI (Responses API parse)")
149
+ # Try to extract parsed object from any output content part
150
+ parsed_obj = None
151
+ try:
152
+ outputs = getattr(response, "output", [])
153
+ for item in outputs or []:
154
+ content = getattr(item, "content", None)
155
+ if not content:
156
+ continue
157
+ for part in content:
158
+ p = getattr(part, "parsed", None)
159
+ if p is not None:
160
+ parsed_obj = p
161
+ break
162
+ if parsed_obj is not None:
163
+ break
164
  except Exception:
165
+ logger.debug("Direct parsed extraction failed; will try alternative access paths.")
166
+
167
+ if isinstance(parsed_obj, SimpleComparisonSummary):
168
+ return parsed_obj
169
+ if isinstance(parsed_obj, dict):
170
+ return SimpleComparisonSummary(**parsed_obj)
171
+
172
+ # Fallback: parse from output_text if available
173
+ output_text = getattr(response, "output_text", None)
174
+ if isinstance(output_text, str) and output_text.strip():
175
+ try:
176
+ summary_data = json.loads(output_text)
177
+ return SimpleComparisonSummary(**summary_data)
178
+ except Exception:
179
+ logger.exception("Failed to parse output_text as JSON for SimpleComparisonSummary")
180
+
181
+ raise RuntimeError("Failed to extract parsed JSON from OpenAI response.")
182
+ except Exception:
183
+ logger.exception("Error while comparing cameras via OpenAI API")
184
+ # Re-raise so the UI layer can display full traceback
185
  raise
186
 
187
+ def load_specs_data(sku_no: str) -> bytes:
188
+ if not sku_no or not str(sku_no).strip().isdigit():
189
+ raise ValueError(f"Invalid SKU '{sku_no}'. Please enter a numeric SKU.")
 
 
 
 
 
 
 
190
 
191
+ conn = http.client.HTTPSConnection("www.bhphotovideo.com", timeout=15)
 
 
 
 
 
 
 
 
192
 
193
+ payload = (
194
+ "{\"params\":{\"itemList\":[{\"skuNo\":" + str(sku_no) + ",\"itemSource\":\"REG\"}],\"channels\":[\"specifications\", \"priceInfo\"]}}"
195
+ )
 
 
 
196
 
197
+ headers = {
198
+ 'accept': "application/json, text/plain, */*",
199
+ 'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
200
+ 'Content-Type': "application/json",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
  }
202
+ # Only include this optional header if provided
203
+ if override_header:
204
+ headers['x-waf-known'] = override_header
205
+
206
+ try:
207
+ conn.request(
208
+ "POST",
209
+ "/api/item/p/product-details?from=cli&aperture=1&cliReqId=d8b489d2-125a-46e0-94c9-0d731d9601fd-cli-11",
210
+ payload,
211
+ headers,
212
+ )
213
+ res = conn.getresponse()
214
+ status = res.status
215
+ data = res.read()
216
+ logger.debug("B&H response status=%s, %s bytes", status, len(data) if data else 0)
217
+ if status < 200 or status >= 300:
218
+ preview = (data[:500] if data else b"").decode("utf-8", errors="ignore")
219
+ raise RuntimeError(f"B&H API returned HTTP {status}. Body preview: {preview}")
220
+ return data
221
+ except Exception:
222
+ logger.exception("Failed to load specs for SKU %s", sku_no)
223
+ raise
224
+
225
+ def extract_group_details(data: bytes):
226
+ try:
227
+ data_dict = json.loads(data)
228
+ except Exception:
229
+ logger.exception("Failed to parse JSON from B&H response")
230
+ raise
231
+
232
+ groups_data = data_dict.get('data', [{}])[0].get('specifications', {}).get('items', [{}])[0].get('groups', [])
233
+ short_description = data_dict.get('data', [{}])[0].get('specifications', {}).get('items', [{}])[0].get('item', {}).get('core', {}).get('shortDescription')
234
+ details_url = data_dict.get('data', [{}])[0].get('specifications', {}).get('items', [{}])[0].get('item', {}).get('core', {}).get('detailsUrl')
235
+ main_image_url = data_dict.get('data', [{}])[0].get('specifications', {}).get('items', [{}])[0].get('item', {}).get('mainImage', {}).get('listing', {}).get('url')
236
+ price = data_dict.get('data', [{}])[0].get('priceInfo', {}).get('items', [{}])[0].get('price', {}).get('price')
237
+
238
+ groups_data.append({"price": price})
239
+
240
+ # Create a dictionary with the extracted data
241
+ extracted_data = {
242
+ "model": short_description,
243
+ "price": price,
244
+ "details_url": details_url,
245
+ "main_image_url": main_image_url,
246
+ "specs": groups_data,
247
+ }
248
 
249
+ return extracted_data
250
 
251
 
252
  model_list = ["gpt-5-nano", "gpt-5-mini", "gpt-5"]
 
268
  def compare_cameras_gui(model_name, verbosity, reasoning_level, prompt, sku1, sku2):
269
  """
270
  Gradio interface function to compare cameras based on SKU numbers and model name.
 
271
  """
272
  try:
273
+ logger.info("Fetching product specs for SKUs: %s, %s", sku1, sku2)
274
  data_a = load_specs_data(sku1)
275
  data_b = load_specs_data(sku2)
276
 
 
282
 
283
  summary = compare_cameras(model_name, prompt, camera_a, camera_b, verbosity=verbosity, reasoning_level=reasoning_level)
284
 
285
+ # Format the output for display in Gradio using more standard markdown syntax
286
  output_text = f"""
287
  ## Product Comparison Summary
288
  {summary.summary}
289
  """
290
  return output_text
291
+ except Exception:
292
+ # Surface full traceback in the UI for easier debugging
293
+ err = traceback.format_exc()
294
+ logger.error("An error occurred during comparison. Returning traceback to UI.\n%s", err)
 
 
 
 
 
 
 
 
 
 
 
 
 
295
  return (
296
+ "Encountered an error while generating the comparison. "
297
+ "Here is the full traceback to help diagnose the issue:\n\n"
298
+ f"```text\n{err}\n```"
 
299
  )
300
 
301
 
 
351
  # Launch the interface
352
  # Disable Gradio's experimental SSR to avoid svelte-i18n initial locale errors in server-side rendering
353
  # Also hide the "Use via API" link explicitly
354
+ demo.launch(auth=authenticate, share=False, ssr_mode=False, show_api=False)
 
requirements-dev.txt CHANGED
@@ -52,7 +52,7 @@ numpy==1.24.4 ; python_full_version < '3.9'
52
  numpy==2.0.2 ; python_full_version == '3.9.*'
53
  numpy==2.2.6 ; python_full_version == '3.10.*'
54
  numpy==2.3.2 ; python_full_version >= '3.11'
55
- openai==1.100.2
56
  orjson==3.10.15 ; python_full_version < '3.9'
57
  orjson==3.11.2 ; python_full_version >= '3.9'
58
  packaging==25.0
 
52
  numpy==2.0.2 ; python_full_version == '3.9.*'
53
  numpy==2.2.6 ; python_full_version == '3.10.*'
54
  numpy==2.3.2 ; python_full_version >= '3.11'
55
+ openai>=1.100.2,<2
56
  orjson==3.10.15 ; python_full_version < '3.9'
57
  orjson==3.11.2 ; python_full_version >= '3.9'
58
  packaging==25.0
requirements.txt CHANGED
@@ -160,7 +160,7 @@ numpy==2.3.2 ; python_full_version >= '3.11'
160
  # via
161
  # gradio
162
  # pandas
163
- openai==1.100.2
164
  # via huggingface
165
  orjson==3.10.15 ; python_full_version < '3.9'
166
  # via gradio
 
160
  # via
161
  # gradio
162
  # pandas
163
+ openai>=1.100.2,<2
164
  # via huggingface
165
  orjson==3.10.15 ; python_full_version < '3.9'
166
  # via gradio
scripts/smoke_test.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ os.environ.setdefault("LOG_LEVEL", "DEBUG")
3
+
4
+ from app import compare_cameras_gui
5
+
6
+ # Force an error by passing an invalid SKU
7
+ result = compare_cameras_gui("gpt-5-nano", "Low", "Minimal", "test", "abc", "123")
8
+ print(result)
scripts/test_openai_error.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from app import CameraSpecs, compare_cameras
3
+
4
+ # Ensure no API key to force the error path
5
+ os.environ.pop("OPENAI_API_KEY", None)
6
+
7
+ cam_a = CameraSpecs(model="Test A", specs=[{"key": "value"}])
8
+ cam_b = CameraSpecs(model="Test B", specs=[{"key": "value"}])
9
+
10
+ try:
11
+ compare_cameras("gpt-5-nano", "Test prompt", cam_a, cam_b)
12
+ except Exception as e:
13
+ import traceback
14
+ print("Captured exception from compare_cameras; full traceback below:\n")
15
+ print(traceback.format_exc())