Melika Kheirieh committed on
Commit
6f6e439
·
1 Parent(s): b432020

fix(planner): increase OpenAI timeout & add graceful timeout handling

Browse files
adapters/llm/openai_provider.py CHANGED
@@ -48,7 +48,7 @@ class OpenAIProvider(LLMProvider):
48
  api_key, base_url, model = _resolve_api_config()
49
  os.environ["OPENAI_API_KEY"] = api_key
50
  os.environ["OPENAI_BASE_URL"] = base_url
51
- self.client = OpenAI()
52
  self.model = model
53
  # last call usage/metadata for tracing
54
  self._last_usage: dict[str, Any] = {}
 
48
  api_key, base_url, model = _resolve_api_config()
49
  os.environ["OPENAI_API_KEY"] = api_key
50
  os.environ["OPENAI_BASE_URL"] = base_url
51
+ self.client = OpenAI(timeout=120.0)
52
  self.model = model
53
  # last call usage/metadata for tracing
54
  self._last_usage: dict[str, Any] = {}
nl2sql/planner.py CHANGED
@@ -86,6 +86,18 @@ def _pick_relevant_tables(schema_text: str, question: str, k: int = 3) -> str:
86
  return schema_text
87
 
88
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  # ------------------------------ Planner ------------------------------
90
  class Planner:
91
  """Planner wrapper around the LLM provider."""
@@ -98,7 +110,10 @@ class Planner:
98
  self._plan_cache: dict[tuple[str, int, int], tuple[str, int, int, float]] = {}
99
 
100
  def run(self, *, user_query: str, schema_preview: str) -> Dict[str, Any]:
 
101
  trimmed = _pick_relevant_tables(schema_preview or "", user_query or "", k=3)
 
 
102
 
103
  key: tuple[str, int, int] = (
104
  self.model_id,
@@ -108,8 +123,11 @@ class Planner:
108
  if key in self._plan_cache:
109
  plan_text, pin, pout, cost = self._plan_cache[key]
110
  else:
 
111
  plan_text, pin, pout, cost = self.llm.plan(
112
- user_query=user_query, schema_preview=trimmed
 
 
113
  )
114
  self._plan_cache[key] = (plan_text, pin, pout, cost)
115
 
 
86
  return schema_text
87
 
88
 
89
+ # --------- Add schema size check ---------
90
+ def _trim_if_large(schema_text: str, max_chars: int = 8000) -> str:
91
+ """Trim schema if it's too large to prevent timeout"""
92
+ if len(schema_text) <= max_chars:
93
+ return schema_text
94
+
95
+ # Keep first part of schema that fits
96
+ lines = schema_text[:max_chars].splitlines()
97
+ # Try to end at a complete line
98
+ return "\n".join(lines[:-1]) if len(lines) > 1 else lines[0]
99
+
100
+
101
  # ------------------------------ Planner ------------------------------
102
  class Planner:
103
  """Planner wrapper around the LLM provider."""
 
110
  self._plan_cache: dict[tuple[str, int, int], tuple[str, int, int, float]] = {}
111
 
112
  def run(self, *, user_query: str, schema_preview: str) -> Dict[str, Any]:
113
+ # First apply relevance filtering
114
  trimmed = _pick_relevant_tables(schema_preview or "", user_query or "", k=3)
115
+ # Then apply size limit to prevent timeout
116
+ trimmed = _trim_if_large(trimmed, max_chars=8000)
117
 
118
  key: tuple[str, int, int] = (
119
  self.model_id,
 
123
  if key in self._plan_cache:
124
  plan_text, pin, pout, cost = self._plan_cache[key]
125
  else:
126
+ # Call with increased timeout
127
  plan_text, pin, pout, cost = self.llm.plan(
128
+ user_query=user_query,
129
+ schema_preview=trimmed,
130
+ timeout=120, # Increase timeout for large schemas
131
  )
132
  self._plan_cache[key] = (plan_text, pin, pout, cost)
133