Navya-Sree committed on
Commit
5ffc95f
·
verified ·
1 Parent(s): 9c283b0

Update src/macg/llm_openai.py

Browse files
Files changed (1) hide show
  1. src/macg/llm_openai.py +8 -17
src/macg/llm_openai.py CHANGED
@@ -1,40 +1,31 @@
1
  from __future__ import annotations
2
 
3
- import os
4
  from openai import OpenAI
5
-
6
  from macg.llm_base import LLMClient
7
 
8
 
9
class OpenAIResponsesLLM(LLMClient):
    """LLM client that talks to the OpenAI Responses API.

    Resolves the API key from the ``api_key`` argument or the
    ``OPENAI_API_KEY`` environment variable, and optionally targets a
    custom ``base_url``.
    """

    def __init__(
        self,
        model: str = "gpt-5",
        api_key: str | None = None,
        max_output_tokens: int = 900,
        temperature: float = 0.2,
        base_url: str | None = None,
    ) -> None:
        """Configure the client.

        Raises:
            ValueError: if no API key is given and ``OPENAI_API_KEY``
                is unset.
        """
        # Explicit argument wins; fall back to the environment.
        resolved_key = api_key or os.getenv("OPENAI_API_KEY")
        if not resolved_key:
            raise ValueError("OPENAI_API_KEY is not set.")
        self.api_key = resolved_key

        self.model = model
        self.max_output_tokens = max_output_tokens
        self.temperature = temperature

        # Only forward base_url when the caller supplied one, so the
        # SDK default endpoint is used otherwise.
        client_kwargs = {"api_key": self.api_key}
        if base_url:
            client_kwargs["base_url"] = base_url
        self.client = OpenAI(**client_kwargs)

    def complete(self, system: str, prompt: str) -> str:
        """Run one Responses-API call and return the generated text."""
        response = self.client.responses.create(
            model=self.model,
            instructions=system,
            input=prompt,
            max_output_tokens=self.max_output_tokens,
            temperature=self.temperature,
        )
        return response.output_text
 
1
  from __future__ import annotations
2
 
 
3
  from openai import OpenAI
 
4
  from macg.llm_base import LLMClient
5
 
6
 
7
class OpenAIResponsesLLM(LLMClient):
    """LLM client backed by the OpenAI Responses API.

    Requires an explicit ``api_key``; the endpoint defaults to the
    public OpenAI API but can be redirected via ``base_url``.
    """

    def __init__(
        self,
        api_key: str,
        model: str = "gpt-5",
        base_url: str = "https://api.openai.com/v1",
        temperature: float = 0.2,
        max_output_tokens: int = 900,
    ) -> None:
        """Validate the key and build the underlying OpenAI client.

        Raises:
            ValueError: if ``api_key`` is empty.
        """
        # Fail fast on a missing/empty key instead of erroring at first call.
        if not api_key:
            raise ValueError("OpenAI api_key is required.")

        self.model = model
        self.temperature = temperature
        self.max_output_tokens = max_output_tokens
        self.client = OpenAI(api_key=api_key, base_url=base_url)

    def complete(self, system: str, prompt: str) -> str:
        """Run one Responses-API call and return the generated text."""
        response = self.client.responses.create(
            model=self.model,
            instructions=system,
            input=prompt,
            temperature=self.temperature,
            max_output_tokens=self.max_output_tokens,
        )
        return response.output_text