import os
import json
import requests
from typing import List, Dict, Any, Optional


OPENAI_MODEL = "gpt-4.1-mini"

def grok_get_llm_response(
    system_prompt: str,
    user_input: str,
    tools: Optional[List[Dict[str, Any]]] = None,
    tool_choice: str = "auto",
    reasoning_effort: str = "default",
    response_format: Optional[Dict[str, Any]] = None,
    temperature: float = 0.3,
    max_completion_tokens: int = 2000,
    reasoning_format: str = "raw",
) -> str:
    """
    Make a request to the Groq API (Qwen3 model) and return the response content,
    supporting tool usage and agentic features.

    Args:
        system_prompt (str): The system prompt to set the context.
        user_input (str): The user input to process.
        tools (List[Dict[str, Any]], optional): List of tool definitions for tool-calling.
        tool_choice (str, optional): Controls tool usage ("none", "auto", "required"). Defaults to "auto".
        reasoning_effort (str, optional): Reasoning mode for Qwen3 models ("none", "default"). Defaults to "default".
        response_format (Dict[str, Any], optional): Format for structured outputs (e.g., JSON schema).
        temperature (float, optional): Sampling temperature (0 to 2). Defaults to 0.3.
        max_completion_tokens (int, optional): Max tokens in the response. Defaults to 2000.
        reasoning_format (str, optional): How model reasoning is returned (e.g., "raw", "parsed", "hidden"). Defaults to "raw".

    Returns:
        str: The content of the assistant's response or tool call results, or empty string on error.
    """

    api_key = os.getenv("GROQ_API_KEY")
    if not api_key:
        print("Groq API error: GROQ_API_KEY environment variable not set")
        return ""

    api_url = "https://api.groq.com/openai/v1/chat/completions"

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_input}
    ]

    payload = {
        "model": "qwen/qwen3-32b",
        "messages": messages,
        "temperature": max(0, min(temperature, 2)),
        "max_completion_tokens": max_completion_tokens
    }

    if tools:
        payload["tools"] = tools
        if tool_choice in ["none", "auto", "required"]:
            payload["tool_choice"] = tool_choice
        else:
            print(f"Groq API warning: Invalid tool_choice '{tool_choice}', defaulting to 'auto'")
            payload["tool_choice"] = "auto"

    if reasoning_effort in ["none", "default"]:
        payload["reasoning_effort"] = reasoning_effort
    else:
        print(f"Groq API warning: Invalid reasoning_effort '{reasoning_effort}', defaulting to 'default'")
        payload["reasoning_effort"] = "default"

    if response_format:
        payload["response_format"] = response_format

    if reasoning_format:
        payload["reasoning_format"] = reasoning_format

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()

        result = response.json()
        choice = result.get("choices", [{}])[0]
        message = choice.get("message", {})

        # If the model requested tool calls, return them as a readable summary.
        if message.get("tool_calls"):
            tool_results = []
            for tool_call in message["tool_calls"]:
                tool_name = tool_call.get("function", {}).get("name", "")
                tool_args = tool_call.get("function", {}).get("arguments", "{}")
                tool_results.append(f"Tool Call: {tool_name} with args {tool_args}")
            return "; ".join(tool_results)

        # Content can be null when the model only returns tool calls.
        return (message.get("content") or "").strip()

    except requests.exceptions.HTTPError as e:
        print(f"Groq API error: HTTP {e.response.status_code} - {e.response.text}")
        return ""
    except requests.exceptions.RequestException as e:
        print(f"Groq API error: Network error - {e}")
        return ""
    except (KeyError, ValueError) as e:
        print(f"Groq API error: Unexpected response format - {e}")
        return ""
    except Exception as e:
        print(f"Groq API error: Unexpected error - {e}")
        return ""
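

# Usage sketch (illustrative only): the tool schema below is a hypothetical
# example of the OpenAI-style function definition expected by the `tools`
# argument; the name "get_weather" is not defined anywhere in this module.
def _example_groq_tool_call() -> str:
    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Look up the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
    # Returns either plain text or a summary string such as:
    # 'Tool Call: get_weather with args {"city": "Paris"}'
    return grok_get_llm_response(
        system_prompt="You are a concise assistant.",
        user_input="What's the weather in Paris?",
        tools=[weather_tool],
        tool_choice="auto",
    )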


def openai_get_llm_response(
    system_prompt: str,
    user_input: str,
    tools: Optional[List[Dict[str, Any]]] = None,
    tool_choice: str = "auto",
    reasoning_effort: str = "default",
    response_format: Optional[Dict[str, Any]] = None,
    temperature: float = 0.3,
    max_completion_tokens: int = 2000,
    reasoning_format: Optional[str] = None,
) -> str:
    """
    Make a request to the OpenAI Chat Completions API (model set by OPENAI_MODEL)
    and return the response content, supporting tool usage and agentic features.
    reasoning_format is accepted for signature parity with the Groq wrapper but is
    not forwarded to OpenAI.
    """
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        print("OpenAI API error: OPENAI_API_KEY environment variable not set")
        return ""

    api_url = "https://api.openai.com/v1/chat/completions"

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_input},
    ]

    payload = {
        "model": OPENAI_MODEL,
        "messages": messages,
        "temperature": max(0, min(temperature, 2)),
        "max_completion_tokens": max_completion_tokens,
    }

    if tools:
        payload["tools"] = tools
        if tool_choice in ["none", "auto", "required"]:
            payload["tool_choice"] = tool_choice

    # Only forwarded when explicitly set to a supported value.
    if reasoning_effort in ["low", "medium", "high"]:
        payload["reasoning_effort"] = reasoning_effort

    if response_format:
        payload["response_format"] = response_format

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }

    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        result = response.json()

        choice = result.get("choices", [{}])[0]
        message = choice.get("message", {})

        # If the model requested tool calls, return them as a readable summary.
        if message.get("tool_calls"):
            tool_results = []
            for tool_call in message["tool_calls"]:
                tool_name = tool_call.get("function", {}).get("name", "")
                tool_args = tool_call.get("function", {}).get("arguments", "{}")
                tool_results.append(f"Tool Call: {tool_name} with args {tool_args}")
            return "; ".join(tool_results)

        return (message.get("content") or "").strip()

    except requests.exceptions.HTTPError as e:
        print(f"OpenAI API error: HTTP {e.response.status_code} - {e.response.text}")
        return ""
    except requests.exceptions.RequestException as e:
        print(f"OpenAI API error: Network error - {e}")
        return ""
    except Exception as e:
        print(f"OpenAI API error: Unexpected error - {e}")
        return ""
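

# Usage sketch (illustrative only): requesting structured output. The
# {"type": "json_object"} response_format is assumed here; note that JSON mode
# generally requires the prompt itself to mention JSON.
def _example_openai_json_response() -> str:
    return openai_get_llm_response(
        system_prompt="Reply only with a JSON object containing a 'summary' field.",
        user_input="Summarize: The cat sat on the mat.",
        response_format={"type": "json_object"},
        temperature=0.0,
    )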


def deepseekapi_get_llm_response(
    system_prompt: str,
    user_input: str,
    model: str = "deepseek-reasoner",
    stream: bool = False,
    temperature: float = 0.2,
    max_tokens: Optional[int] = None,
) -> str:
    """
    Make a request to the DeepSeek API (compatible with the OpenAI format).

    Args:
        system_prompt (str): The system prompt.
        user_input (str): The user's message.
        model (str): "deepseek-chat" or "deepseek-reasoner".
        stream (bool): Whether to request streaming output.
        temperature (float): Sampling temperature.
        max_tokens (int, optional): Max tokens for the response.

    Returns:
        str: The assistant's response (accumulated from the stream if streaming), or empty string on error.
    """
    api_key = os.getenv("DEEPSEEK_API_KEY")
    if not api_key:
        print("DeepSeek API error: DEEPSEEK_API_KEY not set")
        return ""

    api_url = "https://api.deepseek.com/v1/chat/completions"

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
|
| | payload: Dict[str, Any] = { |
| | "model": model, |
| | "messages": [ |
| | {"role": "system", "content": system_prompt}, |
| | {"role": "user", "content": user_input}, |
| | ], |
| | "stream": stream, |
| | "temperature": max(0.0, min(temperature, 2.0)), |
| | } |
| | if max_tokens is not None: |
| | payload["max_tokens"] = max_tokens |
| |
|
| | try: |
| | response = requests.post(api_url, headers=headers, json=payload, stream=stream, timeout=60) |
| | response.raise_for_status() |
| |
|
| | if stream: |
| | output = "" |
| | for chunk in response.iter_lines(chunk_size=8192, decode_unicode=True): |
| | if chunk: |
| | output += chunk.decode() if isinstance(chunk, bytes) else chunk |
| | return output |
| | else: |
| | data = response.json() |
| | return data.get("choices", [{}])[0].get("message", {}).get("content", "").strip() |
| |
|
| | except requests.exceptions.HTTPError as e: |
| | print(f"DeepSeek API error: HTTP {e.response.status_code} - {e.response.text}") |
| | except requests.exceptions.RequestException as e: |
| | print(f"DeepSeek API error: Network error - {e}") |
| | except Exception as e: |
| | print(f"DeepSeek API error: Unexpected error - {e}") |
| | return "" |
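

# Usage sketch (illustrative only): a non-streaming call with "deepseek-chat"
# and a streaming call with the default "deepseek-reasoner"; both return the
# assistant text, assuming DEEPSEEK_API_KEY is set. Prompts are arbitrary examples.
def _example_deepseek_calls() -> None:
    quick = deepseekapi_get_llm_response(
        system_prompt="You are a terse assistant.",
        user_input="Name three prime numbers.",
        model="deepseek-chat",
        max_tokens=50,
    )
    streamed = deepseekapi_get_llm_response(
        system_prompt="You are a careful reasoner.",
        user_input="Is 91 prime? Answer briefly.",
        stream=True,
    )
    print(quick, streamed, sep="\n")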


def API_llama_get_llm_response(
    system_prompt: str,
    user_input: str,
    tools: Optional[List[Dict[str, Any]]] = None,
    tool_choice: str = "auto",
    response_format: Optional[Dict[str, Any]] = None,
    temperature: float = 0.1,
    max_completion_tokens: int = 2000
) -> str:
    """
    Make a request to the Groq API (Llama 3.3 70B model) and return the response
    content, supporting tool usage and agentic features.

    Args:
        system_prompt (str): The system prompt to set the context.
        user_input (str): The user input to process.
        tools (List[Dict[str, Any]], optional): List of tool definitions for tool-calling.
        tool_choice (str, optional): Controls tool usage ("none", "auto", "required"). Defaults to "auto".
        response_format (Dict[str, Any], optional): Format for structured outputs (e.g., JSON schema).
        temperature (float, optional): Sampling temperature (0 to 2). Defaults to 0.1.
        max_completion_tokens (int, optional): Max tokens in the response. Defaults to 2000.

    Returns:
        str: The content of the assistant's response or tool call results, or empty string on error.
    """
    api_key = os.getenv("GROQ_API_KEY")
    if not api_key:
        print("Groq API error: GROQ_API_KEY environment variable not set")
        return ""

    api_url = "https://api.groq.com/openai/v1/chat/completions"

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_input}
    ]

    payload = {
        "model": "llama-3.3-70b-versatile",
        "messages": messages,
        "temperature": max(0, min(temperature, 2)),
        "max_completion_tokens": max_completion_tokens
    }

    if tools:
        payload["tools"] = tools
        if tool_choice in ["none", "auto", "required"]:
            payload["tool_choice"] = tool_choice
        else:
            print(f"Groq API warning: Invalid tool_choice '{tool_choice}', defaulting to 'auto'")
            payload["tool_choice"] = "auto"

    if response_format:
        payload["response_format"] = response_format

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()

        result = response.json()
        choice = result.get("choices", [{}])[0]
        message = choice.get("message", {})

        # If the model requested tool calls, return them as a readable summary.
        if message.get("tool_calls"):
            tool_results = []
            for tool_call in message["tool_calls"]:
                tool_name = tool_call.get("function", {}).get("name", "")
                tool_args = tool_call.get("function", {}).get("arguments", "{}")
                tool_results.append(f"Tool Call: {tool_name} with args {tool_args}")
            return "; ".join(tool_results)

        # Content can be null when the model only returns tool calls.
        return (message.get("content") or "").strip()

    except requests.exceptions.HTTPError as e:
        print(f"Groq API error: HTTP {e.response.status_code} - {e.response.text}")
        return ""
    except requests.exceptions.RequestException as e:
        print(f"Groq API error: Network error - {e}")
        return ""
    except (KeyError, ValueError) as e:
        print(f"Groq API error: Unexpected response format - {e}")
        return ""
    except Exception as e:
        print(f"Groq API error: Unexpected error - {e}")
        return ""


def open_oss_get_llm_response(
    system_prompt: str,
    user_input: str,
    tools: Optional[List[Dict[str, Any]]] = None,
    tool_choice: str = "auto",
    temperature: float = 0.1,
    max_completion_tokens: int = 3000,
    reasoning_format: str = "hidden"
) -> str:
    """
    Make a request to the Groq API (GPT-OSS model) and return the response content,
    supporting tool usage and agentic features.

    Args:
        system_prompt (str): The system prompt to set the context.
        user_input (str): The user input to process.
        tools (List[Dict[str, Any]], optional): List of tool definitions for tool-calling.
        tool_choice (str, optional): Controls tool usage ("none", "auto", "required"). Defaults to "auto".
        temperature (float, optional): Sampling temperature (0 to 2). Defaults to 0.1.
        max_completion_tokens (int, optional): Max tokens in the response. Defaults to 3000.
        reasoning_format (str, optional): How model reasoning is returned (e.g., "raw", "parsed", "hidden"). Defaults to "hidden".

    Returns:
        str: The content of the assistant's response or tool call results, or empty string on error.
    """
    api_key = os.getenv("GROQ_API_KEY")
    if not api_key:
        print("Groq API error: GROQ_API_KEY environment variable not set")
        return ""

    api_url = "https://api.groq.com/openai/v1/chat/completions"

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_input}
    ]

    payload = {
        "model": "openai/gpt-oss-20b",
        "messages": messages,
        "temperature": max(0, min(temperature, 2)),
        "max_completion_tokens": max_completion_tokens,
        "reasoning_effort": "medium"
    }

    if tools:
        payload["tools"] = tools
        if tool_choice in ["none", "auto", "required"]:
            payload["tool_choice"] = tool_choice
        else:
            print(f"Groq API warning: Invalid tool_choice '{tool_choice}', defaulting to 'auto'")
            payload["tool_choice"] = "auto"

    # Forward reasoning_format as in the Qwen3 wrapper (assumes the endpoint accepts it for this model).
    if reasoning_format:
        payload["reasoning_format"] = reasoning_format

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()

        result = response.json()
        choice = result.get("choices", [{}])[0]
        message = choice.get("message", {})

        # If the model requested tool calls, return them as a readable summary.
        if message.get("tool_calls"):
            tool_results = []
            for tool_call in message["tool_calls"]:
                tool_name = tool_call.get("function", {}).get("name", "")
                tool_args = tool_call.get("function", {}).get("arguments", "{}")
                tool_results.append(f"Tool Call: {tool_name} with args {tool_args}")
            return "; ".join(tool_results)

        # Content can be null when the model only returns tool calls.
        return (message.get("content") or "").strip()

    except requests.exceptions.HTTPError as e:
        print(f"Groq API error: HTTP {e.response.status_code} - {e.response.text}")
        return ""
    except requests.exceptions.RequestException as e:
        print(f"Groq API error: Network error - {e}")
        return ""
    except (KeyError, ValueError) as e:
        print(f"Groq API error: Unexpected response format - {e}")
        return ""
    except Exception as e:
        print(f"Groq API error: Unexpected error - {e}")
        return ""
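

# Minimal smoke test (illustrative only): exercises whichever providers have API
# keys configured in the environment; all prompts here are arbitrary examples,
# not part of the wrappers above.
if __name__ == "__main__":
    if os.getenv("GROQ_API_KEY"):
        print("Qwen3 via Groq:", grok_get_llm_response("Be brief.", "Say hello."))
        print("Llama via Groq:", API_llama_get_llm_response("Be brief.", "Say hello."))
        print("GPT-OSS via Groq:", open_oss_get_llm_response("Be brief.", "Say hello."))
    if os.getenv("OPENAI_API_KEY"):
        print("OpenAI:", openai_get_llm_response("Be brief.", "Say hello."))
    if os.getenv("DEEPSEEK_API_KEY"):
        print("DeepSeek:", deepseekapi_get_llm_response("Be brief.", "Say hello."))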