# NOTE(review): stray git-blame/line-gutter artifacts from extraction removed here.
import gradio as gr
import openai
import os # To securely load the API key
# No need to import torch, transformers, or peft for the external API call
# --- Configuration ---
# Instruction delivered to the external model as the system message: it asks
# for a bare final answer (with units, two-decimal rounding) and no working.
SYSTEM_PROMPT = "You are a highly accurate math solver. Provide the final numerical answer to the user's problem. Use the required units (e.g., '40 cm^2') and round to two decimal places if needed. Do not show your work, steps, or formulas."

# Build the OpenAI client. The key is read from the OPENAI_API_KEY
# environment variable; if construction fails, fall back to `client = None`
# so downstream code can surface a configuration error instead of crashing.
try:
    client = openai.OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
except Exception as e:
    print(f"Error initializing OpenAI client: {e}")
    client = None
# ------------------------------------------------------------------
# 💡 EXPLICIT EXTERNAL API CALL FUNCTION
# ------------------------------------------------------------------
def call_external_api(prompt):
    """Send *prompt* to the OpenAI chat API and return the reply text.

    Returns a human-readable error string (rather than raising) when the
    client was never configured or when the API call itself fails, so the
    Gradio UI always has something to display.
    """
    if not client:
        return "Error: API Key not configured. Please set OPENAI_API_KEY environment variable."
    try:
        # System prompt is fixed; only the user's message varies per call.
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",  # fast, capable default
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": prompt},
            ],
            # 0.0 keeps answers deterministic, which matters for math.
            temperature=0.0,
        )
        reply = completion.choices[0].message.content
        return reply.strip()
    except Exception as e:
        return f"API Call Error: Could not get a response from the external model. Details: {e}"
# --- Inference Function (The Main App Logic) ---
def generate_response(message, history):
    """Gradio chat handler: forward the user's message to the external API.

    `history` is supplied by gr.ChatInterface but intentionally unused —
    the fixed system prompt plus the single user message is all the
    external model needs here.
    """
    return call_external_api(message)
# --- Gradio Interface ---
# gr.ChatInterface wraps the handler in a standard chatbot layout.
demo = gr.ChatInterface(
    fn=generate_response,
    # Fix: title had a needless f-string prefix with no placeholders (F541).
    title="Reliable Math LLM (Powered by External API)",
    description="Ask a math problem! This uses a reliable external service for answers.",
)

if __name__ == "__main__":
    demo.launch()