# app.py — Gradio Space: math-solver chatbot backed by the external OpenAI API.
import gradio as gr
import openai
import os # To securely load the API key
# No need to import torch, transformers, or peft for the external API call
# --- Configuration ---
# Instruction for the external model: return only the final numerical answer.
SYSTEM_PROMPT = "You are a highly accurate math solver. Provide the final numerical answer to the user's problem. Use the required units (e.g., '40 cm^2') and round to two decimal places if needed. Do not show your work, steps, or formulas."

# Initialize the OpenAI client from the OPENAI_API_KEY environment variable.
# Check for the key explicitly instead of letting openai.OpenAI(api_key=None)
# raise and be swallowed by a broad except: the failure mode (missing key vs.
# SDK initialization error) is now reported distinctly, and `client` is
# always either a usable client or None for callers to test.
_api_key = os.environ.get("OPENAI_API_KEY")
if _api_key:
    try:
        client = openai.OpenAI(api_key=_api_key)
    except Exception as e:
        # Defensive: unexpected SDK init failure; keep the app importable.
        print(f"Error initializing OpenAI client: {e}")
        client = None
else:
    print("Warning: OPENAI_API_KEY is not set; API calls will fail.")
    client = None
# ------------------------------------------------------------------
# 💡 EXPLICIT EXTERNAL API CALL FUNCTION
# ------------------------------------------------------------------
def call_external_api(prompt):
    """Send *prompt* to the external OpenAI chat API and return the answer.

    Args:
        prompt: The user's math problem as plain text.

    Returns:
        The model's reply with surrounding whitespace stripped, or a
        human-readable error string when the client is unconfigured, the
        request fails, or the response carries no text content.
    """
    if not client:
        return "Error: API Key not configured. Please set OPENAI_API_KEY environment variable."
    try:
        # temperature=0.0 keeps the math answers deterministic.
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",  # A fast and capable model
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": prompt},
            ],
            temperature=0.0,
        )
        # `message.content` is Optional in the OpenAI SDK: guard against None
        # before calling .strip(), which would otherwise raise AttributeError
        # and surface as a misleading generic API error below.
        content = response.choices[0].message.content
        if content is None:
            return "API Call Error: The external model returned an empty response."
        return content.strip()
    except Exception as e:
        # Broad catch is deliberate: this is the UI boundary, so any failure
        # is shown to the user instead of crashing the Gradio app.
        return f"API Call Error: Could not get a response from the external model. Details: {e}"
# --- Inference Function (The Main App Logic) ---
def generate_response(message, history):
    """Gradio chat handler: forward the user's message to the external API.

    The chat history is intentionally unused — each math problem is answered
    independently, and the system prompt lives inside the API call itself.
    The external model handles all generation and cleanup; its text comes
    back ready to display.
    """
    return call_external_api(message)
# --- Gradio Interface ---
# gr.ChatInterface provides a standard chatbot layout around the handler.
demo = gr.ChatInterface(
    fn=generate_response,
    # Plain string literal: the original used an f-string with no
    # placeholders (ruff F541); the rendered title is unchanged.
    title="Reliable Math LLM (Powered by External API)",
    description="Ask a math problem! This uses a reliable external service for answers.",
)

if __name__ == "__main__":
    demo.launch()