Update app.py
Browse files
app.py
CHANGED
|
@@ -3,12 +3,12 @@ import os
|
|
| 3 |
from openai import OpenAI
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
|
| 6 |
-
# Load
|
| 7 |
load_dotenv()
|
| 8 |
api_key = os.getenv("OPENAI_API_KEY")
|
| 9 |
client = OpenAI(api_key=api_key)
|
| 10 |
|
| 11 |
-
# Function
|
| 12 |
def get_weather(city: str):
|
| 13 |
weather_data = {
|
| 14 |
"New York": "☀️ Sunny, 25°C",
|
|
@@ -17,9 +17,8 @@ def get_weather(city: str):
|
|
| 17 |
}
|
| 18 |
return weather_data.get(city, "Weather data not available for this city.")
|
| 19 |
|
| 20 |
-
#
|
| 21 |
def chat_with_llm(user_query):
|
| 22 |
-
# Define function schema
|
| 23 |
functions = [
|
| 24 |
{
|
| 25 |
"name": "get_weather",
|
|
@@ -34,29 +33,26 @@ def chat_with_llm(user_query):
|
|
| 34 |
}
|
| 35 |
]
|
| 36 |
|
| 37 |
-
# Get response from GPT with function calling enabled
|
| 38 |
response = client.chat.completions.create(
|
| 39 |
-
model="gpt-
|
| 40 |
messages=[{"role": "user", "content": user_query}],
|
| 41 |
functions=functions,
|
| 42 |
-
function_call="auto" # GPT decides
|
| 43 |
)
|
| 44 |
|
| 45 |
message = response.choices[0].message
|
| 46 |
|
| 47 |
-
|
| 48 |
-
if message.function_call:
|
| 49 |
try:
|
| 50 |
function_args = json.loads(message.function_call.arguments)
|
| 51 |
city = function_args.get("city")
|
| 52 |
if city:
|
| 53 |
return get_weather(city)
|
| 54 |
except Exception as e:
|
| 55 |
-
return f"Error processing function call: {str(e)}"
|
| 56 |
|
| 57 |
-
#
|
| 58 |
-
return message.content
|
| 59 |
|
| 60 |
# Example Queries
|
| 61 |
-
print(chat_with_llm("What's the weather like in New York?")) #
|
| 62 |
-
print(chat_with_llm("Tell me a joke.")) # Normal LLM
|
|
|
|
| 3 |
from openai import OpenAI
from dotenv import load_dotenv

# Load environment variables from a local .env file (no-op if the file is absent).
load_dotenv()

# Fail fast with a clear message when the key is missing, instead of the
# opaque authentication error the SDK would raise on the first API call.
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError(
        "OPENAI_API_KEY is not set; add it to your environment or .env file."
    )

# Shared OpenAI client used by chat_with_llm below.
client = OpenAI(api_key=api_key)
|
| 10 |
|
| 11 |
+
# Function that provides weather details
|
| 12 |
def get_weather(city: str):
|
| 13 |
weather_data = {
|
| 14 |
"New York": "☀️ Sunny, 25°C",
|
|
|
|
| 17 |
}
|
| 18 |
return weather_data.get(city, "Weather data not available for this city.")
|
| 19 |
|
| 20 |
+
# Chat function with function calling
|
| 21 |
def chat_with_llm(user_query):
|
|
|
|
| 22 |
functions = [
|
| 23 |
{
|
| 24 |
"name": "get_weather",
|
|
|
|
| 33 |
}
|
| 34 |
]
|
| 35 |
|
|
|
|
| 36 |
response = client.chat.completions.create(
|
| 37 |
+
model="gpt-4o-mini",
|
| 38 |
messages=[{"role": "user", "content": user_query}],
|
| 39 |
functions=functions,
|
| 40 |
+
function_call="auto" # GPT decides whether to call the function
|
| 41 |
)
|
| 42 |
|
| 43 |
message = response.choices[0].message
|
| 44 |
|
| 45 |
+
if hasattr(message, "function_call") and message.function_call:
|
|
|
|
| 46 |
try:
|
| 47 |
function_args = json.loads(message.function_call.arguments)
|
| 48 |
city = function_args.get("city")
|
| 49 |
if city:
|
| 50 |
return get_weather(city)
|
| 51 |
except Exception as e:
|
| 52 |
+
return f"❌ Error processing function call: {str(e)}"
|
| 53 |
|
| 54 |
+
return message.content # Return normal LLM response if no function is called
|
|
|
|
| 55 |
|
| 56 |
# Example queries. Guarded so that importing this module does not trigger
# live API calls; run the file directly to see both behaviors.
if __name__ == "__main__":
    print(chat_with_llm("What's the weather like in New York?"))  # Calls function
    print(chat_with_llm("Tell me a joke."))  # Normal LLM response
|