Spaces:
Sleeping
Sleeping
sync changes
Browse files
app.py
CHANGED
|
@@ -1,20 +1,23 @@
|
|
| 1 |
import os
|
| 2 |
import streamlit as st
|
| 3 |
-
import
|
| 4 |
|
| 5 |
-
# Load API key securely
|
| 6 |
-
|
|
|
|
|
|
|
|
|
|
| 7 |
|
| 8 |
# Function to generate AI response
|
| 9 |
def call_openai_api(prompt, model, max_tokens, temperature):
|
| 10 |
try:
|
| 11 |
-
response =
|
| 12 |
model=model,
|
| 13 |
messages=[{"role": "user", "content": prompt}],
|
| 14 |
max_tokens=max_tokens,
|
| 15 |
temperature=temperature
|
| 16 |
)
|
| 17 |
-
return response
|
| 18 |
except Exception as e:
|
| 19 |
return f"Error: {str(e)}"
|
| 20 |
|
|
|
|
import os

import streamlit as st
from openai import OpenAI  # OpenAI v1.x client interface

# Load the API key: prefer the environment variable, fall back to Streamlit
# secrets only when it is absent.
#
# NOTE: the previous form
#     os.getenv("OPENAI_API_KEY", st.secrets.get("OPENAI_API_KEY"))
# evaluates st.secrets.get(...) eagerly (Python computes the default argument
# before calling os.getenv), so it raises when no .streamlit/secrets.toml
# exists — even though the environment variable alone would have sufficed.
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    api_key = st.secrets.get("OPENAI_API_KEY")

# Initialize the OpenAI client once at import time; reused for all requests.
client = OpenAI(api_key=api_key)
|
| 10 |
|
| 11 |
# Function to generate AI response
def call_openai_api(prompt, model, max_tokens, temperature):
    """Send *prompt* as a single user message and return the reply text.

    Uses the module-level OpenAI ``client``. Any failure (auth, network,
    bad model name, empty/malformed response, ...) is reported as a plain
    ``"Error: ..."`` string instead of being raised, so the Streamlit UI
    can render it directly.
    """
    request = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    try:
        completion = client.chat.completions.create(**request)
        # v1.x response objects expose the text under choices[].message.content
        return completion.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"
|
| 23 |
|