# Dr.Yasuda_streamlit / api / gpt_func.py  (commit 5bd4026, branch Blue2962)
import os
import tempfile
import uuid
from pathlib import Path
from openai import OpenAI
from dotenv import load_dotenv
from api.pinecone_func import query_text
from api.elevenlabs import generate_tts
# Load environment variables from a local .env file so OPENAI_API_KEY is available.
load_dotenv()
# Module-level OpenAI client shared by all functions in this module.
# Raises KeyError at import time if OPENAI_API_KEY is not set.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
def generate_response_and_voice(user_input):
    """Answer ``user_input`` in the persona of Dr. Yasuda and synthesize speech.

    Retrieves the three most relevant passages from Pinecone, embeds them in a
    Japanese system prompt, asks gpt-4o for a reply, then converts the reply
    to audio via ElevenLabs.

    Returns:
        tuple: (reply text from the model, path to the generated audio file).
    """
    # Fetch supporting context from the vector store for grounding the persona.
    matches = query_text(user_input, top_k=3)["matches"]
    reference_text = "\n".join(match['metadata']['text'] for match in matches)

    # System prompt (Japanese) establishing the persona plus retrieved context.
    system_prompt = f"""あなたは「やすだあきのり」博士です。京都大学に所属していて、チベット仏教を専門にしています。生前の情報:{reference_text}この情報を参考に日本語で答えてください。"""

    chat_messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_input},
    ]

    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=chat_messages,
    )
    reply_text = completion.choices[0].message.content

    # Synthesize the reply to audio; generate_tts returns a file path.
    audio_file = generate_tts(reply_text)
    return reply_text, audio_file