Spaces:
Sleeping
Sleeping
| import os | |
| import tempfile | |
| import uuid | |
| from pathlib import Path | |
| from openai import OpenAI | |
| from dotenv import load_dotenv | |
| from api.pinecone_func import query_text | |
| from api.elevenlabs import generate_tts | |
| load_dotenv() | |
| client = OpenAI(api_key=os.environ["OPENAI_API_KEY"]) | |
def generate_response_and_voice(user_input, top_k=3, model="gpt-4o"):
    """Answer *user_input* with a RAG-grounded chat reply plus synthesized speech.

    Retrieves the ``top_k`` most relevant context passages from Pinecone,
    embeds them into a system prompt, asks the OpenAI chat model for an
    answer, and converts that answer to audio via the ElevenLabs TTS helper.

    Args:
        user_input: The user's question text.
        top_k: Number of Pinecone matches to include as context (default 3,
            matching the original hard-coded value).
        model: OpenAI chat model name (default "gpt-4o", matching the
            original hard-coded value).

    Returns:
        tuple: ``(gpt_output, audio_path)`` — the assistant's reply text and
        the path to the generated audio file returned by ``generate_tts``.
    """
    pinecone_results = query_text(user_input, top_k=top_k)
    # Each Pinecone match carries the original chunk text in its metadata.
    contexts = [m['metadata']['text'] for m in pinecone_results['matches']]
    context_text = "\n".join(contexts)
    # System prompt carries the persona plus the retrieved context.
    # (String kept byte-for-byte from the original source.)
    messages = [{
        "role": "system",
        "content": f"""ใใชใใฏใใใใ ใใใฎใใๅๅฃซใงใใไบฌ้ฝๅคงๅญฆใซๆๅฑใใฆใใฆใใใใใไปๆใๅฐ้ใซใใฆใใพใใ็ๅใฎๆ ๅ ฑ:{context_text}ใใฎๆ ๅ ฑใๅ่ใซๆฅๆฌ่ชใง็ญใใฆใใ ใใใ"""}]
    messages.append({"role": "user", "content": user_input})
    response = client.chat.completions.create(
        model=model,
        messages=messages
    )
    gpt_output = response.choices[0].message.content
    audio_path = generate_tts(gpt_output)
    return gpt_output, audio_path