# NOTE: Hugging Face Spaces page residue ("Spaces: Sleeping") removed — it was
# scrape artifact text, not part of the program.
import logging
import os

from dotenv import load_dotenv
from groq import Groq
from langchain.schema import AIMessage, HumanMessage, SystemMessage
import gradio as gr
from gradio import ChatMessage

from correctiveRag import app

# Root logging config stays at WARNING; this module's logger is chattier.
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)  # Child logger for this module
logger.setLevel(logging.INFO)

load_dotenv(verbose=True)

# Fail fast with explicit errors: `assert` is stripped under `python -O`,
# so required configuration is validated with real exceptions instead.
if os.getenv('GROQ_MODEL') is None:
    raise RuntimeError("GROQ_MODEL environment variable is not set")
if os.getenv('GROQ_WHISPER_MODEL') is None:
    raise RuntimeError("GROQ_WHISPER_MODEL environment variable is not set")

# Groq - Audio transcription
transcription_client = Groq()

# Fixed typo in the prompt: "consice" -> "concise".
system_message = "You are a helpful assistant who provides concise answers to questions."
def chat_response(message: str, history: list[dict], state: dict):
    """Run one chat turn through the corrective-RAG graph.

    Args:
        message: The user's input text. May be None or empty (e.g. a failed
            transcription); in that case the graph is not invoked.
        history: Gradio chat history; mutated in place with the new
            user/assistant turns.
        state: Per-session dict; ``state['session_id']`` keys the LangGraph
            thread so each browser session gets its own conversation memory.

    Returns:
        A ``("", history)`` tuple: an empty string to clear the input box,
        plus the updated chat history.
    """
    logger.debug("session_id = %s", state['session_id'])
    # Robustness fix: previously only the history append was guarded, so a
    # None message still reached app.invoke and crashed. Skip the turn
    # entirely when there is no input.
    if not message:
        return "", history
    config = {"configurable": {"thread_id": state['session_id']}}
    history.append(ChatMessage(role='user', content=message))
    response = app.invoke({"messages": [HumanMessage(content=message)]}, config)
    # 'generation' may be a string or an iterable of chunks; join handles both.
    answer = ''.join(response['generation'])
    history.append(ChatMessage(role='assistant', content=answer))
    return "", history
def transcribe_audio(filename):
    """Transcribe a Spanish audio recording via Groq's Whisper endpoint.

    Args:
        filename: Path to the recorded audio file.

    Returns:
        The transcribed text.
    """
    # Leftover debug print replaced with the module logger (lazy %-args).
    logger.debug("transcribing %s with model %s",
                 filename, os.getenv('GROQ_WHISPER_MODEL'))
    with open(filename, "rb") as audio_file:
        transcription = transcription_client.audio.transcriptions.create(
            file=(filename, audio_file.read()),
            model=os.getenv('GROQ_WHISPER_MODEL'),  # Required model to use for transcription
            prompt="Preguntas sobre estructuras de datos y algoritmos.",  # Optional domain hint
            response_format="json",  # Optional
            language="es",  # Optional
            temperature=0.0  # Optional: deterministic output
        )
    return transcription.text
def voice_input(audio, history, state):
    """Turn a recorded audio clip into a chat turn.

    Transcribes the recording and forwards the resulting text through the
    normal text chat flow.

    Args:
        audio: Path to the recorded audio file from the Gradio mic widget.
        history: Current chat history list.
        state: Per-session state dict holding 'session_id'.

    Returns:
        The ``("", updated_history)`` tuple produced by ``chat_response``.
    """
    spoken_text = transcribe_audio(audio)
    return chat_response(spoken_text, history, state)