Spaces:
Sleeping
Sleeping
File size: 1,998 Bytes
bcaa3c2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 |
import logging
logging.basicConfig(level=logging.WARNING)
import os
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from dotenv import load_dotenv
load_dotenv(verbose=True)

# Fail fast at startup if required model configuration is missing.
# Explicit raises instead of `assert`, which is silently stripped under `python -O`.
for _required_var in ('GROQ_MODEL', 'GROQ_WHISPER_MODEL'):
    if os.getenv(_required_var) is None:
        raise RuntimeError(f"Missing required environment variable: {_required_var}")

import gradio as gr
from gradio import ChatMessage
from correctiveRag import app

logger = logging.getLogger(__name__)  # Child logger for this module
logger.setLevel(logging.INFO)

# Groq - Audio transcription client (presumably reads GROQ_API_KEY from the
# environment — TODO confirm against deployment config).
from groq import Groq
transcription_client = Groq()

# System prompt for the assistant (typo fixed: "consice" -> "concise").
system_message = "You are a helpful assistant who provides concise answers to questions."
def chat_response(message: str, history: list[dict], state: dict):
    """Handle one chat turn: send *message* through the RAG graph and record the exchange.

    Args:
        message: The user's input text; may be None (e.g. a failed transcription).
        history: Gradio chat history (list of ChatMessage entries); mutated in place.
        state: Per-session dict; ``state['session_id']`` keys the LangGraph thread.

    Returns:
        Tuple ``("", history)`` — the empty string clears the Gradio input box.
    """
    logger.debug("session_id = %s", state['session_id'])  # lazy %-args, not f-string
    # Each browser session gets its own LangGraph checkpoint thread.
    config = {"configurable": {"thread_id": state['session_id']}}
    if message is None:
        # Bug fix: previously only the history append was guarded, so a None
        # message was still sent to the graph as HumanMessage(content=None).
        return "", history
    history.append(ChatMessage(role='user', content=message))
    response = app.invoke({"messages": [HumanMessage(content=message)]}, config)
    # NOTE(review): join assumes 'generation' is a str or an iterable of string
    # chunks — confirm against correctiveRag's output schema.
    answer = ''.join(response['generation'])
    history.append(ChatMessage(role='assistant', content=answer))
    return "", history
def transcribe_audio(filename, *,
                     language="es",
                     prompt="Preguntas sobre estructuras de datos y algoritmos."):
    """Transcribe an audio file with Groq's Whisper transcription endpoint.

    Args:
        filename: Path to the recorded audio file.
        language: ISO-639-1 language hint passed to Whisper (default Spanish,
            matching the original hard-coded value).
        prompt: Optional context prompt that biases the transcription
            (default preserved from the original).

    Returns:
        The transcribed text (``transcription.text``).
    """
    # Replaced a leftover debugging print with structured, lazily-formatted logging.
    logger.debug("filename=%s model=%s", filename, os.getenv('GROQ_WHISPER_MODEL'))
    with open(filename, "rb") as audio_file:
        transcription = transcription_client.audio.transcriptions.create(
            file=(filename, audio_file.read()),
            model=os.getenv('GROQ_WHISPER_MODEL'),  # Required model to use for transcription
            prompt=prompt,                          # Optional
            response_format="json",                 # Optional
            language=language,                      # Optional
            temperature=0.0,                        # Optional; deterministic decoding
        )
    return transcription.text
def voice_input(audio, history, state):
    """Turn a recorded audio clip into a chat turn: transcribe it, then answer it."""
    spoken_text = transcribe_audio(audio)
    return chat_response(spoken_text, history, state)
|