"""Groq-backed chat helpers with Redis-persisted conversation history.

Two public entry points:

* ``run_pandas_groq`` — asks the model for pandas code, strips markdown
  code-fence artifacts from the reply, and persists the exchange.
* ``run_llm`` — feeds a query plus an execution result back to the model
  for a natural-language answer, and persists the exchange.

Both share the same load-history / complete / save-history round trip,
factored into the private helpers below.
"""

import json  # NOTE(review): unused here — kept in case other project code relies on it
import time

from groq import Groq
from redis import Redis
from wasabi import msg

from scripts.llm.services import load_history, save_history


async def run_groq(messages: list, groq_client: Groq, model: str = 'llama-3.3-70b-versatile') -> str:
    """Send *messages* to the Groq chat-completions API and return the reply text.

    Args:
        messages: Chat history as a list of ``{'role': ..., 'content': ...}`` dicts.
        groq_client: Configured Groq SDK client.
        model: Model identifier; defaults to Llama 3.3 70B.

    Returns:
        The assistant message content, coerced to ``str``.
    """
    chat_completion = groq_client.chat.completions.create(
        messages=messages,
        model=model,
    )
    return str(chat_completion.choices[0].message.content)


async def _seeded_history(redis_client: Redis, session_id: str, path: str, system_prompt: str) -> list:
    """Load the session's history from Redis, seeding it with *system_prompt* when empty."""
    history: list = await load_history(redis_client, session_id, path)
    if not history:
        history = [{'role': 'system', 'content': system_prompt}]
    return history


async def _finish_turn(
    redis_client: Redis,
    history: list,
    session_id: str,
    path: str,
    response: str,
    start_time: float,
) -> None:
    """Append the assistant *response* to *history*, persist it, and log elapsed time."""
    history.append({'role': 'assistant', 'content': response})
    await save_history(redis_client, history, session_id, path)
    msg.info(f'----> Response : {response} in {time.time() - start_time} seconds')


async def run_pandas_groq(
    query: str,
    session_id: str,
    system_prompt: str,
    groq_client: Groq,
    redis_client: Redis,
) -> str:
    """Ask the model to produce pandas code for *query* and persist the exchange.

    Markdown code-fence artifacts (the literal substring ``python`` and all
    backticks) are stripped from the raw reply before it is stored, so the
    saved assistant turn is the cleaned code text.

    Returns:
        The cleaned model response.
    """
    start_time = time.time()
    history = await _seeded_history(
        redis_client, session_id, 'assets/history/pandas_chat.json', system_prompt
    )
    history.append({'role': 'user', 'content': query})

    response: str = await run_groq(history, groq_client)
    # Strip markdown fence remnants (```python ... ```) from the generated code.
    response = response.replace('python', '')
    response = response.replace('`', '')
    # ! Run script here and attach the response, to callback for llm in future responses

    await _finish_turn(
        redis_client, history, session_id, 'assets/history/pandas_chat.json', response, start_time
    )
    return response


async def run_llm(
    query: str,
    result: str,
    session_id: str,
    system_prompt: str,
    groq_client: Groq,
    redis_client: Redis,
) -> str:
    """Send the user *query* together with an execution *result* to the model.

    The query/result pair is formatted into a single user turn so the model
    can phrase a final answer; the exchange is persisted to the session's
    chat history.

    Returns:
        The raw model response.
    """
    start_time = time.time()
    history = await _seeded_history(
        redis_client, session_id, 'assets/history/chat.json', system_prompt
    )
    history.append({
        'role': 'user',
        'content': f'''
Query : {query}
Result : {result}
''',
    })

    response: str = await run_groq(history, groq_client)

    await _finish_turn(
        redis_client, history, session_id, 'assets/history/chat.json', response, start_time
    )
    return response