Spaces:
Sleeping
Sleeping
File size: 2,071 Bytes
fa8520f 583f6dd 6874dac b4fb6ac 6c655a3 fa8520f 6874dac b4fb6ac 6874dac b4fb6ac 6874dac 6c655a3 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 |
import pandas as pd
import ast
from groq import Groq
import os
from .prompts import captioning_prompt
from src.genai.utils.models_loader import llm
from langchain_core.messages import FunctionMessage , AIMessage
from .tools import retrieve_data_for_analytics
import re
def caption_image(image_base64, user_input):
    """Caption the most recent uploaded image with Groq's llama-4-scout vision model.

    Args:
        image_base64: sequence of base64-encoded image strings; only the last
            (most recent) entry is sent to the model.
        user_input: the user's query, forwarded to ``captioning_prompt`` to
            steer the caption.

    Returns:
        The model's caption text, or ``''`` when no image is provided.
    """
    # Guard clause: nothing to caption.
    if not image_base64:
        return ''
    print('Captioning image')
    client = Groq(api_key=os.environ.get('GROQ_API_KEY'))
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": captioning_prompt(user_input)},
                    {
                        "type": "image_url",
                        "image_url": {
                            # "image/jpeg" is the registered MIME type
                            # ("image/jpg" is not a valid media type).
                            "url": f"data:image/jpeg;base64,{image_base64[-1]}",
                        },
                    },
                ],
            }
        ],
        model="meta-llama/llama-4-scout-17b-16e-instruct",
        max_completion_tokens=50,
        temperature=1,
    )
    return chat_completion.choices[0].message.content
def show_analytics(business_details):
    """Fetch analytics data for a business via the analytics retrieval tool.

    Args:
        business_details: business descriptor of any type; stringified before
            being passed to the tool.

    Returns:
        Whatever ``retrieve_data_for_analytics`` returns for the business.
    """
    return retrieve_data_for_analytics(str(business_details))
def extract_latest_response_block(response):
    """Pull the most recent 3-message (AI/Function) content block from a chat
    history whose middle entry looks like a tool-call result.

    Scans *response* newest-to-oldest, keeping only ``FunctionMessage`` /
    ``AIMessage`` contents. A run of three is accepted when its middle entry
    contains both ``"tool="`` and ``"query_response"``; otherwise the run is
    discarded and scanning continues.

    Returns:
        The accepted block's contents in chronological order, or ``[]`` if no
        qualifying block exists.
    """
    latest_block = []
    collected = []  # contents gathered newest-first while scanning backwards
    for msg in reversed(response):
        if not isinstance(msg, (FunctionMessage, AIMessage)):
            continue
        collected.append(msg.content)
        if len(collected) < 3:
            continue
        # collected is newest-first, but the middle of three is index 1 either way
        if "tool=" in collected[1] and "query_response" in collected[1]:
            latest_block = collected[::-1]  # restore chronological order
            break
        collected = []
    print('The latest block', latest_block)
    return latest_block
|