subashpoudel committed on
Commit
84d64f1
Β·
0 Parent(s):

Initial commit

Browse files
.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ .env
2
+ myenv
3
+ *.pyc
4
+ __pycache__/
api/__init__.py ADDED
File without changes
api/main.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, Request
2
+ from .routers import reply_generator
3
+ from .routers import pickup_line_generator
4
+ from .routers import looks_analyzer
5
+
6
+ app = FastAPI()
7
+
8
+ @app.middleware("http")
9
+ async def log_requests(request: Request, call_next):
10
+ response = await call_next(request)
11
+ return response
12
+
13
+ @app.get("/")
14
+ async def root():
15
+ return {'response':'Hello'}
16
+
17
+ app.include_router(reply_generator.router, prefix="/api")
18
+ app.include_router(pickup_line_generator.router, prefix="/api")
19
+ app.include_router(looks_analyzer.router, prefix="/api")
20
+
api/routers/__init__.py ADDED
File without changes
api/routers/looks_analyzer.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import base64

from fastapi import APIRouter
from pydantic import BaseModel
from typing import Optional
from src.looks_analyzer.generate import Graph

router = APIRouter()
# Build and compile the LangGraph workflow once at import time; reused per request.
looks_graph_builder = Graph()
graph = looks_graph_builder.run()

class UserRequest(BaseModel):
    # Base64-encoded image of the person to analyze.
    image_base64: Optional[str] = None
    # Optional free-text request, e.g. "How do I look in this outfit?".
    user_query: Optional[str] = None


@router.post("/looks-analyzer")
async def generate_reply(request: UserRequest):
    """
    Endpoint to analyze looks
    """
    config = {"configurable": {"thread_id": "looks-analyzer-thread"}}

    # Bug fix: the graph hands state['image'] to types.Part.from_bytes, which
    # expects raw bytes -- the base64 string was previously passed through
    # unchanged. Decode it first (reply_generator already does this).
    image_bytes = base64.b64decode(request.image_base64) if request.image_base64 else None

    result = graph.invoke({
        'image': image_bytes,
        'messages': [request.user_query]
    }, config=config)
    return {'response': result['suggestions']}
api/routers/pickup_line_generator.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import APIRouter
from pydantic import BaseModel
from typing import Optional
from api.stored_data import stored_data
from src.pickup_line_generator.generate import Graph
from typing import List

router = APIRouter()
# Build and compile the LangGraph workflow once at import time; reused per request.
pickup_generator = Graph()
graph = pickup_generator.run()

class UserRequest(BaseModel):
    # Optional free-text context from the user.
    user_query: Optional[str] = None
    # Desired tones, e.g. ["romantic", "funny"].
    tones: Optional[List[str]] = None
    # Attributes of the target person (required), e.g. ["bookworm", "coffee lover"].
    attributes: list


@router.post("/pickup-line-generator")
async def generate_reply(request: UserRequest):
    """
    Endpoint to generate personalized pickup lines from tones and attributes.
    """
    # Bug fix: the thread id was copy-pasted as "reply-generator-thread";
    # give this graph its own checkpoint thread.
    config = {"configurable": {"thread_id": "pickup-line-generator-thread"}}

    result = graph.invoke({
        'messages': [request.user_query],
        'tones': request.tones,
        'attributes': request.attributes
    }, config=config)
    # Debug print removed.
    return {'response': result['pickup_lines']}
api/routers/reply_generator.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import base64
from fastapi import APIRouter
from pydantic import BaseModel
from typing import Optional
from src.conversation_extractor.extractor import ConversationExtractor
from api.stored_data import stored_data
from src.reply_generator.generate import Graph
from typing import List

router = APIRouter()
# Build and compile the LangGraph workflow once at import time; reused per request.
reply_generator = Graph()
graph = reply_generator.run()

class UserRequest(BaseModel):
    # Base64-encoded screenshot of the chat to reply to (required).
    image_base64: str
    # Optional free-text request from the user.
    user_query: Optional[str] = None
    # Desired tones, e.g. ["formal", "humorous"].
    tones: Optional[List[str]] = None

@router.post("/reply-generator")
async def generate_reply(request: UserRequest):
    """
    Endpoint to extract conversation from a base64-encoded image
    and suggest replies in the requested tones.
    """
    config = {"configurable": {"thread_id": "reply-generator-thread"}}
    image_bytes = base64.b64decode(request.image_base64)
    # OCR the screenshot into plain dialogue text.
    conversation_text = ConversationExtractor().extract_conversation(image_bytes)
    # NOTE(review): stored_data is a module-level dict shared by all requests,
    # so concurrent clients will overwrite each other's context -- consider
    # passing conversation_text directly instead of via shared state.
    stored_data['conversation_context'] = conversation_text

    result = graph.invoke({
        'messages': [request.user_query],
        'tones': request.tones,
        'conversation_chat': stored_data['conversation_context']
    }, config=config)
    # Debug print removed.
    return {'response': result['replies']}
api/schemas.py ADDED
File without changes
api/stored_data.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
# Module-level scratch store shared across requests. Seeded with a sample
# conversation so the reply generator has context before any image upload.
stored_data = {
    'conversation_context': '''
miss u
*
same babe
do u want to hang out with me
love to
fine when?
''',
}
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ langchain-groq
2
+ langchain-openai
3
+ pydantic
4
+ google-genai
5
+ langgraph
6
+ requests
7
+ fastapi
8
+ uvicorn
9
+ python-dotenv
10
+ streamlit
src/conversation_extractor/__init__.py ADDED
File without changes
src/conversation_extractor/extractor.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1

from google import genai
from google.genai import types
from src.utils.models_loader import ocr_llm
from .prompts import prompt

from dotenv import load_dotenv
import os

load_dotenv()
# Bug fix: os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY') raised
# TypeError when the variable was absent (environ values must be str), and was
# a no-op otherwise since load_dotenv() already exports it. Fail fast with a
# clear message instead.
if not os.getenv('GOOGLE_API_KEY'):
    raise RuntimeError('GOOGLE_API_KEY is not set (check your .env file)')

class ConversationExtractor:
    """OCR-style extractor that turns a chat screenshot into dialogue text."""

    def __init__(self):
        self.client = genai.Client()
        self.model_name = ocr_llm   # Gemini model id, e.g. "gemini-2.5-flash"
        self.prompt = prompt

    def extract_conversation(self, image_bytes: bytes) -> str:
        """
        Extract conversation text from an image.
        :param image_bytes: Binary content of the image
        :return: Extracted conversation text
        """
        response = self.client.models.generate_content(
            model=self.model_name,
            contents=[
                # NOTE(review): mime_type is hard-coded to JPEG even though the
                # UI accepts PNG uploads -- confirm against the Gemini API.
                types.Part.from_bytes(data=image_bytes, mime_type="image/jpeg"),
                self.prompt
            ]
        )
        return response.text
src/conversation_extractor/prompts.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# Instruction sent to the OCR model alongside the screenshot: pull out only
# the dialogue, in order, with speaker labels.
_PROMPT_SENTENCES = (
    "Extract only the conversation text between two persons from this image.",
    "Ignore timestamps, emojis, or extra elements.",
    "Preserve dialogue order.",
    "If names exist, use them; otherwise label speakers Person A and Person B.",
    "Output strictly as chat dialogue lines. Return only dialogue.",
)
prompt = " ".join(_PROMPT_SENTENCES)
src/looks_analyzer/__init__.py ADDED
File without changes
src/looks_analyzer/generate.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from src.utils.models_loader import ocr_llm
2
+ from .prompts import prompt
3
+ from .state import State
4
+ from langgraph.graph import StateGraph, START, END
5
+ from langgraph.checkpoint.memory import MemorySaver
6
+ from google.genai import types
7
+ from google import genai
8
+ from langchain_core.messages import HumanMessage
9
+
10
+
11
+
12
class Generate:
    """Graph node that asks a Gemini vision model for appearance suggestions."""

    def __init__(self):
        self.client = genai.Client()
        self.model_name = ocr_llm

    def run(self, state: State):
        """Analyze state['image'] and return suggestions plus an assistant message."""
        # The most recent human turn (if any) personalizes the prompt.
        latest_human_message = next(
            (msg for msg in reversed(state['messages']) if isinstance(msg, HumanMessage)),
            None
        )
        # Bug fix: pass the message *text* to prompt(); previously the whole
        # HumanMessage object was interpolated into the prompt f-string.
        user_query = latest_human_message.content if latest_human_message else None
        # Build the prompt once (it was previously computed a second time just
        # for a debug print, which is removed).
        full_prompt = prompt(user_query)
        response = self.client.models.generate_content(
            model=self.model_name,
            contents=[
                types.Part.from_bytes(data=state['image'], mime_type="image/jpeg"),
                full_prompt
            ]
        )
        return {
            'messages': [{'role': 'assistant', 'content': response.text}],
            'suggestions': response.text
        }
36
+
37
+
38
+
39
class Graph:
    """Builds and compiles the looks-analyzer LangGraph workflow."""

    def __init__(self):
        # In-memory checkpointer; state is keyed by the configured thread_id.
        self.memory = MemorySaver()

    def run(self):
        """Compile a single-node graph: START -> generate_pickups -> END."""
        builder = StateGraph(State)
        builder.add_node('generate_pickups', Generate().run)
        builder.add_edge(START, 'generate_pickups')
        builder.add_edge('generate_pickups', END)
        return builder.compile(checkpointer=self.memory)
src/looks_analyzer/prompts.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
def prompt(user_query: str = None) -> str:
    """Build the looks-analysis instruction, optionally folding in the user's own request."""
    base_prompt = '''
    Analyze the person in this image and evaluate their overall appearance, style, and presentation.
    Provide constructive suggestions to enhance their looks, grooming, and fashion.
    Focus on actionable, practical, and personalized recommendations.
    Do not add unrelated details or opinions.
    Return concise, clear advice only shortly in 40 words only.
    '''

    # Guard clause: no user request means the base instruction alone.
    if not user_query:
        return base_prompt
    return base_prompt + f'\nAlso, take into account the user’s request: "{user_query}".'
src/looks_analyzer/state.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
from typing import Optional , TypedDict , Annotated
from langgraph.graph.message import add_messages


# Shared state flowing through the looks-analyzer graph.
class State(TypedDict):
    # Raw image bytes fed to the vision model.
    image: bytes
    # Chat history; add_messages makes updates append rather than replace.
    messages: Annotated[list, add_messages]
    # Final advice text produced by the Generate node.
    suggestions: str
src/pickup_line_generator/__init__.py ADDED
File without changes
src/pickup_line_generator/generate.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from src.utils.models_loader import reply_llm
2
+ from .prompts import pickup_line_prompt
3
+ from .schemas import PickupFormatter
4
+ from .state import State
5
+ from langchain_core.messages import SystemMessage, HumanMessage, FunctionMessage
6
+ from langgraph.graph import StateGraph, START, END
7
+ from langgraph.checkpoint.memory import MemorySaver
8
+
9
class Generate:
    """Graph node that produces structured pickup lines from tones + attributes."""

    def __init__(self):
        self.llm = reply_llm

    def run(self, state: State):
        """Invoke the LLM with the prompt, attributes and tones; return structured lines."""
        messages = [SystemMessage(content=pickup_line_prompt),
                    # Bug fix: the FunctionMessage name was misspelled 'attrubutes'.
                    FunctionMessage(name='attributes', content=f'''The attributes of person are: {state['attributes']}'''),
                    HumanMessage(content=f'''The required tone: {state["tones"]}''')]
        # Bug fix: use the injected self.llm -- the module-level reply_llm was
        # used directly before, leaving the constructor assignment dead.
        pickup_lines = self.llm.with_structured_output(PickupFormatter).invoke(messages)
        return {
            'messages': [{'role': 'assistant', 'content': str(pickup_lines.model_dump())}],
            'pickup_lines': pickup_lines.model_dump()
        }
22
+
23
+
24
class Graph:
    """Builds and compiles the pickup-line-generator LangGraph workflow."""

    def __init__(self):
        # In-memory checkpointer; state is keyed by the configured thread_id.
        self.memory = MemorySaver()

    def run(self):
        """Compile a single-node graph: START -> generate_pickups -> END."""
        builder = StateGraph(State)
        builder.add_node('generate_pickups', Generate().run)
        builder.add_edge(START, 'generate_pickups')
        builder.add_edge('generate_pickups', END)
        return builder.compile(checkpointer=self.memory)
34
+
35
+
src/pickup_line_generator/prompts.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# System prompt for the pickup-line generator node. NOTE(review): it promises
# exactly 5 lines -- the structured-output schema must expose 5 fields.
pickup_line_prompt ='''
You are a smart conversation coach.
You will be given:
1. The recent conversation between two people (context).
2. The attributes of the target person (e.g., ambitious, classy, fitness freak, spiritual, funny).

Your task is to generate 5 highly personalized pickup lines or impressive responses that can attract or influence the target person.
Adapt the style of the lines to match both the context and the person’s attributes.
They can be romantic, professional, or casual depending on the context.
Each line should be unique, concise, and impactful.
Return exactly 5 lines, numbered 1 to 5, without extra commentary.
'''
src/pickup_line_generator/schemas.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
from pydantic import BaseModel

class PickupFormatter(BaseModel):
    """Structured output: one field per generated pickup line.

    Bug fix: the prompt asks the model for exactly 5 lines, but the schema
    previously stopped at pickup_line4, silently truncating the output.
    """
    pickup_line1: str
    pickup_line2: str
    pickup_line3: str
    pickup_line4: str
    pickup_line5: str
src/pickup_line_generator/state.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Optional , TypedDict , Annotated
from langgraph.graph.message import add_messages


# Shared state flowing through the pickup-line-generator graph.
class State(TypedDict):
    # Chat history; add_messages makes updates append rather than replace.
    messages: Annotated[list, add_messages]
    # Extracted dialogue context (not read by the current Generate node).
    conversation_chat: str
    # Requested tones, e.g. ["romantic", "funny"].
    tones: list
    # Attributes of the target person, e.g. ["bookworm"].
    attributes:list
    # Structured result from the Generate node (PickupFormatter.model_dump()).
    pickup_lines:dict
src/reply_generator/__init__.py ADDED
File without changes
src/reply_generator/generate.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from src.utils.models_loader import reply_llm
2
+ from .prompts import prompt
3
+ from .schemas import ReplyFormatter
4
+ from .state import State
5
+ from langchain_core.messages import SystemMessage, HumanMessage, FunctionMessage
6
+ from langgraph.graph import StateGraph, START, END
7
+ from langgraph.checkpoint.memory import MemorySaver
8
+
9
class Generate:
    """Graph node that suggests replies for the extracted conversation."""

    def __init__(self):
        self.llm = reply_llm

    def run(self, state: State):
        """Invoke the LLM with the conversation context and tones; return structured replies."""
        messages = [SystemMessage(content=prompt),
                    FunctionMessage(name='conversation-context', content=state['conversation_chat']),
                    HumanMessage(content=f'''The required tone: {state["tones"]}''')]
        # Bug fix: use the injected self.llm -- the module-level reply_llm was
        # used directly before, leaving the constructor assignment dead.
        # Debug prints removed.
        reply = self.llm.with_structured_output(ReplyFormatter).invoke(messages)

        return {
            'messages': [{'role': 'assistant', 'content': str(reply.model_dump())}],
            'replies': reply.model_dump()
        }
25
+
26
+
27
class Graph:
    """Builds and compiles the reply-generator LangGraph workflow."""

    def __init__(self):
        # In-memory checkpointer; state is keyed by the configured thread_id.
        self.memory = MemorySaver()

    def run(self):
        """Compile a single-node graph: START -> generate_reply -> END."""
        builder = StateGraph(State)
        builder.add_node('generate_reply', Generate().run)
        builder.add_edge(START, 'generate_reply')
        builder.add_edge('generate_reply', END)
        return builder.compile(checkpointer=self.memory)
37
+
38
+
src/reply_generator/prompts.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
# System prompt for the reply generator. Bug fix: each line was wrapped in its
# own double quotes *inside* the triple-quoted string (a leftover from a
# parenthesized-concatenation form), so literal quote characters were sent to
# the model on every line; they are removed here.
prompt = '''
You are a conversation assistant. Read the dialogue between two people.
Suggest the best possible 5 replies for the next message, considering context and relationship.
Your reply should be natural, concise, and impressive.
If it’s romantic, be charming but respectful.
If it’s professional, be polite, confident, and value-adding.
If it’s casual, be friendly and engaging.
The tone to response will be provided by the human themselves.
Return only the suggested replies text, nothing else.
'''
src/reply_generator/schemas.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
from pydantic import BaseModel

class ReplyFormatter(BaseModel):
    """Structured output: one field per suggested reply.

    Bug fix: the prompt asks the model for 5 replies, but the schema
    previously stopped at reply4, silently truncating the output.
    """
    reply1: str
    reply2: str
    reply3: str
    reply4: str
    reply5: str
src/reply_generator/state.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
from typing import Optional , TypedDict , Annotated
from langgraph.graph.message import add_messages


# Shared state flowing through the reply-generator graph.
class State(TypedDict):
    # Chat history; add_messages makes updates append rather than replace.
    messages: Annotated[list, add_messages]
    # Dialogue text extracted from the uploaded screenshot.
    conversation_chat: str
    # Requested tones, e.g. ["formal", "humorous"].
    tones: list
    # Structured result from the Generate node (ReplyFormatter.model_dump()).
    replies:dict
src/utils/__init__.py ADDED
File without changes
src/utils/models_loader.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
from langchain_groq import ChatGroq
from dotenv import load_dotenv
import os

load_dotenv()
# Bug fix: os.environ['GROQ_API_KEY'] = os.getenv('GROQ_API_KEY') raised
# TypeError when the variable was absent (environ values must be str), and was
# a no-op otherwise since load_dotenv() already exports it. Fail fast with a
# clear message instead.
if not os.getenv('GROQ_API_KEY'):
    raise RuntimeError('GROQ_API_KEY is not set (check your .env file)')

# Shared chat model used by the reply and pickup-line generators.
reply_llm = ChatGroq(model='llama-3.1-8b-instant')
# Gemini model id used for image OCR / vision tasks.
ocr_llm = "gemini-2.5-flash"
streamlit/app.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
# Streamlit multi-page entry point; feature pages live under streamlit/pages/.
import streamlit as st
st.set_page_config(page_title="REVEL-RIZZ", layout="centered")
streamlit/pages/check_looks.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Streamlit page: upload a photo and call the /api/looks-analyzer endpoint.
import streamlit as st
import requests
import base64

# FastAPI endpoint
API_URL = "http://localhost:8000/api/looks-analyzer"  # Change if deployed elsewhere

st.set_page_config(page_title="Looks Analyzer", page_icon="🧑‍🎨", layout="centered")

st.title("🧑‍🎨 Looks Analyzer")
st.write("Upload your image and get style & looks suggestions!")

# Upload image
uploaded_file = st.file_uploader("Upload an image (JPG/PNG)", type=["jpg", "jpeg", "png"])

# User query input
user_query = st.text_area("Enter your query (optional)", placeholder="e.g., How do I look in this outfit?")

# Button to trigger API
if st.button("Analyze Looks"):
    if uploaded_file is None:
        st.error("⚠️ Please upload an image.")
    else:
        try:
            # Convert image to base64 so it can travel in a JSON body.
            image_bytes = uploaded_file.read()
            image_base64 = base64.b64encode(image_bytes).decode("utf-8")

            payload = {
                "image_base64": image_base64,
                # Empty/whitespace-only query is sent as None so the API treats it as absent.
                "user_query": user_query if user_query.strip() else None,
            }

            with st.spinner("✨ Analyzing your looks..."):
                response = requests.post(API_URL, json=payload)

            if response.status_code == 200:
                data = response.json()
                st.success("✅ Analysis complete!")
                st.write("### Suggestions:")
                st.json(data)

            else:
                st.error(f"❌ Error: {response.status_code} - {response.text}")

        except Exception as e:
            # Broad catch is deliberate: this is a UI boundary, so any failure
            # (network, JSON, encoding) surfaces as an error banner, not a traceback.
            st.error(f"🚨 An error occurred: {str(e)}")
streamlit/pages/pickup_line_generator.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Streamlit page: collect tones/attributes and call /api/pickup-line-generator.
import streamlit as st
import requests

# FastAPI endpoint
API_URL = "http://localhost:8000/api/pickup-line-generator"  # Change if deployed elsewhere

st.set_page_config(page_title="Pickup Line Generator", page_icon="💘", layout="centered")

st.title("💘 Pickup Line Generator")
st.write("Generate fun and creative pickup lines with customizable tones and attributes.")

# User query input (label typo fixed: was "Your some Qureies? (optional)")
user_query = st.text_area("Have any queries? (optional)", placeholder="e.g., I have to impress her by tomorrow.")

# Tones (multi-select)
available_tones = ["romantic", "funny", "cheesy", "flirty", "sarcastic", "friendly"]
tones = st.multiselect("Select tones (optional)", available_tones)

# Attributes (must provide at least one)
attributes = st.text_area(
    "Enter attributes of that person (comma-separated, required)",
    placeholder="e.g., coffee lover, programmer, bookworm"
)

# Button to trigger API
if st.button("Generate Pickup Line"):
    if not attributes.strip():
        st.error("⚠️ Please enter at least one attribute.")
    else:
        try:
            # Split the comma-separated attributes into a clean list.
            attributes_list = [attr.strip() for attr in attributes.split(",") if attr.strip()]

            payload = {
                "user_query": user_query if user_query.strip() else None,
                "tones": tones if tones else None,
                "attributes": attributes_list,
            }

            with st.spinner("💫 Generating pickup lines..."):
                response = requests.post(API_URL, json=payload)

            if response.status_code == 200:
                # Parse the body once and reuse it (it was parsed twice before).
                data = response.json()
                st.success("✅ Pickup lines generated successfully!")
                st.write("### Your Pickup Lines:")
                st.json(data)

            else:
                st.error(f"❌ Error: {response.status_code} - {response.text}")

        except Exception as e:
            # UI boundary: surface any failure as an error banner.
            st.error(f"🚨 An error occurred: {str(e)}")
streamlit/pages/reply_generator.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Streamlit page: upload a chat screenshot and call /api/reply-generator.
import streamlit as st
import requests
import base64

# FastAPI endpoint
API_URL = "http://localhost:8000/api/reply-generator"  # Change if deployed elsewhere

st.set_page_config(page_title="Reply Generator", page_icon="💬", layout="centered")

st.title("💬 Reply Generator")
st.write("Upload an image, ask a question, and get AI-powered replies.")

# Upload image
uploaded_file = st.file_uploader("Upload an image (JPG/PNG)", type=["jpg", "jpeg", "png"])

# User query input
user_query = st.text_area("Enter your query", placeholder="Type your question here...")

# Tones (multi-select)
available_tones = ["formal", "casual", "friendly", "professional", "humorous"]
tones = st.multiselect("Select tones (optional)", available_tones)

# Button to trigger API
if st.button("Generate Reply"):
    if uploaded_file is None:
        st.error("⚠️ Please upload an image first.")
    elif not user_query.strip():
        st.error("⚠️ Please enter a query.")
    else:
        try:
            # Convert image to base64 so it can travel in a JSON body.
            image_bytes = uploaded_file.read()
            image_base64 = base64.b64encode(image_bytes).decode("utf-8")

            payload = {
                "image_base64": image_base64,
                "user_query": user_query,
                "tones": tones if tones else None,
            }

            with st.spinner("⏳ Generating reply..."):
                response = requests.post(API_URL, json=payload)

            if response.status_code == 200:
                # Parse the body once and reuse it (it was parsed twice before).
                data = response.json()
                st.success("✅ Reply generated successfully!")
                st.write("### Response:")
                st.json(data)

            else:
                st.error(f"❌ Error: {response.status_code} - {response.text}")

        except Exception as e:
            # UI boundary: surface any failure as an error banner.
            st.error(f"🚨 An error occurred: {str(e)}")
+ st.error(f"🚨 An error occurred: {str(e)}")