"""FastAPI route definitions for the Style GPT API.

All endpoints are registered on the app via ``setup_routes(app)``.  The
``/text`` and ``/chat`` endpoints share the same query-routing pipeline
(greeting / name-question shortcuts, then color-compatibility,
color-suggestion, or general outfit advice); the only difference is that
``/chat`` forwards user-supplied images to the model.  That shared pipeline
lives in the private ``_route_style_query`` helper below so the two handlers
cannot drift apart.
"""

import json
import base64
from typing import List, Optional

from fastapi import HTTPException, UploadFile, File, Form
from fastapi.responses import StreamingResponse

from models import ChatRequest, ChatResponse, WardrobeItem, TextRequest
from query_processing import (
    extract_clothing_info,
    extract_colors_from_query,
    detect_query_type,
    get_color_matches,
    is_greeting,
    is_name_question,
)
from conversation import (
    get_conversation_context,
    enhance_message_with_context,
    update_context,
)
from model_manager import (
    generate_chat_response,
    generate_chat_response_streaming,
    style_model,
)
from wardrobe import handle_wardrobe_chat
from rag import retrieve_relevant_context, format_rag_context
from config import COLOR_HARMONY

# Colors that pair with anything; a pair containing one of these is always
# reported as compatible regardless of the COLOR_HARMONY table.
_NEUTRAL_COLORS = ["white", "black", "grey", "gray", "beige", "navy"]

# Occasion words scanned for in general outfit queries (first match wins).
_OCCASION_KEYWORDS = [
    "defense", "project", "presentation", "meeting", "interview",
    "formal", "casual", "party", "wedding",
]


def _quick_reply(prompt, message, session_id, images, max_length):
    """Generate a short one-shot reply (greetings / name questions).

    Retrieves a small amount of RAG context for the raw user message,
    generates a response, records it in the conversation context, and wraps
    it in a ChatResponse.
    """
    rag_context = format_rag_context(retrieve_relevant_context(message, top_k=2))
    response_text = generate_chat_response(
        prompt,
        max_length=max_length,
        temperature=0.8,
        rag_context=rag_context,
        images=images,
    )
    update_context(session_id, message, {"response": response_text})
    return ChatResponse(response=response_text, session_id=session_id)


def _color_compatibility_reply(enhanced_message, message, session_id, rag_context, images):
    """Answer "does color A go with color B?" queries.

    Returns None when fewer than two colors are found so the caller can fall
    back to the general outfit path (the original code silently returned
    nothing here, which failed response-model validation).

    A rule-based verdict from COLOR_HARMONY is computed first as a fallback;
    it is replaced by the model's answer whenever the model produces a
    non-trivial (> 15 chars) response.
    """
    found_colors = extract_colors_from_query(enhanced_message)
    if len(found_colors) < 2:
        return None

    # Each entry is (original_word, canonical_color).
    (color1_original, color1_mapped), (color2_original, color2_mapped) = found_colors[:2]

    compatible = False
    if color1_mapped in COLOR_HARMONY:
        compatible = color2_mapped in COLOR_HARMONY[color1_mapped]
    elif color2_mapped in COLOR_HARMONY:
        compatible = color1_mapped in COLOR_HARMONY[color2_mapped]
    if color1_mapped in _NEUTRAL_COLORS or color2_mapped in _NEUTRAL_COLORS:
        compatible = True

    if compatible:
        response_text = f"Yes, {color1_original.title()} will go well with {color2_original.title()}. They create a balanced and stylish combination that works great together!"
    else:
        response_text = f"{color1_original.title()} and {color2_original.title()} can work together, though you might want to add some neutral pieces to balance the look."

    prompt = f"Does {color1_original} go well with {color2_original}? Answer naturally and conversationally."
    ai_response = generate_chat_response(
        prompt,
        max_length=150,
        temperature=0.8,
        rag_context=rag_context,
        images=images,
    )
    if len(ai_response) > 15:
        response_text = ai_response

    update_context(session_id, message, {
        "response": response_text,
        "color": color1_original,
        "colors": [color1_original, color2_original],
    })
    return ChatResponse(response=response_text, session_id=session_id)


def _color_suggestion_reply(enhanced_message, message, session_id, conv_context, rag_context, images):
    """Answer "what colors go with X?" queries.

    Resolves the base color from the query, falling back to the last color
    mentioned in the conversation; asks the user when none can be found.
    The model answer is preferred; a rule-based suggestion built from
    get_color_matches() is used when the model reply is too short.
    """
    clothing_info = extract_clothing_info(enhanced_message)
    base_color = clothing_info.get("color")
    if not base_color:
        found_colors = extract_colors_from_query(enhanced_message)
        if found_colors:
            base_color = found_colors[0][1]
        elif conv_context["context"].get("last_color"):
            base_color = conv_context["context"]["last_color"]
    if not base_color:
        return ChatResponse(
            response="I'd love to help you with colors! Could you tell me which color you're working with? For example, 'what colors go with red?'",
            session_id=session_id,
        )

    matching_colors = get_color_matches(base_color)
    clothing_item = (
        clothing_info.get("existing_item")
        or clothing_info.get("type")
        or conv_context["context"].get("last_item", "outfit")
    )
    suggested_colors = [c.title() for c in matching_colors[:4]]

    # Pick up style intent words from the raw (not enhanced) message.
    lowered = message.lower()
    style_keywords = []
    if "stylish" in lowered or "standout" in lowered or "stand out" in lowered:
        style_keywords.append("stylish and eye-catching")
    if "professional" in lowered or "formal" in lowered:
        style_keywords.append("professional")
    if "casual" in lowered:
        style_keywords.append("casual")
    style_note = f" The user wants something {', '.join(style_keywords)}." if style_keywords else ""

    prompt = f"What colors go well with {base_color} {clothing_item}?{style_note} Give me a natural, conversational answer with specific color suggestions."
    ai_response = generate_chat_response(
        prompt,
        max_length=300,
        temperature=0.8,
        rag_context=rag_context,
        images=images,
    )
    if len(ai_response) > 30:
        response_text = ai_response
    else:
        response_text = f"For your {base_color} {clothing_item}, I'd suggest pairing it with {', '.join(suggested_colors[:3])}, or {suggested_colors[3] if len(suggested_colors) > 3 else 'other neutrals'}. These colors complement each other beautifully!"

    update_context(session_id, message, {
        "response": response_text,
        "color": base_color,
        "item": clothing_item,
        "colors": suggested_colors,
    })
    return ChatResponse(response=response_text, session_id=session_id)


def _general_reply(enhanced_message, message, session_id, conv_context, rag_context, images):
    """Handle general outfit-advice queries.

    Enriches the prompt with any detected color / item / occasion context
    before asking the model for detailed suggestions.
    """
    clothing_info = extract_clothing_info(enhanced_message)
    # If no color was detected, retry extraction with the last-mentioned
    # color appended, so follow-up questions inherit the prior color.
    if not clothing_info.get("color") and conv_context["context"].get("last_color"):
        enhanced_message = f"{enhanced_message} {conv_context['context']['last_color']}"
        clothing_info = extract_clothing_info(enhanced_message)

    context_info = ""
    if clothing_info.get("color"):
        context_info += f"Color preference: {clothing_info.get('color')}. "
    if clothing_info.get("type"):
        context_info += f"Item type: {clothing_info.get('type')}. "
    if clothing_info.get("existing_item"):
        context_info += f"User has: {clothing_info.get('existing_item')}. "
    occasion = next((word for word in _OCCASION_KEYWORDS if word in enhanced_message.lower()), None)
    if occasion:
        context_info += f"Occasion: {occasion}. "

    prompt = f"{enhanced_message}"
    if context_info:
        prompt += f"\n\nContext: {context_info.strip()}"
    prompt += "\n\nGive helpful, detailed outfit suggestions that are practical and stylish. Be specific about item combinations and explain why they work well."

    response_text = generate_chat_response(
        prompt,
        max_length=1024,
        temperature=0.8,
        rag_context=rag_context,
        images=images,
    )
    update_context(session_id, message, {
        "response": response_text,
        "color": clothing_info.get("color"),
        "item": clothing_info.get("type") or clothing_info.get("requested_item"),
        "items": clothing_info.get("items", []),
    })
    return ChatResponse(response=response_text, session_id=session_id)


def _route_style_query(message, session_id, images):
    """Shared routing pipeline for /text and /chat.

    Order of checks: name question -> greeting -> query-type dispatch
    (color compatibility, color suggestion, general).  ``images`` is None
    for the text-only endpoint.
    """
    conv_context = get_conversation_context(session_id)

    if is_name_question(message):
        return _quick_reply(
            "What is your name? Respond naturally and friendly.",
            message, session_id, images, max_length=100,
        )
    if is_greeting(message):
        return _quick_reply(
            f"{message} Respond warmly and offer to help with fashion advice.",
            message, session_id, images, max_length=150,
        )

    enhanced_message = enhance_message_with_context(message, conv_context["context"])
    query_type = detect_query_type(enhanced_message)
    rag_context = format_rag_context(retrieve_relevant_context(enhanced_message, top_k=3))

    if query_type == "color_compatibility":
        result = _color_compatibility_reply(enhanced_message, message, session_id, rag_context, images)
        if result is not None:
            return result
        # Fewer than two colors found: fall through to the general path
        # (the original returned None here, breaking response validation).
    elif query_type == "color_suggestion":
        return _color_suggestion_reply(enhanced_message, message, session_id, conv_context, rag_context, images)

    return _general_reply(enhanced_message, message, session_id, conv_context, rag_context, images)


def _parse_wardrobe_form(wardrobe, tag):
    """Parse the optional wardrobe JSON form field into WardrobeItem objects.

    Placeholder values ("[]", "", "string" — the Swagger UI default) and
    malformed JSON are ignored with a log line rather than rejected.
    """
    wardrobe_items = []
    if wardrobe and wardrobe.strip() and wardrobe.strip() not in ["[]", "", "string"]:
        try:
            wardrobe_data = json.loads(wardrobe)
            if isinstance(wardrobe_data, list):
                wardrobe_items = [WardrobeItem(**item) for item in wardrobe_data]
        except json.JSONDecodeError:
            print(f"[{tag}] Ignoring invalid wardrobe value: {wardrobe[:50]}")
    return wardrobe_items


async def _read_images_as_data_urls(images, tag):
    """Read uploaded files and encode each as a base64 data URL."""
    image_data_urls = []
    for img_file in images:
        # Files without a filename are empty placeholders from the form.
        if img_file.filename:
            content = await img_file.read()
            content_type = img_file.content_type or "image/jpeg"
            base64_data = base64.b64encode(content).decode("utf-8")
            image_data_urls.append(f"data:{content_type};base64,{base64_data}")
            print(f"[{tag}] Processed image: {img_file.filename} ({len(content)} bytes)")
    return image_data_urls


def setup_routes(app):
    """Register all Style GPT API routes on the given FastAPI app."""

    @app.get("/")
    async def root():
        """API index: version and endpoint listing."""
        return {
            "message": "Style GPT API - Milestone 1",
            "version": "1.0.0",
            "endpoints": {
                "/text": "POST - Text-only chat",
                "/chat": "POST - Chat with optional images",
                "/chat/upload": "POST - Chat with file upload",
                "/chat/upload/stream": "POST - Streaming chat with file upload",
                "/health": "GET - Health check",
            },
        }

    @app.get("/health")
    async def health_check():
        """Report whether the vision-language model has finished loading.

        NOTE(review): ``style_model`` is bound at import time; if
        model_manager rebinds it after loading, this check may report a
        stale value — confirm model_manager mutates rather than rebinds.
        """
        return {
            "status": "healthy" if style_model is not None else "loading",
            "model_loaded": style_model is not None,
            "model_name": "Qwen/Qwen2.5-VL-7B-Instruct",
        }

    @app.post("/text", response_model=ChatResponse)
    async def text_only(request: TextRequest):
        """Text-only chat endpoint (no images)."""
        try:
            message = request.message.strip()
            if not message:
                raise HTTPException(status_code=400, detail="Message cannot be empty")
            return _route_style_query(message, request.session_id, images=None)
        except HTTPException:
            # Preserve deliberate status codes (e.g. the 400 above) instead
            # of wrapping them in a 500 like the generic handler would.
            raise
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Error processing text message: {str(e)}")

    @app.post("/chat", response_model=ChatResponse)
    async def chat(request: ChatRequest):
        """Chat endpoint with optional images and wardrobe support."""
        try:
            message = request.message.strip()
            session_id = request.session_id
            if not message:
                raise HTTPException(status_code=400, detail="Message cannot be empty")

            # Wardrobe requests take a dedicated path.
            if request.wardrobe and len(request.wardrobe) > 0:
                print(f"[WARDROBE CHAT] ===== WARDROBE REQUEST DETECTED =====")
                if request.wardrobe_description:
                    print(f"[WARDROBE CHAT] Using provided wardrobe description ({len(request.wardrobe_description)} chars)")
                return await handle_wardrobe_chat(
                    message,
                    request.wardrobe,
                    session_id,
                    images=request.images,
                    wardrobe_description=request.wardrobe_description,
                )

            return _route_style_query(message, session_id, request.images)
        except HTTPException:
            # Preserve deliberate status codes instead of converting to 500.
            raise
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Error processing chat message: {str(e)}")

    @app.post("/chat/upload", response_model=ChatResponse)
    async def chat_with_upload(
        message: str = Form(...),
        session_id: str = Form(default="default"),
        wardrobe: Optional[str] = Form(default=None),
        wardrobe_description: Optional[str] = Form(default=None),
        images: List[UploadFile] = File(default=[]),
    ):
        """Multipart-form wrapper around /chat: decodes files, delegates to chat()."""
        try:
            wardrobe_items = _parse_wardrobe_form(wardrobe, "UPLOAD")
            image_data_urls = await _read_images_as_data_urls(images, "UPLOAD")

            request = ChatRequest(
                message=message,
                session_id=session_id,
                wardrobe=wardrobe_items if wardrobe_items else None,
                wardrobe_description=wardrobe_description if wardrobe_description and wardrobe_description.strip() else None,
                images=image_data_urls if image_data_urls else None,
            )
            print(f"[UPLOAD] Processing chat request: message='{message[:50]}...', images={len(image_data_urls)}, wardrobe={len(wardrobe_items)}")
            result = await chat(request)
            print(f"[UPLOAD] Response generated: {len(result.response)} chars")
            return result
        except HTTPException:
            # Let status codes raised by chat() pass through unwrapped.
            raise
        except Exception as e:
            print(f"[UPLOAD] Error: {e}")
            raise HTTPException(status_code=500, detail=f"Error processing upload: {str(e)}")

    @app.post("/chat/upload/stream")
    async def chat_with_upload_stream(
        message: str = Form(...),
        session_id: str = Form(default="default"),
        wardrobe: Optional[str] = Form(default=None),
        wardrobe_description: Optional[str] = Form(default=None),
        images: List[UploadFile] = File(default=[]),
    ):
        """Streaming (SSE) variant of the upload endpoint.

        Emits 'start', 'chunk'..., 'end' events as ``data:`` lines.
        """
        image_data_urls = await _read_images_as_data_urls(images, "STREAM UPLOAD")
        wardrobe_items = _parse_wardrobe_form(wardrobe, "STREAM UPLOAD")

        rag_context = format_rag_context(retrieve_relevant_context(message, top_k=3))

        # Prefer a caller-provided wardrobe description; otherwise build one
        # from the parsed items.
        wardrobe_context = ""
        if wardrobe_description and wardrobe_description.strip():
            wardrobe_context = wardrobe_description
            print(f"[STREAM UPLOAD] Using provided wardrobe description ({len(wardrobe_context)} chars)")
        elif wardrobe_items:
            from wardrobe import format_wardrobe_for_prompt
            wardrobe_context = format_wardrobe_for_prompt(wardrobe_items)
            print(f"[STREAM UPLOAD] Generated wardrobe context ({len(wardrobe_context)} chars)")

        if wardrobe_context:
            prompt = f"""{wardrobe_context}

User request: {message}

Suggest a complete outfit using ONLY the items listed above. Reference items by their exact names. Include accessories if available.
Be friendly and conversational."""
        else:
            prompt = message

        print(f"[STREAM UPLOAD] Starting streaming response for: {message[:50]}...")

        async def generate():
            # Server-sent-events protocol: each payload is a JSON object on a
            # "data:" line terminated by a blank line.
            yield f"data: {json.dumps({'type': 'start', 'session_id': session_id})}\n\n"
            full_response = ""
            async for chunk in generate_chat_response_streaming(
                prompt=prompt,
                max_length=512,
                temperature=0.7,
                rag_context=rag_context,
                images=image_data_urls if image_data_urls else None,
            ):
                full_response += chunk
                yield f"data: {json.dumps({'type': 'chunk', 'content': chunk})}\n\n"
            yield f"data: {json.dumps({'type': 'end', 'full_response': full_response, 'session_id': session_id})}\n\n"
            print(f"[STREAM UPLOAD] Streaming complete: {len(full_response)} chars")

        return StreamingResponse(
            generate(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                # Disable proxy buffering so chunks reach the client promptly.
                "X-Accel-Buffering": "no",
            },
        )