"""
Fashion Advisor RAG - Hugging Face Deployment
Complete RAG system with FAISS vector store and local LLM
"""
import gradio as gr
import logging
import os
from pathlib import Path
from typing import List, Tuple, Dict, Optional
import pickle
# Core ML libraries
import torch
from transformers import pipeline
from sentence_transformers import SentenceTransformer
import requests
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.schema import Document
# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Optimize PyTorch for CPU inference
torch.set_num_threads(4) # Limit threads for better CPU performance
torch.set_grad_enabled(False) # Disable gradients (inference only)
# Suppress specific warnings
import warnings
warnings.filterwarnings("ignore", message="MatMul8bitLt")
warnings.filterwarnings("ignore", message="torch_dtype")
# ============================================================================
# CONFIGURATION
# ============================================================================
CONFIG = {
"embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
"llm_model": None,
"vector_store_path": ".",
"top_k": 8, # Minimal retrieval for speed
"temperature": 0.85, # Higher for faster sampling
"max_tokens": 280, # Aggressive reduction
}
# Local PHI model configuration for Hugging Face Spaces
# PHI-2 is optimal for CPU deployment: 2.7B parameters, excellent quality
# Can be swapped with Phi-3-mini-4k-instruct if more memory is available
LOCAL_PHI_MODEL = os.environ.get("LOCAL_PHI_MODEL", "microsoft/phi-2")
# Remote model used by remote_generate() when an HF Inference API key is available
REMOTE_LLM_MODEL = os.environ.get("REMOTE_LLM_MODEL", "microsoft/phi-2")
USE_8BIT_QUANTIZATION = True # 8-bit weights cut model memory to roughly a quarter of float32
USE_REMOTE_LLM = False
# Advanced optimization settings for FAST generation
MAX_CONTEXT_LENGTH = 500 # Minimal context for speed
TARGET_ANSWER_WORDS = 220 # Shorter answers = faster generation
USE_CACHING = True # Cache model outputs for repeated patterns
ENABLE_FAST_MODE = True # Skip iterative generation, use single-shot only
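# NOTE: MAX_CONTEXT_LENGTH, TARGET_ANSWER_WORDS, and USE_CACHING document the
# intended targets; the generation functions below currently hard-code their own
# limits, so only ENABLE_FAST_MODE is consulted at runtime.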
# Prefer the environment variable, but also allow a local token file for users
# who don't know how to set env vars. Create a file named `hf_token.txt` in the
# project root containing only the token (no newline is necessary). DO NOT
# commit that file to version control. A .gitignore entry will be added.
HF_INFERENCE_API_KEY = os.environ.get("HF_INFERENCE_API_KEY")
if not HF_INFERENCE_API_KEY:
try:
token_path = Path("hf_token.txt")
if token_path.exists():
HF_INFERENCE_API_KEY = token_path.read_text(encoding="utf-8").strip()
logger.info("Loaded HF token from hf_token.txt (ensure this file is private and not committed)")
except Exception:
logger.warning("Could not read hf_token.txt for HF token")
if HF_INFERENCE_API_KEY:
USE_REMOTE_LLM = True
# ============================================================================
# INITIALIZE MODELS
# ============================================================================
def initialize_llm():
"""Initialize PHI model locally with CPU optimizations for Hugging Face Spaces.
Uses efficient techniques:
- 8-bit quantization to reduce memory by ~50%
- CPU-optimized loading with device_map
- Lazy loading and minimal memory footprint
"""
global LOCAL_PHI_MODEL, USE_8BIT_QUANTIZATION
logger.info(f"πŸ”„ Initializing local PHI model: {LOCAL_PHI_MODEL}")
logger.info(" Using CPU-optimized configuration for Hugging Face Spaces")
try:
from transformers import AutoTokenizer, AutoModelForCausalLM
# Check if we have GPU (unlikely on free Spaces, but check anyway)
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f" Target device: {device}")
# Load tokenizer (lightweight)
logger.info(" Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(
LOCAL_PHI_MODEL,
trust_remote_code=True,
use_fast=True
)
# Set padding token if not present (PHI models need this)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
# Configure model loading for CPU efficiency
model_kwargs = {
"trust_remote_code": True,
"low_cpu_mem_usage": True,
"torch_dtype": torch.float32, # CPU works best with float32
}
        # Try to use 8-bit quantization if available (requires bitsandbytes).
        # Probe the bitsandbytes import here: merely setting the kwarg never
        # raises, so without the probe the fallback branch would be unreachable.
        if USE_8BIT_QUANTIZATION and device == "cpu":
            try:
                import bitsandbytes  # noqa: F401  (availability check only)
                logger.info(" Attempting 8-bit quantization for memory efficiency...")
                model_kwargs["load_in_8bit"] = True
            except Exception as quant_error:
                logger.warning(f" 8-bit quantization unavailable: {quant_error}")
                logger.info(" Falling back to float32 (will use more memory)")
# Load the model with optimization
logger.info(" Loading PHI model (this may take 30-60 seconds)...")
model = AutoModelForCausalLM.from_pretrained(
LOCAL_PHI_MODEL,
**model_kwargs
)
# Apply advanced optimizations for faster inference
if hasattr(model, 'config'):
            # Trim per-step outputs and enable the KV cache for faster decoding
            model.config.use_cache = True
model.config.output_attentions = False
model.config.output_hidden_states = False
# Move to eval mode to disable dropout and save memory
model.eval()
# Advanced: Try to optimize with torch.compile (PyTorch 2.0+)
try:
if hasattr(torch, 'compile') and not USE_8BIT_QUANTIZATION:
logger.info(" Applying torch.compile for faster inference...")
model = torch.compile(model, mode="reduce-overhead")
except Exception as compile_error:
logger.info(f" Torch compile not available or failed: {compile_error}")
# Create pipeline for generation
# NOTE: When using accelerate/quantization, do NOT specify device parameter
logger.info(" Creating text-generation pipeline...")
llm_client = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=280, # Default optimized value
pad_token_id=tokenizer.eos_token_id,
batch_size=1 # Single batch for optimal CPU performance
)
CONFIG["llm_model"] = LOCAL_PHI_MODEL
CONFIG["model_type"] = "phi_local"
logger.info(f"βœ… PHI model initialized successfully: {LOCAL_PHI_MODEL}")
logger.info(f" Model size: ~2.7B parameters (PHI-2) or ~3.8B (PHI-3)")
logger.info(f" Memory optimization: {'8-bit quantization' if USE_8BIT_QUANTIZATION else 'float32'}")
return llm_client
except ImportError as ie:
logger.error(f"❌ Missing required library: {ie}")
logger.info(" Install with: pip install transformers accelerate bitsandbytes")
raise
except Exception as e:
logger.error(f"❌ Failed to load PHI model: {str(e)}")
logger.info(" This may be due to insufficient memory on the Space")
logger.info(" Try using a smaller model or enabling 8-bit quantization")
raise Exception(f"Failed to initialize PHI LLM: {str(e)}")
def remote_generate(prompt: str, max_new_tokens: int = 512, temperature: float = 0.7, top_p: float = 0.9) -> str:
"""Call the Hugging Face Inference API for remote generation. Requires
`HF_INFERENCE_API_KEY` env var to be set and a model name in
`REMOTE_LLM_MODEL`.
PHI models work best with clear instruction formatting. This function
handles both the standard HF Inference API and PHI-specific response parsing.
"""
if not HF_INFERENCE_API_KEY:
raise Exception("HF_INFERENCE_API_KEY not set for remote generation")
# Use the HF Inference API endpoint (not router for better PHI compatibility)
api_url = f"https://api-inference.huggingface.co/models/{REMOTE_LLM_MODEL}"
headers = {"Authorization": f"Bearer {HF_INFERENCE_API_KEY}"}
# PHI models prefer simple parameters; avoid return_full_text which can cause issues
payload = {
"inputs": prompt,
"parameters": {
"max_new_tokens": max_new_tokens,
"temperature": temperature,
"top_p": top_p,
"do_sample": True,
"repetition_penalty": 1.1
}
}
logger.info(f" β†’ Remote PHI inference to {REMOTE_LLM_MODEL} (tokens={max_new_tokens}, temp={temperature})")
try:
r = requests.post(api_url, headers=headers, json=payload, timeout=90)
except Exception as e:
logger.error(f" βœ— Remote request failed: {e}")
return ""
if r.status_code == 503:
logger.warning(f" ⚠️ Model loading (503), retrying in 5s...")
import time
time.sleep(5)
try:
r = requests.post(api_url, headers=headers, json=payload, timeout=90)
except Exception as e:
logger.error(f" βœ— Retry failed: {e}")
return ""
if r.status_code != 200:
logger.error(f" βœ— Remote inference error {r.status_code}: {r.text[:300]}")
return ""
result = r.json()
# Handle error responses
if isinstance(result, dict) and result.get("error"):
logger.error(f" βœ— Remote inference returned error: {result.get('error')}")
return ""
# Parse the generated text from various response formats
generated_text = ""
if isinstance(result, list) and result:
# HF Inference API returns [{"generated_text": "..."}]
first = result[0]
if isinstance(first, dict):
generated_text = first.get("generated_text", "")
else:
generated_text = str(first)
elif isinstance(result, dict) and "generated_text" in result:
generated_text = result["generated_text"]
else:
generated_text = str(result)
# Clean up: PHI may return the prompt + completion, extract only new text
generated_text = generated_text.strip()
# If the response contains the original prompt, extract only the new completion
if prompt in generated_text:
# Find where the prompt ends and new generation begins
prompt_end = generated_text.find(prompt) + len(prompt)
generated_text = generated_text[prompt_end:].strip()
return generated_text
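# Hypothetical usage sketch for remote_generate (it is defined but not called
# anywhere in this app): remote generation only makes sense once
# HF_INFERENCE_API_KEY and REMOTE_LLM_MODEL are configured.
#
#   if USE_REMOTE_LLM:
#       answer = remote_generate(
#           "Suggest a business-casual outfit for fall.",
#           max_new_tokens=256,
#           temperature=0.7,
#       )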
def initialize_embeddings():
logger.info("πŸ”„ Initializing embeddings model...")
embeddings = HuggingFaceEmbeddings(
model_name=CONFIG["embedding_model"],
model_kwargs={'device': 'cpu'},
encode_kwargs={'normalize_embeddings': True}
)
logger.info(f"βœ… Embeddings initialized: {CONFIG['embedding_model']}")
return embeddings
def load_vector_store(embeddings):
logger.info("πŸ”„ Loading FAISS vector store...")
vector_store_path = CONFIG["vector_store_path"]
index_file = os.path.join(vector_store_path, "index.faiss")
pkl_file = os.path.join(vector_store_path, "index.pkl")
if not os.path.exists(index_file):
raise FileNotFoundError(f"FAISS index file not found: {index_file}")
if not os.path.exists(pkl_file):
raise FileNotFoundError(f"FAISS metadata file not found: {pkl_file}")
logger.info(f"βœ… Found index.faiss ({os.path.getsize(index_file)/1024/1024:.2f} MB)")
logger.info(f"βœ… Found index.pkl ({os.path.getsize(pkl_file)/1024:.2f} KB)")
try:
vectorstore = FAISS.load_local(
vector_store_path,
embeddings,
allow_dangerous_deserialization=True
)
logger.info(f"βœ… FAISS vector store loaded successfully")
return vectorstore
except Exception as e:
logger.warning(f"⚠️ Pydantic compatibility issue: {str(e)[:100]}")
logger.info("πŸ”„ Applying Pydantic monkey-patch and retrying...")
try:
import pydantic.v1.main as pydantic_main
original_setstate = pydantic_main.BaseModel.__setstate__
def patched_setstate(self, state):
if '__fields_set__' not in state:
state['__fields_set__'] = set(state.get('__dict__', {}).keys())
return original_setstate(self, state)
pydantic_main.BaseModel.__setstate__ = patched_setstate
logger.info(" βœ… Pydantic monkey-patch applied")
except Exception as patch_error:
logger.warning(f" ⚠️ Pydantic patch failed: {patch_error}")
try:
vectorstore = FAISS.load_local(
vector_store_path,
embeddings,
allow_dangerous_deserialization=True
)
logger.info(f"βœ… FAISS vector store loaded with Pydantic patch")
return vectorstore
except Exception as e2:
logger.error(f" βœ— Still failed after patch: {str(e2)[:100]}")
logger.info("πŸ”„ Using manual reconstruction (last resort)...")
import faiss
from langchain_community.docstore.in_memory import InMemoryDocstore
index = faiss.read_index(index_file)
logger.info(f" βœ… FAISS index loaded")
with open(pkl_file, "rb") as f:
import re
raw_bytes = f.read()
logger.info(f" Read {len(raw_bytes)} bytes from pickle")
text_pattern = rb'([A-Za-z0-9\s\.\,\;\:\!\?\-\'\"\(\)]{50,})'
matches = re.findall(text_pattern, raw_bytes)
if len(matches) > 100:
logger.info(f" Found {len(matches)} potential document fragments")
documents = []
for idx, match in enumerate(matches[:5000]):
try:
content = match.decode('utf-8', errors='ignore').strip()
if len(content) >= 100:
doc = Document(
page_content=content,
metadata={"source": "reconstructed", "id": idx}
)
documents.append(doc)
except:
continue
if len(documents) < 100:
raise Exception(f"Only extracted {len(documents)} documents, need at least 100")
logger.info(f" βœ… Extracted {len(documents)} high-quality documents")
logger.info(f" πŸ”„ Rebuilding FAISS index from scratch...")
vectorstore = FAISS.from_documents(
documents=documents,
embedding=embeddings
)
logger.info(f"βœ… FAISS vector store rebuilt from {len(documents)} documents")
return vectorstore
else:
raise Exception("Could not extract enough document content from pickle")
# ============================================================================
# RAG PIPELINE FUNCTIONS
# ============================================================================
def generate_extractive_answer(query: str, retrieved_docs: List[Document]) -> Optional[str]:
"""Build a long-form answer from retrieved documents using extractive
selection + templated transitions. This avoids calling the LLM when it
repeatedly fails or returns very short outputs.
"""
logger.info(f"πŸ”§ Running extractive fallback for: '{query}'")
# Collect text and split into sentences
import re
all_text = "\n\n".join([d.page_content for d in retrieved_docs])
# Basic sentence split (keeps punctuation)
sentences = re.split(r'(?<=[.!?])\s+', all_text)
sentences = [s.strip() for s in sentences if len(s.strip()) > 30]
if not sentences:
logger.warning(" βœ— No sentences found in retrieved documents for extractive fallback")
return None
# Scoring: keyword overlap with query and fashion terms
query_tokens = set(re.findall(r"\w+", query.lower()))
fashion_keywords = set(["outfit","wear","wardrobe","style","colors","color","layer","layering",
"blazer","trousers","dress","shirt","shoes","boots","sweater","jacket",
"care","wash","dry","clean","wool","cotton","silk","linen","fit","tailor",
"versatile","neutral","accessory","belt","bag","occasion","season","fall"])
keywords = query_tokens.union(fashion_keywords)
scored = []
for s in sentences:
s_tokens = set(re.findall(r"\w+", s.lower()))
score = len(s_tokens & keywords)
# length bonus to prefer richer sentences
score += min(3, len(s.split()) // 20)
scored.append((score, s))
scored.sort(key=lambda x: x[0], reverse=True)
top_sentences = [s for _, s in scored[:60]]
# Build structured sections using top sentences + templates
def pick(n, start=0):
return top_sentences[start:start+n]
intro = []
intro.extend(pick(2, 0))
key_items = pick(8, 2)
styling = pick(8, 10)
care = pick(6, 18)
conclusion = pick(4, 24)
# Add handcrafted, helpful transitions to improve flow
template_intro = f"Here's a detailed answer to '{query}'. I'll cover essential wardrobe items, styling tips, and care advice so you can apply these suggestions practically."
# Ensure care advice includes the user's specific care example if present or add it
care_text = "\n\n".join(care)
if "dry clean" not in care_text.lower() and "hand wash" not in care_text.lower():
care_text += "\n\nDry clean or hand wash in cold water with wool-specific detergent. Never wring out wool - gently squeeze excess water and lay flat to dry on a towel."
parts = []
parts.append(template_intro)
if intro:
parts.append(" ".join(intro))
if key_items:
parts.append("Key wardrobe items to prioritize:")
parts.append(" ".join(key_items))
if styling:
parts.append("Practical styling tips:")
parts.append(" ".join(styling))
if care_text:
parts.append("Care & maintenance:")
parts.append(care_text)
if conclusion:
parts.append("Wrapping up:")
parts.append(" ".join(conclusion))
# Combine and refine spacing
answer = "\n\n".join(parts)
# Post-process: ensure target length (approximately 400-700 words)
words = answer.split()
word_count = len(words)
# If too short, append templated practical paragraphs built from keywords
if word_count < 380:
logger.info(f" β†’ Extractive answer short ({word_count} words). Appending templated paragraphs.")
extra_paragraphs = []
extra_paragraphs.append("A reliable strategy is to build around versatile, neutral pieces: a well-fitted blazer, tailored trousers, a versatile dress, and quality shoes. These items can be mixed and matched for many occasions.")
extra_paragraphs.append("Focus on fit and fabric: ensure key items are well-tailored, prioritize breathable fabrics for comfort, and choose merino or wool blends for colder seasons to layer effectively.")
extra_paragraphs.append("Layering is essential for transitional weather; combine a lightweight sweater under a jacket, and carry a scarf for added warmth and visual interest.")
extra_paragraphs.append("Accessories like belts, a structured bag, and minimal jewelry can elevate basic outfits without extra effort. Neutral colors increase versatility and pair well with bolder accents.")
answer += "\n\n" + "\n\n".join(extra_paragraphs)
words = answer.split()
word_count = len(words)
# If still too long, truncate gracefully
if word_count > 750:
words = words[:700]
answer = " ".join(words) + '...'
word_count = 700
logger.info(f" βœ… Extractive answer ready ({word_count} words)")
return answer
def scaffold_and_polish(query: str, retrieved_docs: List[Document], llm_client) -> Optional[str]:
"""Create a concise scaffold (approx 150-220 words) from retrieved docs,
then ask the remote (or local) LLM to expand and polish it into a
320-420 word expert answer. Returns None if polishing fails.
"""
logger.info(f"πŸ”¨ Building scaffold for polish: '{query}'")
import re
# Reuse sentence extraction logic but stop early for a compact scaffold
all_text = "\n\n".join([d.page_content for d in retrieved_docs[:12]])
sentences = re.split(r'(?<=[.!?])\s+', all_text)
sentences = [s.strip() for s in sentences if len(s.strip()) > 30]
if not sentences:
logger.warning(" βœ— No sentences to build scaffold")
return None
# Score sentences by overlap with query + fashion keywords
query_tokens = set(re.findall(r"\w+", query.lower()))
fashion_keywords = set(["outfit","wear","wardrobe","style","colors","layer","blazer",
"trousers","dress","shoes","sweater","jacket","care","wool","fit",
"tailor","neutral","accessory","season","fall"])
keywords = query_tokens.union(fashion_keywords)
scored = []
for s in sentences:
s_tokens = set(re.findall(r"\w+", s.lower()))
score = len(s_tokens & keywords)
score += min(2, len(s.split()) // 30)
scored.append((score, s))
scored.sort(key=lambda x: x[0], reverse=True)
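    # Greedily take the highest-scoring sentences until the scaffold reaches ~180 words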
scaffold_parts = []
word_count = 0
for _, s in scored:
scaffold_parts.append(s)
word_count = len(" ".join(scaffold_parts).split())
if word_count >= 180:
break
scaffold = "\n\n".join(scaffold_parts).strip()
if not scaffold:
logger.warning(" βœ— Scaffold empty after selection")
return None
# Craft polish prompt - optimized for speed
polish_prompt = f"""Expand this draft to ~280 words with practical fashion advice for: {query}
Draft: {scaffold[:400]}
Enhanced answer:
"""
logger.info(" β†’ Polishing scaffold with PHI model")
try:
out = llm_client(
polish_prompt,
max_new_tokens=400, # Reduced for speed
temperature=0.75,
top_p=0.90,
do_sample=True,
repetition_penalty=1.1,
pad_token_id=llm_client.tokenizer.eos_token_id
)
# Extract and clean the polished text
if isinstance(out, list) and out:
polished = out[0].get('generated_text', '') if isinstance(out[0], dict) else str(out[0])
else:
polished = str(out)
# Remove prompt echo if present
if polish_prompt in polished:
polished = polished[len(polish_prompt):].strip()
else:
polished = polished.strip()
except Exception as e:
logger.error(f" βœ— Polishing error: {e}")
return None
if not polished:
logger.warning(" βœ— Polished output empty")
return None
final_words = polished.split()
fw = len(final_words)
if fw < 200:
logger.warning(f" βœ— Polished output too short ({fw} words)")
return None
if fw > 380:
polished = ' '.join(final_words[:350]) + '...'
logger.info(f" βœ… Polished answer ready ({len(polished.split())} words)")
return polished
def retrieve_knowledge_langchain(
query: str,
vectorstore,
top_k: int = 8
) -> Tuple[List[Document], float]:
logger.info(f"πŸ” Retrieving knowledge for: '{query}'")
# Fast mode: single query only (no variants)
global ENABLE_FAST_MODE
if ENABLE_FAST_MODE:
query_variants = [query]
else:
query_variants = [
query,
f"fashion advice clothing outfit style for {query}",
]
all_docs = []
for variant in query_variants:
try:
docs_and_scores = vectorstore.similarity_search_with_score(variant, k=top_k)
for doc, score in docs_and_scores:
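                # FAISS similarity_search_with_score returns a raw distance
                # (lower = closer); map it to a (0, 1] score where higher = better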
similarity = 1.0 / (1.0 + score)
doc.metadata['similarity'] = similarity
doc.metadata['query_variant'] = variant
all_docs.append(doc)
except Exception as e:
logger.error(f"Retrieval error for variant '{variant}': {e}")
unique_docs = {}
for doc in all_docs:
content_key = doc.page_content[:100]
if content_key not in unique_docs:
unique_docs[content_key] = doc
else:
if doc.metadata.get('similarity', 0) > unique_docs[content_key].metadata.get('similarity', 0):
unique_docs[content_key] = doc
final_docs = list(unique_docs.values())
final_docs.sort(key=lambda x: x.metadata.get('similarity', 0), reverse=True)
if final_docs:
avg_similarity = sum(d.metadata.get('similarity', 0) for d in final_docs) / len(final_docs)
confidence = min(avg_similarity, 1.0)
else:
confidence = 0.0
logger.info(f"βœ… Retrieved {len(final_docs)} unique documents (confidence: {confidence:.2f})")
return final_docs, confidence
def generate_llm_answer(
query: str,
retrieved_docs: List[Document],
llm_client,
attempt: int = 1
) -> Optional[str]:
# Ensure we have a local PHI model loaded
if not llm_client:
logger.error(" β†’ PHI model not initialized")
return None
query_lower = query.lower()
query_words = set(query_lower.split())
scored_docs = []
for doc in retrieved_docs[:20]:
content = doc.page_content.lower()
doc_words = set(content.split())
overlap = len(query_words.intersection(doc_words))
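        # Heuristic boosts: strongly favor chunks marked 'verified' in metadata,
        # and mildly favor longer chunks that carry more context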
if doc.metadata.get('verified', False):
overlap += 10
if len(doc.page_content) > 200:
overlap += 3
scored_docs.append((doc, overlap))
scored_docs.sort(key=lambda x: x[1], reverse=True)
    top_docs = [d for d, _ in scored_docs[:8]]
# Ultra-fast context preparation: only use top 4 docs, very short snippets
context_parts = []
for doc in top_docs[:4]: # Reduced from 8 to 4
content = doc.page_content.strip()
if len(content) > 200: # Reduced from 300 to 200
content = content[:200] + "..."
context_parts.append(content)
context_text = "\n".join(context_parts) # Single newline instead of double
# Ultra-fast mode: minimal words, no iterations
global ENABLE_FAST_MODE
if ENABLE_FAST_MODE:
target_min_words = 180 # Much shorter
target_max_words = 280
chunk_target_words = 0 # No continuations
max_iterations = 0 # No iterations
else:
target_min_words = 250
target_max_words = 350
chunk_target_words = 120
max_iterations = 2
def call_model(prompt, max_new_tokens, temperature, top_p, repetition_penalty):
logger.info(f" β†’ PHI model call (temp={temperature}, max_new_tokens={max_new_tokens})")
try:
# Call local PHI model with speed optimizations
out = llm_client(
prompt,
max_new_tokens=max_new_tokens,
temperature=temperature,
top_p=top_p,
do_sample=True,
repetition_penalty=repetition_penalty,
num_return_sequences=1,
pad_token_id=llm_client.tokenizer.eos_token_id,
eos_token_id=llm_client.tokenizer.eos_token_id,
                num_beams=1, # Plain sampling; beam search would be slower on CPU
                use_cache=True # Reuse the KV cache across decoding steps
)
# Extract generated text from pipeline output
if isinstance(out, list) and out:
generated = out[0].get('generated_text', '') if isinstance(out[0], dict) else str(out[0])
else:
generated = str(out)
# PHI models return prompt + completion, extract only new text
if prompt in generated:
# Remove the prompt from the output
generated = generated[len(prompt):].strip()
return generated
except Exception as e:
logger.error(f" βœ— PHI model call error: {e}")
return ''
# Ultra-compact prompt for maximum speed
base_prompt = f"""Q: {query}
Context: {context_text[:400]}
A:"""
    # Speed comes from capping max_new_tokens; temperature and top_p shape the
    # output's style and diversity, not the generation speed
    if attempt == 1:
        temperature = 0.85
        max_new_tokens = 280 # Hard cap on new tokens keeps latency low
        top_p = 0.88
        repetition_penalty = 1.08
else:
temperature = 0.90
max_new_tokens = 320
top_p = 0.90
repetition_penalty = 1.10
initial_output = call_model(base_prompt, max_new_tokens, temperature, top_p, repetition_penalty)
response = (initial_output or '').strip()
# Basic sanity checks
if not response:
logger.warning(" βœ— Empty initial response")
response = ''
words = response.split()
word_count = len(words)
# Fast mode: accept shorter answers immediately
if ENABLE_FAST_MODE and word_count >= 150:
if word_count > target_max_words:
response = ' '.join(words[:target_max_words]) + '...'
word_count = target_max_words
logger.info(f" βœ… Fast-mode generated {word_count} words")
return response
# If single-shot succeeded, validate length and return
if word_count >= target_min_words:
if word_count > target_max_words:
response = ' '.join(words[:target_max_words]) + '...'
word_count = target_max_words
logger.info(f" βœ… Single-shot generated {word_count} words")
return response
# Skip iterations in fast mode
if ENABLE_FAST_MODE or max_iterations == 0:
if word_count >= 120: # Accept even shorter in fast mode
logger.info(f" βœ… Fast-mode accepted {word_count} words")
return response
# If too short, return None to trigger fallback
logger.warning(f" βœ— Output too short ({word_count} words), trying fallback")
return None
# Otherwise, try iterative continuation to build up to the target
accumulated = response
prev_word_count = word_count
for i in range(max_iterations):
remaining = max(0, target_min_words - len(accumulated.split()))
if remaining <= 0:
break
# Ask the model to continue without repeating previous content
continue_prompt = f"""Add {min(chunk_target_words, remaining)} more words to complete this answer:
{accumulated[-400:]}
Continue naturally:
"""
# Optimized continuation parameters for speed
cont_output = call_model(continue_prompt, max_new_tokens=250, temperature=0.80, top_p=0.90, repetition_penalty=1.10)
cont_text = (cont_output or '').strip()
if not cont_text:
logger.warning(f" βœ— Continuation {i+1} returned empty β€” stopping")
break
# Avoid trivial repeats: if continuation repeats the accumulated text, stop
if cont_text in accumulated or accumulated.endswith(cont_text[:50]):
logger.warning(f" βœ— Continuation {i+1} appears repetitive β€” stopping")
break
# Append and normalize spacing
accumulated = accumulated.rstrip() + '\n\n' + cont_text
current_word_count = len(accumulated.split())
logger.info(f" β†’ After continuation {i+1}, words={current_word_count}")
# Stop early if we've reached or exceeded the minimum target
if current_word_count >= target_min_words:
break
# Safety: if no progress, break
if current_word_count == prev_word_count:
logger.warning(" βœ— No progress from continuation β€” stopping")
break
prev_word_count = current_word_count
final_words = accumulated.split()
final_count = len(final_words)
if final_count < target_min_words:
logger.warning(f" βœ— Final answer too short ({final_count} words) after continuations")
return None
if final_count > target_max_words:
logger.info(f" ⚠️ Final answer long ({final_count} words). Truncating to {target_max_words} words.")
accumulated = ' '.join(final_words[:target_max_words]) + '...'
final_count = target_max_words
# Final check for apology/hedging at start
apology_phrases = ["i cannot", "i can't", "i'm sorry", "i apologize", "i don't have"]
if any(phrase in accumulated.lower()[:200] for phrase in apology_phrases):
logger.warning(" βœ— Apology/hedging detected in final answer")
return None
logger.info(f" βœ… Built long-form answer ({final_count} words)")
return accumulated
def generate_answer_langchain(
query: str,
vectorstore,
llm_client
) -> str:
logger.info(f"\n{'='*80}")
logger.info(f"Processing query: '{query}'")
logger.info(f"{'='*80}")
retrieved_docs, confidence = retrieve_knowledge_langchain(
query,
vectorstore,
top_k=CONFIG["top_k"]
)
if not retrieved_docs:
return "I couldn't find relevant information to answer your question."
# Fast mode: single attempt only
global ENABLE_FAST_MODE
max_attempts = 1 if ENABLE_FAST_MODE else 2
llm_answer = None
for attempt in range(1, max_attempts + 1):
logger.info(f"\n πŸ€– LLM Generation Attempt {attempt}/{max_attempts}")
llm_answer = generate_llm_answer(query, retrieved_docs, llm_client, attempt)
if llm_answer:
logger.info(f" βœ… LLM answer generated successfully")
break
else:
if attempt < max_attempts:
logger.warning(f" β†’ Attempt {attempt}/{max_attempts} failed, retrying...")
if not llm_answer:
logger.error(f" βœ— All {max_attempts} LLM attempts failed")
# In fast mode, skip scaffold-and-polish and go straight to extractive
if not ENABLE_FAST_MODE:
try:
logger.info(" β†’ Attempting scaffold-and-polish using PHI model")
polished = scaffold_and_polish(query, retrieved_docs, llm_client)
if polished:
logger.info(" βœ… Scaffold-and-polish produced an answer")
return polished
except Exception as e:
logger.error(f" βœ— Scaffold-and-polish error: {e}")
# Final fallback: extractive templated answer (guaranteed deterministic & FAST)
try:
logger.info(" β†’ Using extractive fallback generator")
fallback = generate_extractive_answer(query, retrieved_docs)
if fallback:
logger.info(" βœ… Extractive fallback produced an answer")
return fallback
except Exception as e:
logger.error(f" βœ— Extractive fallback error: {e}")
return "I apologize, but I'm having trouble generating a response. Please try rephrasing your question or ask something else."
return llm_answer
# ============================================================================
# GRADIO INTERFACE
# ============================================================================
def fashion_chatbot(message: str, history: List[List[str]]):
try:
if not message or not message.strip():
yield "Please ask a fashion-related question!"
return
yield "πŸ” Searching fashion knowledge..."
retrieved_docs, confidence = retrieve_knowledge_langchain(
message.strip(),
vectorstore,
top_k=CONFIG["top_k"]
)
if not retrieved_docs:
yield "I couldn't find relevant information to answer your question."
return
yield f"πŸ’­ Generating answer ({len(retrieved_docs)} sources found)..."
llm_answer = None
for attempt in range(1, 3):
logger.info(f"\n πŸ€– LLM Generation Attempt {attempt}/2")
llm_answer = generate_llm_answer(message.strip(), retrieved_docs, llm_client, attempt)
if llm_answer:
break
if not llm_answer:
logger.error(f" βœ— All LLM attempts failed")
yield "I apologize, but I'm having trouble generating a response. Please try rephrasing your question."
return
import time
words = llm_answer.split()
displayed_text = ""
# Faster streaming for better UX
for i, word in enumerate(words):
displayed_text += word + " "
if i % 5 == 0 or i == len(words) - 1:
yield displayed_text.strip()
time.sleep(0.02) # Reduced delay
except Exception as e:
logger.error(f"Error in chatbot: {e}")
yield f"Sorry, I encountered an error: {str(e)}"
# ============================================================================
# INITIALIZE AND LAUNCH
# ============================================================================
llm_client = None
embeddings = None
vectorstore = None
def startup():
global llm_client, embeddings, vectorstore
logger.info("πŸš€ Starting Fashion Advisor RAG...")
embeddings = initialize_embeddings()
vectorstore = load_vector_store(embeddings)
llm_client = initialize_llm()
logger.info("βœ… All components initialized successfully!")
startup()
demo = gr.ChatInterface(
fn=fashion_chatbot,
title="πŸ‘— Fashion Advisor - RAG System",
description="""
**Ask me anything about fashion!** 🌟
I can help with:
- Outfit recommendations for occasions
- Color combinations and styling
- Seasonal fashion advice
- Body type and fit guidance
- Wardrobe essentials
*Powered by RAG with FAISS vector search and local LLM*
""",
examples=[
"What should I wear to a business meeting?",
"What colors go well with navy blue?",
"What are essential wardrobe items for fall?",
"How to dress for a summer wedding?",
"What's the best outfit for a university presentation?",
],
)
if __name__ == "__main__":
demo.launch()