"""
Fashion Advisor RAG - Hugging Face Deployment
Complete RAG system with FAISS vector store and local LLM
"""
import gradio as gr
import logging
import os
from pathlib import Path
from typing import List, Tuple, Optional
# Core ML libraries
import torch
from transformers import pipeline
import requests
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.schema import Document
# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ============================================================================
# CONFIGURATION
# ============================================================================
CONFIG = {
    "embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
    "llm_model": None,
    "vector_store_path": ".",
    "top_k": 15,
    "temperature": 0.75,
    "max_tokens": 350,
}
# Remote inference config (optional). If `HF_INFERENCE_API_KEY` is set in the
# environment, the app will prefer calling the Hugging Face Inference API (remote
# hosted model) which can generate longer outputs faster than a CPU-bound local
# model. Set `HF_INFERENCE_MODEL` to choose the remote model (instruction-tuned
# model recommended).
USE_REMOTE_LLM = False
REMOTE_LLM_MODEL = os.environ.get("HF_INFERENCE_MODEL", "tiiuae/falcon-7b-instruct")
# Prefer the environment variable, but also allow a local token file for users
# who can't set env vars. Create a file named `hf_token.txt` in the project
# root containing only the token (a trailing newline is fine; it is stripped).
# DO NOT commit that file to version control; add it to .gitignore.
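#
# A minimal sketch of wiring this up (hypothetical token value shown; use a
# real token from your own Hugging Face account):
#
#   export HF_INFERENCE_API_KEY=hf_xxxxxxxxxxxxxxxxx
#   export HF_INFERENCE_MODEL=tiiuae/falcon-7b-instruct
#
# or, without env vars, write the token to the local file instead:
#
#   echo "hf_xxxxxxxxxxxxxxxxx" > hf_token.txt
#   echo "hf_token.txt" >> .gitignore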
HF_INFERENCE_API_KEY = os.environ.get("HF_INFERENCE_API_KEY")
if not HF_INFERENCE_API_KEY:
    try:
        token_path = Path("hf_token.txt")
        if token_path.exists():
            HF_INFERENCE_API_KEY = token_path.read_text(encoding="utf-8").strip()
            logger.info("Loaded HF token from hf_token.txt (ensure this file is private and not committed)")
    except Exception:
        logger.warning("Could not read hf_token.txt for HF token")
if HF_INFERENCE_API_KEY:
    USE_REMOTE_LLM = True
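# Net effect of the configuration above (summary of the control flow):
#   1. HF_INFERENCE_API_KEY env var set        -> remote inference via the HF router
#   2. else hf_token.txt present and readable  -> same remote path
#   3. else                                    -> local google/flan-t5-large pipeline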
# ============================================================================
# INITIALIZE MODELS
# ============================================================================
def initialize_llm():
    # If a remote HF Inference API key is provided, we won't instantiate a local
    # heavy model; instead generation will be performed via the HTTP API.
    global USE_REMOTE_LLM, REMOTE_LLM_MODEL
    if USE_REMOTE_LLM:
        logger.info(f"🌐 Using remote Hugging Face Inference model: {REMOTE_LLM_MODEL}")
        CONFIG["llm_model"] = REMOTE_LLM_MODEL
        CONFIG["model_type"] = "remote"
        return None
    logger.info("🚀 Initializing FREE local language model...")
    model_name = "google/flan-t5-large"
    try:
        logger.info(f"   Loading {model_name}...")
        device = 0 if torch.cuda.is_available() else -1
        model_kwargs = {"low_cpu_mem_usage": True}
        llm_client = pipeline(
            "text2text-generation",
            model=model_name,
            device=device,
            model_kwargs=model_kwargs
        )
        CONFIG["llm_model"] = model_name
        CONFIG["model_type"] = "t5"
        logger.info(f"✅ LLM initialized: {model_name}")
        logger.info(f"   Device: {'GPU' if device == 0 else 'CPU'}")
        return llm_client
    except Exception as e:
        logger.error(f"❌ Failed to load model: {str(e)}")
        raise Exception(f"Failed to initialize LLM: {str(e)}")

def remote_generate(prompt: str, max_new_tokens: int = 512, temperature: float = 0.7, top_p: float = 0.9) -> str:
    """Call the Hugging Face Inference API for remote generation. Requires
    the `HF_INFERENCE_API_KEY` env var to be set and a model name in
    `REMOTE_LLM_MODEL`.
    """
    if not HF_INFERENCE_API_KEY:
        raise Exception("HF_INFERENCE_API_KEY not set for remote generation")
    # New router endpoint is required by HF (replaces api-inference.huggingface.co)
    router_url = f"https://router.huggingface.co/models/{REMOTE_LLM_MODEL}"
    old_url = f"https://api-inference.huggingface.co/models/{REMOTE_LLM_MODEL}"
    headers = {"Authorization": f"Bearer {HF_INFERENCE_API_KEY}", "Accept": "application/json"}
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": max_new_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "return_full_text": False
        }
    }
    logger.info(f"   → Remote inference request to router {REMOTE_LLM_MODEL} (tokens={max_new_tokens}, temp={temperature})")
    try:
        r = requests.post(router_url, headers=headers, json=payload, timeout=120)
    except Exception as e:
        logger.error(f"   ❌ Remote router request failed: {e}")
        # Try the older endpoint as a fallback
        try:
            logger.info("   → Attempting legacy api-inference endpoint as fallback")
            r = requests.post(old_url, headers=headers, json=payload, timeout=120)
        except Exception as e2:
            logger.error(f"   ❌ Legacy endpoint request failed: {e2}")
            return ""
    if r.status_code != 200:
        logger.error(f"   ❌ Remote inference error {r.status_code}: {r.text[:200]}")
        return ""
    result = r.json()
    if isinstance(result, dict) and result.get("error"):
        logger.error(f"   ❌ Remote inference returned error: {result.get('error')}")
        return ""
    # The HF Inference API can return a list of generated outputs or a dict
    if isinstance(result, list) and result:
        # Entries may be strings or dicts like {"generated_text": "..."}
        first = result[0]
        if isinstance(first, dict):
            return first.get("generated_text", "").strip()
        return str(first).strip()
    if isinstance(result, dict) and "generated_text" in result:
        return result["generated_text"].strip()
    return str(result).strip()

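# Example call (a sketch, assuming HF_INFERENCE_API_KEY is set and the remote
# model is reachable). remote_generate() returns "" on any failure, so callers
# should always check the result before using it:
#
#   text = remote_generate("Suggest a fall outfit for a rainy city.", max_new_tokens=200)
#   if not text:
#       pass  # fall back to the local model or the extractive generator
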
def initialize_embeddings():
    logger.info("🚀 Initializing embeddings model...")
    embeddings = HuggingFaceEmbeddings(
        model_name=CONFIG["embedding_model"],
        model_kwargs={'device': 'cpu'},
        encode_kwargs={'normalize_embeddings': True}
    )
    logger.info(f"✅ Embeddings initialized: {CONFIG['embedding_model']}")
    return embeddings

def load_vector_store(embeddings):
    logger.info("📦 Loading FAISS vector store...")
    vector_store_path = CONFIG["vector_store_path"]
    index_file = os.path.join(vector_store_path, "index.faiss")
    pkl_file = os.path.join(vector_store_path, "index.pkl")
    if not os.path.exists(index_file):
        raise FileNotFoundError(f"FAISS index file not found: {index_file}")
    if not os.path.exists(pkl_file):
        raise FileNotFoundError(f"FAISS metadata file not found: {pkl_file}")
    logger.info(f"✅ Found index.faiss ({os.path.getsize(index_file)/1024/1024:.2f} MB)")
    logger.info(f"✅ Found index.pkl ({os.path.getsize(pkl_file)/1024:.2f} KB)")
    try:
        vectorstore = FAISS.load_local(
            vector_store_path,
            embeddings,
            allow_dangerous_deserialization=True
        )
        logger.info("✅ FAISS vector store loaded successfully")
        return vectorstore
    except Exception as e:
        logger.warning(f"⚠️ Pydantic compatibility issue: {str(e)[:100]}")
        logger.info("🔄 Applying Pydantic monkey-patch and retrying...")
        try:
            import pydantic.v1.main as pydantic_main
            original_setstate = pydantic_main.BaseModel.__setstate__

            def patched_setstate(self, state):
                # Older pickles may lack __fields_set__; synthesize it so
                # Pydantic v1 models can be restored.
                if '__fields_set__' not in state:
                    state['__fields_set__'] = set(state.get('__dict__', {}).keys())
                return original_setstate(self, state)

            pydantic_main.BaseModel.__setstate__ = patched_setstate
            logger.info("   ✅ Pydantic monkey-patch applied")
        except Exception as patch_error:
            logger.warning(f"   ⚠️ Pydantic patch failed: {patch_error}")
        try:
            vectorstore = FAISS.load_local(
                vector_store_path,
                embeddings,
                allow_dangerous_deserialization=True
            )
            logger.info("✅ FAISS vector store loaded with Pydantic patch")
            return vectorstore
        except Exception as e2:
            logger.error(f"   ❌ Still failed after patch: {str(e2)[:100]}")
            logger.info("🔄 Using manual reconstruction (last resort)...")
            import re
            import faiss
            index = faiss.read_index(index_file)  # read to validate the raw index file
            logger.info("   ✅ FAISS index loaded")
            with open(pkl_file, "rb") as f:
                raw_bytes = f.read()
            logger.info(f"   Read {len(raw_bytes)} bytes from pickle")
            # Scrape printable-text runs (50+ chars) out of the raw pickle bytes
            text_pattern = rb'([A-Za-z0-9\s\.\,\;\:\!\?\-\'\"\(\)]{50,})'
            matches = re.findall(text_pattern, raw_bytes)
            if len(matches) > 100:
                logger.info(f"   Found {len(matches)} potential document fragments")
                documents = []
                for idx, match in enumerate(matches[:5000]):
                    try:
                        content = match.decode('utf-8', errors='ignore').strip()
                        if len(content) >= 100:
                            doc = Document(
                                page_content=content,
                                metadata={"source": "reconstructed", "id": idx}
                            )
                            documents.append(doc)
                    except Exception:
                        continue
                if len(documents) < 100:
                    raise Exception(f"Only extracted {len(documents)} documents, need at least 100")
                logger.info(f"   ✅ Extracted {len(documents)} high-quality documents")
                logger.info("   🔄 Rebuilding FAISS index from scratch...")
                vectorstore = FAISS.from_documents(
                    documents=documents,
                    embedding=embeddings
                )
                logger.info(f"✅ FAISS vector store rebuilt from {len(documents)} documents")
                return vectorstore
            else:
                raise Exception("Could not extract enough document content from pickle")

# ============================================================================
# RAG PIPELINE FUNCTIONS
# ============================================================================
def generate_extractive_answer(query: str, retrieved_docs: List[Document]) -> Optional[str]:
    """Build a long-form answer from retrieved documents using extractive
    selection + templated transitions. This avoids calling the LLM when it
    repeatedly fails or returns very short outputs.
    """
    logger.info(f"🔧 Running extractive fallback for: '{query}'")
    # Collect text and split into sentences
    import re
    all_text = "\n\n".join([d.page_content for d in retrieved_docs])
    # Basic sentence split (keeps punctuation)
    sentences = re.split(r'(?<=[.!?])\s+', all_text)
    sentences = [s.strip() for s in sentences if len(s.strip()) > 30]
    if not sentences:
        logger.warning("   ❌ No sentences found in retrieved documents for extractive fallback")
        return None
    # Scoring: keyword overlap with the query and general fashion terms
    query_tokens = set(re.findall(r"\w+", query.lower()))
    fashion_keywords = set(["outfit", "wear", "wardrobe", "style", "colors", "color", "layer", "layering",
                            "blazer", "trousers", "dress", "shirt", "shoes", "boots", "sweater", "jacket",
                            "care", "wash", "dry", "clean", "wool", "cotton", "silk", "linen", "fit", "tailor",
                            "versatile", "neutral", "accessory", "belt", "bag", "occasion", "season", "fall"])
    keywords = query_tokens.union(fashion_keywords)
    scored = []
    for s in sentences:
        s_tokens = set(re.findall(r"\w+", s.lower()))
        score = len(s_tokens & keywords)
        # Length bonus to prefer richer sentences
        score += min(3, len(s.split()) // 20)
        scored.append((score, s))
    scored.sort(key=lambda x: x[0], reverse=True)
    top_sentences = [s for _, s in scored[:60]]

    # Build structured sections using top sentences + templates
    def pick(n, start=0):
        return top_sentences[start:start + n]

    intro = pick(2, 0)
    key_items = pick(8, 2)
    styling = pick(8, 10)
    care = pick(6, 18)
    conclusion = pick(4, 24)
    # Add handcrafted, helpful transitions to improve flow
    template_intro = f"Here's a detailed answer to '{query}'. I'll cover essential wardrobe items, styling tips, and care advice so you can apply these suggestions practically."
    # Ensure the care section includes a concrete care example if none was retrieved
    care_text = "\n\n".join(care)
    if "dry clean" not in care_text.lower() and "hand wash" not in care_text.lower():
        care_text += "\n\nDry clean or hand wash in cold water with wool-specific detergent. Never wring out wool - gently squeeze excess water and lay flat to dry on a towel."
    parts = [template_intro]
    if intro:
        parts.append(" ".join(intro))
    if key_items:
        parts.append("Key wardrobe items to prioritize:")
        parts.append(" ".join(key_items))
    if styling:
        parts.append("Practical styling tips:")
        parts.append(" ".join(styling))
    if care_text:
        parts.append("Care & maintenance:")
        parts.append(care_text)
    if conclusion:
        parts.append("Wrapping up:")
        parts.append(" ".join(conclusion))
    # Combine and refine spacing
    answer = "\n\n".join(parts)
    # Post-process: ensure target length (approximately 400-700 words)
    words = answer.split()
    word_count = len(words)
    # If too short, append templated practical paragraphs built from keywords
    if word_count < 380:
        logger.info(f"   → Extractive answer short ({word_count} words). Appending templated paragraphs.")
        extra_paragraphs = [
            "A reliable strategy is to build around versatile, neutral pieces: a well-fitted blazer, tailored trousers, a versatile dress, and quality shoes. These items can be mixed and matched for many occasions.",
            "Focus on fit and fabric: ensure key items are well-tailored, prioritize breathable fabrics for comfort, and choose merino or wool blends for colder seasons to layer effectively.",
            "Layering is essential for transitional weather; combine a lightweight sweater under a jacket, and carry a scarf for added warmth and visual interest.",
            "Accessories like belts, a structured bag, and minimal jewelry can elevate basic outfits without extra effort. Neutral colors increase versatility and pair well with bolder accents.",
        ]
        answer += "\n\n" + "\n\n".join(extra_paragraphs)
        words = answer.split()
        word_count = len(words)
    # If still too long, truncate gracefully
    if word_count > 750:
        words = words[:700]
        answer = " ".join(words) + '...'
        word_count = 700
    logger.info(f"   ✅ Extractive answer ready ({word_count} words)")
    return answer

def scaffold_and_polish(query: str, retrieved_docs: List[Document], llm_client) -> Optional[str]:
    """Create a concise scaffold (approx. 150-220 words) from retrieved docs,
    then ask the remote (or local) LLM to expand and polish it into a
    320-420 word expert answer. Returns None if polishing fails.
    """
    logger.info(f"🎨 Building scaffold for polish: '{query}'")
    import re
    # Reuse the sentence-extraction logic but stop early for a compact scaffold
    all_text = "\n\n".join([d.page_content for d in retrieved_docs[:12]])
    sentences = re.split(r'(?<=[.!?])\s+', all_text)
    sentences = [s.strip() for s in sentences if len(s.strip()) > 30]
    if not sentences:
        logger.warning("   ❌ No sentences to build scaffold")
        return None
    # Score sentences by overlap with query + fashion keywords
    query_tokens = set(re.findall(r"\w+", query.lower()))
    fashion_keywords = set(["outfit", "wear", "wardrobe", "style", "colors", "layer", "blazer",
                            "trousers", "dress", "shoes", "sweater", "jacket", "care", "wool", "fit",
                            "tailor", "neutral", "accessory", "season", "fall"])
    keywords = query_tokens.union(fashion_keywords)
    scored = []
    for s in sentences:
        s_tokens = set(re.findall(r"\w+", s.lower()))
        score = len(s_tokens & keywords)
        score += min(2, len(s.split()) // 30)
        scored.append((score, s))
    scored.sort(key=lambda x: x[0], reverse=True)
    scaffold_parts = []
    word_count = 0
    for _, s in scored:
        scaffold_parts.append(s)
        word_count = len(" ".join(scaffold_parts).split())
        if word_count >= 180:
            break
    scaffold = "\n\n".join(scaffold_parts).strip()
    if not scaffold:
        logger.warning("   ❌ Scaffold empty after selection")
        return None
    # Craft the polish prompt
    polish_prompt = f"""Please rewrite and expand the draft below into a clear, expert, natural-flowing answer of about 320-420 words to the question: {query}

Requirements:
- Keep paragraphs natural and connected.
- Preserve factual content from the draft and avoid inventing unsupported facts.
- Use a friendly, expert tone and provide practical, actionable advice.

Draft:
{scaffold}

Answer:
"""
    logger.info("   → Polishing scaffold with LLM")
    try:
        if USE_REMOTE_LLM:
            polished = remote_generate(polish_prompt, max_new_tokens=600, temperature=0.72, top_p=0.92)
        else:
            out = llm_client(polish_prompt, max_new_tokens=600, temperature=0.72, top_p=0.92, do_sample=True, num_beams=1)
            polished = out[0].get('generated_text', '') if isinstance(out, list) and out else str(out)
        polished = polished.strip()
    except Exception as e:
        logger.error(f"   ❌ Polishing error: {e}")
        return None
    if not polished:
        logger.warning("   ❌ Polished output empty")
        return None
    final_words = polished.split()
    fw = len(final_words)
    if fw < 300:
        logger.warning(f"   ❌ Polished output too short ({fw} words)")
        return None
    if fw > 460:
        polished = ' '.join(final_words[:420]) + '...'
    logger.info(f"   ✅ Polished answer ready ({len(polished.split())} words)")
    return polished

def retrieve_knowledge_langchain(
    query: str,
    vectorstore,
    top_k: int = 15
) -> Tuple[List[Document], float]:
    logger.info(f"🔍 Retrieving knowledge for: '{query}'")
    query_variants = [
        query,
        f"fashion advice clothing outfit style for {query}",
    ]
    all_docs = []
    for variant in query_variants:
        try:
            docs_and_scores = vectorstore.similarity_search_with_score(variant, k=top_k)
            for doc, score in docs_and_scores:
                # FAISS returns a distance (smaller = closer); map to (0, 1] similarity
                similarity = 1.0 / (1.0 + score)
                doc.metadata['similarity'] = similarity
                doc.metadata['query_variant'] = variant
                all_docs.append(doc)
        except Exception as e:
            logger.error(f"Retrieval error for variant '{variant}': {e}")
    # Deduplicate by content prefix, keeping the highest-similarity copy
    unique_docs = {}
    for doc in all_docs:
        content_key = doc.page_content[:100]
        if content_key not in unique_docs:
            unique_docs[content_key] = doc
        elif doc.metadata.get('similarity', 0) > unique_docs[content_key].metadata.get('similarity', 0):
            unique_docs[content_key] = doc
    final_docs = list(unique_docs.values())
    final_docs.sort(key=lambda x: x.metadata.get('similarity', 0), reverse=True)
    if final_docs:
        avg_similarity = sum(d.metadata.get('similarity', 0) for d in final_docs) / len(final_docs)
        confidence = min(avg_similarity, 1.0)
    else:
        confidence = 0.0
    logger.info(f"✅ Retrieved {len(final_docs)} unique documents (confidence: {confidence:.2f})")
    return final_docs, confidence

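# Worked example of the distance-to-similarity mapping above: FAISS returns an
# L2 distance where smaller means closer, so similarity = 1/(1 + score) flips
# it into the (0, 1] range:
#   score = 0.0 -> similarity = 1.00 (near-exact match)
#   score = 0.5 -> similarity ~ 0.67
#   score = 4.0 -> similarity = 0.20
# Confidence is then simply the mean similarity over the deduplicated results.
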
def generate_llm_answer(
    query: str,
    retrieved_docs: List[Document],
    llm_client,
    attempt: int = 1
) -> Optional[str]:
    # Allow operation when using remote inference (no local llm_client).
    if not llm_client and not USE_REMOTE_LLM:
        logger.error("   ❌ LLM client not initialized and remote inference disabled")
        return None
    # Re-rank retrieved docs by word overlap with the query, with bonuses for
    # verified sources and substantial passages.
    query_lower = query.lower()
    query_words = set(query_lower.split())
    scored_docs = []
    for doc in retrieved_docs[:20]:
        content = doc.page_content.lower()
        doc_words = set(content.split())
        overlap = len(query_words.intersection(doc_words))
        if doc.metadata.get('verified', False):
            overlap += 10
        if len(doc.page_content) > 200:
            overlap += 3
        scored_docs.append((doc, overlap))
    scored_docs.sort(key=lambda x: x[1], reverse=True)
    top_docs = [doc for doc, _ in scored_docs[:8]]
    context_parts = []
    for doc in top_docs:
        content = doc.page_content.strip()
        if len(content) > 400:
            content = content[:400] + "..."
        context_parts.append(content)
    context_text = "\n\n".join(context_parts)
    # Primary strategy: try a single long-form generation first. If that fails
    # (too short or truncated), fall back to an iterative multi-pass generator
    # that appends continuation chunks until we reach the target word count.
    # Targets trimmed for faster generation (per user request): aim ~350 words.
    target_min_words = 320
    target_max_words = 420
    chunk_target_words = 140
    max_iterations = 4

    def call_model(prompt, max_new_tokens, temperature, top_p, repetition_penalty):
        logger.info(f"   → Model call (temp={temperature}, max_new_tokens={max_new_tokens})")
        try:
            if USE_REMOTE_LLM:
                # Use the remote Hugging Face Inference API
                return remote_generate(prompt, max_new_tokens, temperature, top_p)
            out = llm_client(
                prompt,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_p=top_p,
                do_sample=True,
                num_beams=1,
                repetition_penalty=repetition_penalty,
                early_stopping=False
            )
            if isinstance(out, list) and out:
                return out[0].get('generated_text', '') if isinstance(out[0], dict) else str(out[0])
            return str(out)
        except Exception as e:
            logger.error(f"   ❌ Model call error: {e}")
            return ''

    # Build the initial prompt
    base_prompt = f"""Please write a clear, natural-flowing, well-structured fashion answer using the context below.

Question: {query}

Context (use where helpful):
{context_text[:1200]}

Requirements:
- Aim for a long-form answer ~{target_min_words}-{target_max_words} words, structured in paragraphs.
- Use the provided context where relevant and add practical, actionable advice.
- Keep a friendly, expert tone and avoid hedging phrases like "I can't" or "I don't know".

Answer:
"""
    # First attempt: single call with a medium-large token budget (may be
    # limited by the model); retries use a hotter, larger configuration.
    if attempt == 1:
        temperature = 0.70
        max_new_tokens = 600
        top_p = 0.92
        repetition_penalty = 1.1
    else:
        temperature = 0.82
        max_new_tokens = 800
        top_p = 0.95
        repetition_penalty = 1.15
    initial_output = call_model(base_prompt, max_new_tokens, temperature, top_p, repetition_penalty)
    response = (initial_output or '').strip()
    # Basic sanity check
    if not response:
        logger.warning("   ❌ Empty initial response")
    words = response.split()
    word_count = len(words)
    # If the single shot succeeded, validate length and return
    if word_count >= target_min_words:
        if word_count > target_max_words:
            response = ' '.join(words[:target_max_words]) + '...'
            word_count = target_max_words
        logger.info(f"   ✅ Single-shot generated {word_count} words")
        return response
    # Otherwise, iteratively request continuations to build up to the target
    accumulated = response
    prev_word_count = word_count
    for i in range(max_iterations):
        remaining = max(0, target_min_words - len(accumulated.split()))
        if remaining <= 0:
            break
        # Ask the model to continue without repeating previous content
        continue_prompt = f"""Continue the previous answer in the same tone and style to add about {min(chunk_target_words, remaining)} words.
Do not repeat sentences already present. Keep paragraphs natural and connected.

Previous answer:
{accumulated}

Continue:
"""
        # Slightly higher temperature on continuations to encourage richer text
        cont_output = call_model(continue_prompt, max_new_tokens=450, temperature=0.78, top_p=0.93, repetition_penalty=1.08)
        cont_text = (cont_output or '').strip()
        if not cont_text:
            logger.warning(f"   ❌ Continuation {i+1} returned empty; stopping")
            break
        # Avoid trivial repeats: if the continuation duplicates accumulated text, stop
        if cont_text in accumulated or accumulated.endswith(cont_text[:50]):
            logger.warning(f"   ❌ Continuation {i+1} appears repetitive; stopping")
            break
        # Append and normalize spacing
        accumulated = accumulated.rstrip() + '\n\n' + cont_text
        current_word_count = len(accumulated.split())
        logger.info(f"   → After continuation {i+1}, words={current_word_count}")
        # Stop early once the minimum target is reached
        if current_word_count >= target_min_words:
            break
        # Safety: if no progress was made, stop
        if current_word_count == prev_word_count:
            logger.warning("   ❌ No progress from continuation; stopping")
            break
        prev_word_count = current_word_count
    final_words = accumulated.split()
    final_count = len(final_words)
    if final_count < target_min_words:
        logger.warning(f"   ❌ Final answer too short ({final_count} words) after continuations")
        return None
    if final_count > target_max_words:
        logger.info(f"   ⚠️ Final answer long ({final_count} words). Truncating to {target_max_words} words.")
        accumulated = ' '.join(final_words[:target_max_words]) + '...'
        final_count = target_max_words
    # Final check for apology/hedging at the start of the answer
    apology_phrases = ["i cannot", "i can't", "i'm sorry", "i apologize", "i don't have"]
    if any(phrase in accumulated.lower()[:200] for phrase in apology_phrases):
        logger.warning("   ❌ Apology/hedging detected in final answer")
        return None
    logger.info(f"   ✅ Built long-form answer ({final_count} words)")
    return accumulated

def generate_answer_langchain(
    query: str,
    vectorstore,
    llm_client
) -> str:
    logger.info(f"\n{'='*80}")
    logger.info(f"Processing query: '{query}'")
    logger.info(f"{'='*80}")
    retrieved_docs, confidence = retrieve_knowledge_langchain(
        query,
        vectorstore,
        top_k=CONFIG["top_k"]
    )
    if not retrieved_docs:
        return "I couldn't find relevant information to answer your question."
    llm_answer = None
    for attempt in range(1, 3):
        logger.info(f"\n   🤖 LLM Generation Attempt {attempt}/2")
        llm_answer = generate_llm_answer(query, retrieved_docs, llm_client, attempt)
        if llm_answer:
            logger.info("   ✅ LLM answer generated successfully")
            break
        logger.warning(f"   ❌ Attempt {attempt}/2 failed, retrying...")
    if not llm_answer:
        logger.error("   ❌ Both LLM attempts failed")
        # Next: if the remote LLM is available, build a short scaffold from the
        # retrieved documents and ask the remote model to polish/expand it. This
        # is more reliable than single-shot long generation on some models.
        if USE_REMOTE_LLM:
            try:
                logger.info("   → Attempting scaffold-and-polish using remote LLM")
                polished = scaffold_and_polish(query, retrieved_docs, llm_client)
                if polished:
                    logger.info("   ✅ Scaffold-and-polish produced an answer")
                    return polished
            except Exception as e:
                logger.error(f"   ❌ Scaffold-and-polish error: {e}")
        # Final fallback: extractive templated answer (deterministic, never calls the LLM)
        try:
            logger.info("   → Using extractive fallback generator")
            fallback = generate_extractive_answer(query, retrieved_docs)
            if fallback:
                logger.info("   ✅ Extractive fallback produced an answer")
                return fallback
        except Exception as e:
            logger.error(f"   ❌ Extractive fallback error: {e}")
        return "I apologize, but I'm having trouble generating a response. Please try rephrasing your question or ask something else."
    return llm_answer

# ============================================================================
# GRADIO INTERFACE
# ============================================================================
def fashion_chatbot(message: str, history: List[List[str]]):
    try:
        if not message or not message.strip():
            yield "Please ask a fashion-related question!"
            return
        yield "🔍 Searching fashion knowledge..."
        retrieved_docs, confidence = retrieve_knowledge_langchain(
            message.strip(),
            vectorstore,
            top_k=CONFIG["top_k"]
        )
        if not retrieved_docs:
            yield "I couldn't find relevant information to answer your question."
            return
        yield f"📝 Generating answer ({len(retrieved_docs)} sources found)..."
        llm_answer = None
        for attempt in range(1, 3):
            logger.info(f"\n   🤖 LLM Generation Attempt {attempt}/2")
            llm_answer = generate_llm_answer(message.strip(), retrieved_docs, llm_client, attempt)
            if llm_answer:
                break
        if not llm_answer:
            logger.error("   ❌ All LLM attempts failed")
            yield "I apologize, but I'm having trouble generating a response. Please try rephrasing your question."
            return
        # Stream the answer a few words at a time for a typing effect
        import time
        words = llm_answer.split()
        displayed_text = ""
        for i, word in enumerate(words):
            displayed_text += word + " "
            if i % 3 == 0 or i == len(words) - 1:
                yield displayed_text.strip()
                time.sleep(0.05)
    except Exception as e:
        logger.error(f"Error in chatbot: {e}")
        yield f"Sorry, I encountered an error: {str(e)}"

# ============================================================================
# INITIALIZE AND LAUNCH
# ============================================================================
llm_client = None
embeddings = None
vectorstore = None

def startup():
    global llm_client, embeddings, vectorstore
    logger.info("🚀 Starting Fashion Advisor RAG...")
    embeddings = initialize_embeddings()
    vectorstore = load_vector_store(embeddings)
    llm_client = initialize_llm()
    logger.info("✅ All components initialized successfully!")

startup()

demo = gr.ChatInterface(
    fn=fashion_chatbot,
    title="👗 Fashion Advisor - RAG System",
    description="""
    **Ask me anything about fashion!** 👗

    I can help with:
    - Outfit recommendations for occasions
    - Color combinations and styling
    - Seasonal fashion advice
    - Body type and fit guidance
    - Wardrobe essentials

    *Powered by RAG with FAISS vector search and a local or remote LLM*
    """,
    examples=[
        "What should I wear to a business meeting?",
        "What colors go well with navy blue?",
        "What are essential wardrobe items for fall?",
        "How to dress for a summer wedding?",
        "What's the best outfit for a university presentation?",
    ],
)

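# Optional (a sketch for local testing): Gradio can expose a temporary public
# URL via demo.launch(share=True). On Hugging Face Spaces, the plain launch()
# in the __main__ guard below is sufficient.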
if __name__ == "__main__":
    demo.launch()