{
"schema_version": "1.0",
"description": "Workshop driving queries β€” used by notebooks 1-8 for retrieval evaluation, ground-truth comparison, and CRAG gap demonstration.",
"ground_truth_status": "Initial estimates. expected_relevant_doc_ids for queries B/C/E will be refined after Phase 2 (L1 build) once article content is known. D/G/H depend on L3 persona/conversation files written in Phase 5.",
"queries": [
{
"id": "A",
"text": "What's there to do in Iceland?",
"used_in_notebooks": [1, 2, 3, 4, 7],
"expected_relevant_doc_ids": [
"iceland",
"reykjavik",
"vatnajokull",
"iceland-westfjords"
],
"expected_winning_pipeline": "basic",
"narrative_role": "Easy baseline β€” naive RAG already works on a single, clearly-named destination. Establishes that the system is alive.",
"data_dependencies": ["L1.articles", "L1.chunks"]
},
{
"id": "B",
"text": "Affordable destinations in Southeast Asia for snorkeling and vegetarian food in March",
"used_in_notebooks": [1, 2, 3, 4, 5, 7],
"expected_relevant_doc_ids": [
"thailand",
"vietnam",
"malaysia",
"philippines",
"indonesia",
"cambodia",
"bali",
"ubud",
"phuket",
"koh-phi-phi",
"koh-samui",
"palawan",
"boracay",
"langkawi",
"gili-islands",
"khao-sok"
],
"expected_winning_pipeline": "graphrag",
"narrative_role": "Hard multi-aspect β€” combines geography (Southeast Asia), price (affordable), activity (snorkeling), cuisine (vegetarian), and season (March). Naive RAG returns shallow results; each notebook recovers more relevant docs. Main motivator for the entire arc.",
"data_dependencies": [
"L1.articles",
"L1.chunks",
"L1.metadata",
"L2.pricing",
"L2.seasonal",
"RAG.Entities",
"RAG.EntityRelationships"
]
},
{
"id": "C",
"text": "Where can I go for great hiking and excellent local food?",
"used_in_notebooks": [6],
"expected_relevant_doc_ids": [
"peru",
"sacred-valley",
"japan",
"japanese-alps",
"nepal",
"sagarmatha",
"tuscany",
"andalusia",
"dolomites",
"switzerland",
"patagonia",
"scottish-highlands",
"morocco",
"vietnam",
"bhutan"
],
"expected_winning_pipeline": "crag",
"narrative_role": "Mid-confidence ambiguous β€” CRAG's evaluator detects partial coverage and triggers enhanced retrieval (query rewriting + extra rounds) before answering. Contrasts with F (where CRAG refuses) and B (where graphrag wins outright).",
"data_dependencies": ["L1.articles", "L1.chunks", "L2.tips"]
},
{
"id": "D",
"text": "What did I love most about my Bali trip?",
"used_in_notebooks": [8],
"expected_relevant_doc_ids": ["bali", "ubud"],
"expected_relevant_persona_turns": "TODO β€” fill once L3 conversation seeds are written for persona 'me' in Phase 5",
"expected_winning_pipeline": "basic",
"narrative_role": "Personal-corpus retrieval β€” answer comes primarily from the attendee's prior conversation history, with destination chunks as supporting context.",
"data_dependencies": ["L1.articles", "L3.persona", "L3.conversations"]
},
{
"id": "E",
"text": "Find diving spots similar to the Great Barrier Reef",
"used_in_notebooks": [5, 7],
"expected_relevant_doc_ids": [
"great-barrier-reef",
"maldives",
"palawan",
"koh-phi-phi",
"okinawa",
"fiji",
"bora-bora",
"tahiti",
"galapagos",
"fernando-de-noronha",
"andaman-islands",
"komodo"
],
"expected_winning_pipeline": "graphrag",
"narrative_role": "Similar-to via graph traversal β€” uses entity relationships (same_type, near) that vector search alone misses. Vector retrieval may surface scuba/marine text but lacks the explicit 'reef destination' link.",
"data_dependencies": [
"L1.articles",
"L1.chunks",
"RAG.Entities",
"RAG.EntityRelationships"
]
},
{
"id": "F",
"text": "What does it cost to travel in New Zealand in October?",
"used_in_notebooks": [6, 7],
"expected_relevant_doc_ids": [],
"expected_winning_pipeline": "crag",
"narrative_role": "Deliberate corpus gap β€” NZ excluded from L1, L2, and personas. basic retrieves nearest-neighbor noise (Australia, Tasmania, Tahiti) and the LLM fabricates a confident-sounding answer. CRAG's evaluator classifies disoriented and refuses. Side-by-side comparison is the hallucination-defense punchline.",
"data_dependencies": ["L1.articles", "L1.chunks"],
"is_corpus_gap": true
},
{
"id": "G",
"text": "Find a place that works for me and Sarah",
"used_in_notebooks": [8],
"expected_relevant_doc_ids": "TODO β€” depends on the intersection of 'me' persona and 'Sarah' persona compatible_with fields; populate once Phase 5 personas are written",
"expected_winning_pipeline": "basic_rerank",
"narrative_role": "Multi-persona intersection β€” merges 'me' and 'Sarah' preference profiles, reranks destinations against both compatible_with constraints. Demonstrates persona-as-filter.",
"data_dependencies": ["L1.articles", "L3.persona.me", "L3.persona.sarah"]
},
{
"id": "H",
"text": "Remind me what I told you about my dietary restrictions",
"used_in_notebooks": [8],
"expected_relevant_doc_ids": [],
"expected_relevant_persona_turns": "TODO β€” fill once L3 conversation seeds include dietary references for persona 'me' in Phase 5",
"expected_winning_pipeline": "basic",
"narrative_role": "Memory-only retrieval β€” answer comes purely from prior conversation turns. No L1 docs are relevant. Demonstrates that RAG over personal text is the same primitive as RAG over a document corpus.",
"data_dependencies": ["L3.persona", "L3.conversations"]
}
]
}
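
Below is a minimal sketch of how a notebook might consume this file for the recall-style ground-truth comparison described above. The retrieve() function is hypothetical (stubbed here so the script runs); swap in the pipeline under test. Field names come from the JSON itself: corpus-gap queries with an empty expected_relevant_doc_ids list (F) score None, and string-valued TODO placeholders (G) are skipped.

```python
import json

def retrieve(query_text, k=10):
    """Hypothetical retriever stub; replace with the pipeline under test.
    A real implementation returns a ranked list of doc ids."""
    return []

def recall_at_k(retrieved_ids, relevant_ids, k=10):
    """Fraction of the expected docs that appear in the top-k retrieved ids."""
    if not relevant_ids:
        return None  # corpus-gap queries like F have no relevant docs by design
    hits = set(retrieved_ids[:k]) & set(relevant_ids)
    return len(hits) / len(relevant_ids)

with open("queries/workshop_queries.json") as f:
    spec = json.load(f)

for q in spec["queries"]:
    expected = q["expected_relevant_doc_ids"]
    if isinstance(expected, str):
        continue  # skip TODO placeholders (query G) until Phase 5 fills them in
    score = recall_at_k(retrieve(q["text"]), expected)
    print(f"{q['id']} (expects {q['expected_winning_pipeline']}): recall@10 = {score}")
```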