{
"schema_version": "1.0",
"description": "Workshop driving queries — used by notebooks 1-8 for retrieval evaluation, ground-truth comparison, and CRAG gap demonstration.",
"ground_truth_status": "Initial estimates. expected_relevant_doc_ids for queries B/C/E will be refined after Phase 2 (L1 build) once article content is known. D/G/H depend on L3 persona/conversation files written in Phase 5.",
"queries": [
{
"id": "A",
"text": "What's there to do in Iceland?",
"used_in_notebooks": [1, 2, 3, 4, 7],
"expected_relevant_doc_ids": [
"iceland",
"reykjavik",
"vatnajokull",
"iceland-westfjords"
],
"expected_winning_pipeline": "basic",
"narrative_role": "Easy baseline — naive RAG already works on a single, clearly-named destination. Establishes that the system is alive.",
"data_dependencies": ["L1.articles", "L1.chunks"]
},
{
"id": "B",
"text": "Affordable destinations in Southeast Asia for snorkeling and vegetarian food in March",
"used_in_notebooks": [1, 2, 3, 4, 5, 7],
"expected_relevant_doc_ids": [
"thailand",
"vietnam",
"malaysia",
"philippines",
"indonesia",
"cambodia",
"bali",
"ubud",
"phuket",
"koh-phi-phi",
"koh-samui",
"palawan",
"boracay",
"langkawi",
"gili-islands",
"khao-sok"
],
"expected_winning_pipeline": "graphrag",
"narrative_role": "Hard multi-aspect — combines geography (Southeast Asia), price (affordable), activity (snorkeling), cuisine (vegetarian), and season (March). Naive RAG returns shallow results; each notebook recovers more relevant docs. Main motivator for the entire arc.",
"data_dependencies": [
"L1.articles",
"L1.chunks",
"L1.metadata",
"L2.pricing",
"L2.seasonal",
"RAG.Entities",
"RAG.EntityRelationships"
]
},
{
"id": "C",
"text": "Where can I go for great hiking and excellent local food?",
"used_in_notebooks": [6],
"expected_relevant_doc_ids": [
"peru",
"sacred-valley",
"japan",
"japanese-alps",
"nepal",
"sagarmatha",
"tuscany",
"andalusia",
"dolomites",
"switzerland",
"patagonia",
"scottish-highlands",
"morocco",
"vietnam",
"bhutan"
],
"expected_winning_pipeline": "crag",
"narrative_role": "Mid-confidence ambiguous — CRAG's evaluator detects partial coverage and triggers enhanced retrieval (query rewriting + extra rounds) before answering. Contrasts with F (where CRAG refuses) and B (where graphrag wins outright).",
"data_dependencies": ["L1.articles", "L1.chunks", "L2.tips"]
},
{
"id": "D",
"text": "What did I love most about my Bali trip?",
"used_in_notebooks": [8],
"expected_relevant_doc_ids": ["bali", "ubud"],
"expected_relevant_persona_turns": "TODO — fill once L3 conversation seeds are written for persona 'me' in Phase 5",
"expected_winning_pipeline": "basic",
"narrative_role": "Personal-corpus retrieval — answer comes primarily from the attendee's prior conversation history, with destination chunks as supporting context.",
"data_dependencies": ["L1.articles", "L3.persona", "L3.conversations"]
},
{
"id": "E",
"text": "Find diving spots similar to the Great Barrier Reef",
"used_in_notebooks": [5, 7],
"expected_relevant_doc_ids": [
"great-barrier-reef",
"maldives",
"palawan",
"koh-phi-phi",
"okinawa",
"fiji",
"bora-bora",
"tahiti",
"galapagos",
"fernando-de-noronha",
"andaman-islands",
"komodo"
],
"expected_winning_pipeline": "graphrag",
"narrative_role": "Similar-to via graph traversal — uses entity relationships (same_type, near) that vector search alone misses. Vector retrieval may surface scuba/marine text but lacks the explicit 'reef destination' link.",
"data_dependencies": [
"L1.articles",
"L1.chunks",
"RAG.Entities",
"RAG.EntityRelationships"
]
},
{
"id": "F",
"text": "What does it cost to travel in New Zealand in October?",
"used_in_notebooks": [6, 7],
"expected_relevant_doc_ids": [],
"expected_winning_pipeline": "crag",
"narrative_role": "Deliberate corpus gap — NZ excluded from L1, L2, and personas. basic retrieves nearest-neighbor noise (Australia, Tasmania, Tahiti) and the LLM fabricates a confident-sounding answer. CRAG's evaluator classifies disoriented and refuses. Side-by-side comparison is the hallucination-defense punchline.",
"data_dependencies": ["L1.articles", "L1.chunks"],
"is_corpus_gap": true
},
{
"id": "G",
"text": "Find a place that works for me and Sarah",
"used_in_notebooks": [8],
"expected_relevant_doc_ids": "TODO — depends on the intersection of 'me' persona and 'Sarah' persona compatible_with fields; populate once Phase 5 personas are written",
"expected_winning_pipeline": "basic_rerank",
"narrative_role": "Multi-persona intersection — merges 'me' and 'Sarah' preference profiles, reranks destinations against both compatible_with constraints. Demonstrates persona-as-filter.",
"data_dependencies": ["L1.articles", "L3.persona.me", "L3.persona.sarah"]
},
{
"id": "H",
"text": "Remind me what I told you about my dietary restrictions",
"used_in_notebooks": [8],
"expected_relevant_doc_ids": [],
"expected_relevant_persona_turns": "TODO — fill once L3 conversation seeds include dietary references for persona 'me' in Phase 5",
"expected_winning_pipeline": "basic",
"narrative_role": "Memory-only retrieval — answer comes purely from prior conversation turns. No L1 docs are relevant. Demonstrates that RAG over personal text is the same primitive as RAG over a document corpus.",
"data_dependencies": ["L3.persona", "L3.conversations"]
}
]
}