"""
Prompting Technique Selection Agent
Selects the most appropriate prompting technique based on task analysis.
"""
from typing import Dict, List, Any, Tuple
import json
class TechniqueSelectionAgent:
    """Scores and selects the most appropriate prompting technique for a task.

    The catalog in ``self.techniques`` maps a stable technique key to metadata
    (display name, description, use cases, supported complexity levels, and
    task types). Selection is a simple additive score: +3/+1 for task-type
    match, +2 for complexity match, +2 for keyword hits in the original
    prompt (see ``_CONTENT_KEYWORDS``).
    """

    # Keywords whose presence in the (lower-cased) prompt earns a technique a
    # +2 bonus in `_get_content_based_score`. Techniques absent from this
    # table never receive a content bonus.
    _CONTENT_KEYWORDS: Dict[str, Tuple[str, ...]] = {
        "chain_of_thought": ('step', 'reason', 'explain', 'how', 'why', 'solve'),
        "few_shot": ('example', 'like', 'similar', 'format'),
        "react": ('search', 'find', 'lookup', 'research', 'tool'),
        "pal": ('calculate', 'compute', 'math', 'algorithm', 'code'),
        "tree_of_thoughts": ('explore', 'consider', 'alternative', 'multiple'),
        "generated_knowledge": ('fact', 'knowledge', 'information', 'research'),
    }

    def __init__(self):
        # Catalog of prompting techniques and the situations they suit.
        # `complexity` and `task_types` drive the scoring in select_technique;
        # the remaining fields are descriptive metadata returned to callers.
        self.techniques = {
            "zero_shot": {
                "name": "Zero-shot Prompting",
                "description": "Direct instruction without examples",
                "use_cases": ["simple_tasks", "well_defined_instructions", "classification"],
                "complexity": ["simple", "moderate"],
                "task_types": ["text_generation", "classification", "simple_qa"]
            },
            "few_shot": {
                "name": "Few-shot Prompting",
                "description": "Provides examples to guide the model",
                "use_cases": ["pattern_learning", "format_specification", "style_mimicking"],
                "complexity": ["moderate", "complex"],
                "task_types": ["text_generation", "classification", "creative_writing"]
            },
            "chain_of_thought": {
                "name": "Chain-of-Thought (CoT) Prompting",
                "description": "Breaks down reasoning into steps",
                "use_cases": ["reasoning", "math_problems", "logical_analysis"],
                "complexity": ["moderate", "complex"],
                "task_types": ["reasoning", "question_answering", "problem_solving"]
            },
            "react": {
                "name": "ReAct Prompting",
                "description": "Combines reasoning and acting with external tools",
                "use_cases": ["tool_use", "information_retrieval", "multi_step_tasks"],
                "complexity": ["complex"],
                "task_types": ["question_answering", "research", "data_analysis"]
            },
            "tree_of_thoughts": {
                "name": "Tree of Thoughts (ToT)",
                "description": "Explores multiple reasoning paths",
                "use_cases": ["complex_problem_solving", "strategic_planning", "exploration"],
                "complexity": ["complex"],
                "task_types": ["reasoning", "planning", "creative_problem_solving"]
            },
            "self_consistency": {
                "name": "Self-Consistency",
                "description": "Generates multiple solutions and selects the most consistent",
                "use_cases": ["accuracy_improvement", "uncertainty_reduction"],
                "complexity": ["moderate", "complex"],
                "task_types": ["reasoning", "calculation", "analysis"]
            },
            "generated_knowledge": {
                "name": "Generated Knowledge Prompting",
                "description": "Generates relevant knowledge before answering",
                "use_cases": ["knowledge_intensive_tasks", "fact_checking"],
                "complexity": ["moderate", "complex"],
                "task_types": ["question_answering", "research", "analysis"]
            },
            "prompt_chaining": {
                "name": "Prompt Chaining",
                "description": "Breaks complex tasks into subtasks",
                "use_cases": ["complex_workflows", "multi_step_processes"],
                "complexity": ["complex"],
                "task_types": ["document_analysis", "multi_step_reasoning", "workflow_automation"]
            },
            "meta_prompting": {
                "name": "Meta Prompting",
                "description": "Focuses on structural and syntactical aspects",
                "use_cases": ["abstract_reasoning", "pattern_recognition"],
                "complexity": ["moderate", "complex"],
                "task_types": ["reasoning", "code_generation", "mathematical_problems"]
            },
            "pal": {
                "name": "Program-Aided Language Models (PAL)",
                "description": "Generates code to solve problems",
                "use_cases": ["calculations", "data_processing", "algorithmic_tasks"],
                "complexity": ["moderate", "complex"],
                "task_types": ["code_generation", "calculation", "data_analysis"]
            }
        }

    def _copy_technique(self, technique_key: str) -> Dict[str, Any]:
        """Return an independent copy of one catalog entry.

        A plain ``dict.copy()`` would share the inner lists with the catalog,
        so a caller mutating the returned entry would silently corrupt
        ``self.techniques``. Copy one level deeper instead (values are either
        strings or flat lists of strings).
        """
        return {
            field: value.copy() if isinstance(value, list) else value
            for field, value in self.techniques[technique_key].items()
        }

    def select_technique(self, analysis_result: Dict[str, Any]) -> Tuple[str, Dict[str, Any]]:
        """
        Select the most appropriate prompting technique based on analysis.

        Args:
            analysis_result: Output from InputAnalysisAgent. Keys read:
                'task_type' (default 'text_generation'), 'complexity'
                (default 'simple'), and 'original_prompt' (via the
                content-based scorer).

        Returns:
            Tuple of (technique_key, technique_info_with_reasoning), where
            the info dict is an independent copy of the catalog entry with
            'reasoning' (str) and 'confidence' (float in [0, 1]) added.
        """
        task_type = analysis_result.get('task_type', 'text_generation')
        complexity = analysis_result.get('complexity', 'simple')

        # Score each technique based on compatibility.
        technique_scores = {}
        for tech_key, tech_info in self.techniques.items():
            score = 0
            # Task type: exact match scores highest; a substring match
            # (e.g. listed type contained in a compound task_type) scores less.
            if task_type in tech_info['task_types']:
                score += 3
            elif any(tt in task_type for tt in tech_info['task_types']):
                score += 1
            # Complexity compatibility.
            if complexity in tech_info['complexity']:
                score += 2
            # Keyword hits in the prompt text.
            score += self._get_content_based_score(tech_key, analysis_result)
            technique_scores[tech_key] = score

        # Highest score wins; ties resolve to the first technique in catalog
        # (insertion) order, matching max()'s behavior.
        best_technique = max(technique_scores, key=technique_scores.get)

        selected_technique = self._copy_technique(best_technique)
        selected_technique['reasoning'] = self._generate_reasoning(
            best_technique, analysis_result, technique_scores
        )
        # Normalize to [0, 1]. NOTE(review): the raw maximum is 7 (3+2+2), so
        # dividing by 5.0 saturates early; kept as-is to preserve existing
        # confidence values — confirm before changing the denominator.
        selected_technique['confidence'] = min(technique_scores[best_technique] / 5.0, 1.0)
        return best_technique, selected_technique

    def _get_content_based_score(self, technique_key: str, analysis_result: Dict[str, Any]) -> int:
        """Return +2 if the prompt contains any keyword cueing this technique, else 0."""
        prompt = analysis_result.get('original_prompt', '').lower()
        keywords = self._CONTENT_KEYWORDS.get(technique_key, ())
        return 2 if any(word in prompt for word in keywords) else 0

    def _generate_reasoning(self, technique_key: str, analysis_result: Dict[str, Any],
                            scores: Dict[str, int]) -> str:
        """Generate human-readable reasoning for the technique selection."""
        task_type = analysis_result.get('task_type', 'unknown')
        complexity = analysis_result.get('complexity', 'unknown')
        tech_info = self.techniques[technique_key]

        reasoning_parts = [f"Selected {tech_info['name']} because:"]
        if task_type in tech_info['task_types']:
            reasoning_parts.append(f"- It's well-suited for {task_type} tasks")
        if complexity in tech_info['complexity']:
            reasoning_parts.append(f"- It handles {complexity} complexity effectively")
        if self._get_content_based_score(technique_key, analysis_result) > 0:
            reasoning_parts.append("- The prompt content suggests this approach would be beneficial")
        # Always true for the winner, but guard anyway so this helper can be
        # called for non-winning techniques too.
        max_score = max(scores.values())
        if scores[technique_key] == max_score:
            reasoning_parts.append(f"- It scored highest ({max_score}) among all techniques")
        return " ".join(reasoning_parts)

    def get_technique_info(self, technique_key: str) -> Dict[str, Any]:
        """Get detailed information about a specific technique.

        Returns an independent copy so callers cannot mutate the catalog;
        unknown keys yield an empty dict.
        """
        if technique_key not in self.techniques:
            return {}
        return self._copy_technique(technique_key)

    def list_all_techniques(self) -> Dict[str, Dict[str, Any]]:
        """Return independent copies of all available techniques, keyed by id."""
        return {key: self._copy_technique(key) for key in self.techniques}