wu981526092 committed on
Commit
09d46fc
Β·
1 Parent(s): f231a40
agentgraph/reconstruction/prompt_reconstructor.py CHANGED
@@ -470,6 +470,140 @@ class PromptReconstructor:
470
 
471
  result["reconstructed_prompt"] = self._remove_line_numbers(complete_prompt)
472
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
473
  elif relation_type == "REQUIRES_TOOL" or relation_type == "NEXT":
474
  # These relations don't typically correspond to actual prompts in the execution
475
  # They are metadata that help establish dependencies and flow
 
470
 
471
  result["reconstructed_prompt"] = self._remove_line_numbers(complete_prompt)
472
 
473
+ elif relation_type == "CONSUMED_BY":
474
+ # Input/Request consumed by Agent - This represents input routing/dispatch
475
+ if source["type"] == "Input" and target["type"] == "Agent":
476
+ # This represents the system routing user input to the appropriate specialist agent
477
+ input_content = source.get("raw_prompt", source["name"])
478
+ agent_name = target["name"]
479
+ agent_description = target.get("description", "")
480
+ interaction = relation.get("interaction_prompt", "")
481
+
482
+ # This would be the routing/dispatch message from the system orchestrator
483
+ complete_prompt = f"SYSTEM ROUTING: Input Dispatch\n\n"
484
+ complete_prompt += f"User Input: {input_content}\n\n"
485
+ complete_prompt += f"🎯 ROUTING DECISION:\n"
486
+ complete_prompt += f"Selected Agent: {agent_name}\n"
487
+
488
+ if agent_description:
489
+ complete_prompt += f"Agent Expertise: {agent_description}\n"
490
+
491
+ complete_prompt += f"\nπŸ“‹ ROUTING RATIONALE:\n"
492
+ if interaction:
493
+ complete_prompt += f"{interaction}\n"
494
+ else:
495
+ complete_prompt += f"Input '{source['name']}' has been routed to {agent_name} based on the agent's specialized capabilities.\n"
496
+
497
+ complete_prompt += f"\nπŸ”„ NEXT STEP:\n"
498
+ complete_prompt += f"The system will now pass this input to {agent_name} for processing.\n"
499
+
500
+ result["reconstructed_prompt"] = complete_prompt
501
+
502
+ elif relation_type == "PRODUCES":
503
+ # Task produces Output - This represents the result generation
504
+ if source["type"] == "Task" and target["type"] == "Output":
505
+ task_name = source["name"]
506
+ output_name = target["name"]
507
+ output_content = target.get("raw_prompt", target["name"])
508
+ interaction = relation.get("interaction_prompt", "")
509
+
510
+ # This represents the task completion and output generation
511
+ complete_prompt = f"TASK COMPLETION: Output Generation\n\n"
512
+ complete_prompt += f"Completed Task: {task_name}\n"
513
+ complete_prompt += f"Generated Output: {output_name}\n\n"
514
+
515
+ complete_prompt += f"πŸ“€ OUTPUT DETAILS:\n"
516
+ complete_prompt += f"{output_content}\n\n"
517
+
518
+ if interaction:
519
+ complete_prompt += f"πŸ“‹ GENERATION NOTES:\n{interaction}\n\n"
520
+
521
+ complete_prompt += f"βœ… STATUS: Task successfully completed and output ready for delivery.\n"
522
+
523
+ result["reconstructed_prompt"] = complete_prompt
524
+
525
+ elif relation_type == "DELIVERS_TO":
526
+ # Output delivers to Human - This represents final result delivery
527
+ if source["type"] == "Output" and target["type"] == "Human":
528
+ output_name = source["name"]
529
+ output_content = source.get("raw_prompt", source["name"])
530
+ human_name = target["name"]
531
+ interaction = relation.get("interaction_prompt", "")
532
+
533
+ # This represents the final delivery to the end user
534
+ complete_prompt = f"FINAL DELIVERY: Output to User\n\n"
535
+ complete_prompt += f"πŸ“¬ DELIVERING TO: {human_name}\n"
536
+ complete_prompt += f"πŸ“¦ OUTPUT: {output_name}\n\n"
537
+
538
+ complete_prompt += f"πŸ“„ CONTENT:\n{output_content}\n\n"
539
+
540
+ if interaction:
541
+ complete_prompt += f"πŸ“‹ DELIVERY NOTES:\n{interaction}\n\n"
542
+
543
+ complete_prompt += f"βœ… DELIVERY STATUS: Output successfully delivered to user.\n"
544
+
545
+ result["reconstructed_prompt"] = complete_prompt
546
+
547
+ elif relation_type == "REQUIRED_BY":
548
+ # Tool required by Task - This represents tool dependency
549
+ if source["type"] == "Tool" and target["type"] == "Task":
550
+ tool_name = source["name"]
551
+ task_name = target["name"]
552
+ tool_desc = source.get("raw_prompt", "")
553
+ interaction = relation.get("interaction_prompt", "")
554
+
555
+ # This represents a tool dependency check or preparation
556
+ complete_prompt = f"DEPENDENCY CHECK: Tool Requirement\n\n"
557
+ complete_prompt += f"πŸ”§ REQUIRED TOOL: {tool_name}\n"
558
+ complete_prompt += f"πŸ“‹ FOR TASK: {task_name}\n\n"
559
+
560
+ if tool_desc:
561
+ complete_prompt += f"πŸ› οΈ TOOL DESCRIPTION:\n{tool_desc}\n\n"
562
+
563
+ if interaction:
564
+ complete_prompt += f"πŸ“‹ REQUIREMENT DETAILS:\n{interaction}\n\n"
565
+
566
+ complete_prompt += f"βœ… STATUS: Tool dependency verified and available for task execution.\n"
567
+
568
+ result["reconstructed_prompt"] = complete_prompt
569
+
570
+ elif relation_type == "SUBTASK_OF":
571
+ # Task is subtask of another Task - This represents task hierarchy
572
+ if source["type"] == "Task" and target["type"] == "Task":
573
+ subtask_name = source["name"]
574
+ parent_task_name = target["name"]
575
+ interaction = relation.get("interaction_prompt", "")
576
+
577
+ # This represents task decomposition or hierarchy
578
+ complete_prompt = f"TASK HIERARCHY: Subtask Relationship\n\n"
579
+ complete_prompt += f"🎯 PARENT TASK: {parent_task_name}\n"
580
+ complete_prompt += f"πŸ“‹ SUBTASK: {subtask_name}\n\n"
581
+
582
+ if interaction:
583
+ complete_prompt += f"πŸ“‹ HIERARCHY DETAILS:\n{interaction}\n\n"
584
+
585
+ complete_prompt += f"πŸ”„ WORKFLOW: Subtask '{subtask_name}' is part of larger task '{parent_task_name}'.\n"
586
+
587
+ result["reconstructed_prompt"] = complete_prompt
588
+
589
+ elif relation_type == "INTERVENES":
590
+ # Agent intervenes in process - This represents intervention/oversight
591
+ agent_name = source["name"] if source["type"] == "Agent" else target["name"]
592
+ process_name = target["name"] if source["type"] == "Agent" else source["name"]
593
+ interaction = relation.get("interaction_prompt", "")
594
+
595
+ # This represents agent intervention or oversight
596
+ complete_prompt = f"PROCESS INTERVENTION: Agent Oversight\n\n"
597
+ complete_prompt += f"πŸ‘€ INTERVENING AGENT: {agent_name}\n"
598
+ complete_prompt += f"βš™οΈ TARGET PROCESS: {process_name}\n\n"
599
+
600
+ if interaction:
601
+ complete_prompt += f"πŸ“‹ INTERVENTION DETAILS:\n{interaction}\n\n"
602
+
603
+ complete_prompt += f"🚨 ACTION: Agent '{agent_name}' is intervening in '{process_name}' for quality control or course correction.\n"
604
+
605
+ result["reconstructed_prompt"] = complete_prompt
606
+
607
  elif relation_type == "REQUIRES_TOOL" or relation_type == "NEXT":
608
  # These relations don't typically correspond to actual prompts in the execution
609
  # They are metadata that help establish dependencies and flow
debug_relations.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Debug script to check why some relations aren't being detected as using specialized logic.
4
+ """
5
+
6
+ import json
7
+ import sys
8
+ sys.path.append('/Users/zekunwu/Desktop/agent_monitoring/huggingface/AgentGraph')
9
+
10
+ from agentgraph.reconstruction import PromptReconstructor
11
+
12
+ def debug_relation_detection():
13
+ """Debug relation type detection."""
14
+ print("πŸ” Debugging Relation Type Detection")
15
+ print("=" * 60)
16
+
17
+ # Load sample data
18
+ kg_path = '/Users/zekunwu/Desktop/agent_monitoring/huggingface/AgentGraph/backend/database/samples/knowledge_graphs/kg_algorithm_sample_16.json'
19
+
20
+ with open(kg_path, 'r') as f:
21
+ kg_data = json.load(f)
22
+
23
+ kg = kg_data['graph_data']
24
+ reconstructor = PromptReconstructor(kg)
25
+
26
+ # Test each relation type
27
+ relation_types = ["DELIVERS_TO", "PRODUCES", "REQUIRED_BY", "USES"]
28
+
29
+ for rel_type in relation_types:
30
+ print(f"\nπŸ§ͺ Testing {rel_type}:")
31
+
32
+ type_relations = [r for r in kg['relations'] if r['type'] == rel_type]
33
+
34
+ if not type_relations:
35
+ print(f" ⚠️ No {rel_type} relations found")
36
+ continue
37
+
38
+ relation = type_relations[0]
39
+ print(f" πŸ“‹ Relation: {relation['id']}")
40
+ print(f" Source: {relation['source']} β†’ Target: {relation['target']}")
41
+
42
+ # Get entity types
43
+ source_entity = next(e for e in kg['entities'] if e['id'] == relation['source'])
44
+ target_entity = next(e for e in kg['entities'] if e['id'] == relation['target'])
45
+
46
+ print(f" Source type: {source_entity['type']}")
47
+ print(f" Target type: {target_entity['type']}")
48
+
49
+ # Test reconstruction
50
+ result = reconstructor.reconstruct_relation_prompt(relation['id'])
51
+
52
+ if "error" in result:
53
+ print(f" ❌ Error: {result['error']}")
54
+ continue
55
+
56
+ prompt = result.get('reconstructed_prompt', '')
57
+ print(f" πŸ“ Prompt length: {len(prompt)} chars")
58
+ print(f" πŸ“ First 200 chars: {prompt[:200]}...")
59
+
60
+ # Check for specialized logic indicators
61
+ indicators = ["TASK COMPLETION", "FINAL DELIVERY", "DEPENDENCY CHECK", "SYSTEM ROUTING"]
62
+ specialized = any(indicator in prompt for indicator in indicators)
63
+
64
+ print(f" 🎯 Specialized logic detected: {'βœ… Yes' if specialized else '❌ No'}")
65
+
66
+ if __name__ == "__main__":
67
+ debug_relation_detection()
test_improved_relations.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test the improved relation handling in prompt reconstruction.
4
+ Focus on CONSUMED_BY and other newly added relation types.
5
+ """
6
+
7
+ import json
8
+ import sys
9
+ sys.path.append('/Users/zekunwu/Desktop/agent_monitoring/huggingface/AgentGraph')
10
+
11
+ from agentgraph.reconstruction import PromptReconstructor
12
+
13
+ def test_consumed_by_relation():
14
+ """Test the specific CONSUMED_BY relation that was problematic."""
15
+ print("πŸ§ͺ Testing CONSUMED_BY Relation Handling")
16
+ print("=" * 60)
17
+
18
+ # Load the sample with CONSUMED_BY relation
19
+ kg_path = '/Users/zekunwu/Desktop/agent_monitoring/huggingface/AgentGraph/backend/database/samples/knowledge_graphs/kg_algorithm_sample_16.json'
20
+
21
+ with open(kg_path, 'r') as f:
22
+ kg_data = json.load(f)
23
+
24
+ kg = kg_data['graph_data']
25
+
26
+ # Initialize reconstructor
27
+ reconstructor = PromptReconstructor(kg)
28
+
29
+ # Find the CONSUMED_BY relation
30
+ consumed_by_relations = [r for r in kg['relations'] if r['type'] == 'CONSUMED_BY']
31
+
32
+ if not consumed_by_relations:
33
+ print("❌ No CONSUMED_BY relations found in sample")
34
+ return
35
+
36
+ print(f"Found {len(consumed_by_relations)} CONSUMED_BY relation(s)")
37
+
38
+ for relation in consumed_by_relations:
39
+ print(f"\nπŸ” Testing relation: {relation['id']}")
40
+ print(f" Source: {relation['source']} β†’ Target: {relation['target']}")
41
+ print(f" Interaction prompt: {relation.get('interaction_prompt', 'None')}")
42
+
43
+ # Test the reconstruction
44
+ result = reconstructor.reconstruct_relation_prompt(relation['id'])
45
+
46
+ if "error" in result:
47
+ print(f"❌ Error: {result['error']}")
48
+ continue
49
+
50
+ print(f"\nβœ… Reconstruction successful!")
51
+ print(f"πŸ“ Generated prompt preview:")
52
+ print("-" * 40)
53
+ prompt = result.get('reconstructed_prompt', '')
54
+ # Show first 300 characters
55
+ print(prompt[:300] + "..." if len(prompt) > 300 else prompt)
56
+ print("-" * 40)
57
+
58
+ # Check if it's no longer the generic fallback
59
+ if "SYSTEM ROUTING" in prompt:
60
+ print("βœ… Using specialized CONSUMED_BY logic (not generic fallback)")
61
+ else:
62
+ print("⚠️ May still be using generic fallback")
63
+
64
+ def test_all_relation_types():
65
+ """Test all supported relation types."""
66
+ print("\n\nπŸ§ͺ Testing All Relation Types")
67
+ print("=" * 60)
68
+
69
+ # Load a sample that has multiple relation types
70
+ kg_path = '/Users/zekunwu/Desktop/agent_monitoring/huggingface/AgentGraph/backend/database/samples/knowledge_graphs/kg_algorithm_sample_16.json'
71
+
72
+ with open(kg_path, 'r') as f:
73
+ kg_data = json.load(f)
74
+
75
+ kg = kg_data['graph_data']
76
+
77
+ # Initialize reconstructor
78
+ reconstructor = PromptReconstructor(kg)
79
+
80
+ # Get all relation types in this sample
81
+ relation_types = set(r['type'] for r in kg['relations'])
82
+
83
+ print(f"Relation types found in sample: {sorted(relation_types)}")
84
+
85
+ # Test each relation type
86
+ results = {}
87
+ for rel_type in sorted(relation_types):
88
+ print(f"\nπŸ” Testing {rel_type} relations:")
89
+
90
+ type_relations = [r for r in kg['relations'] if r['type'] == rel_type]
91
+
92
+ for relation in type_relations[:1]: # Test first one of each type
93
+ result = reconstructor.reconstruct_relation_prompt(relation['id'])
94
+
95
+ if "error" in result:
96
+ results[rel_type] = "❌ Error"
97
+ print(f" ❌ Error: {result['error']}")
98
+ else:
99
+ prompt = result.get('reconstructed_prompt', '')
100
+
101
+ # Check prompt quality indicators
102
+ if len(prompt) < 50:
103
+ results[rel_type] = "⚠️ Too short"
104
+ print(f" ⚠️ Prompt too short ({len(prompt)} chars)")
105
+ elif rel_type in prompt or "METADATA:" in prompt or "SYSTEM ROUTING:" in prompt:
106
+ results[rel_type] = "βœ… Specialized"
107
+ print(f" βœ… Using specialized logic ({len(prompt)} chars)")
108
+ else:
109
+ results[rel_type] = "⚠️ Generic"
110
+ print(f" ⚠️ May be using generic fallback ({len(prompt)} chars)")
111
+
112
+ # Summary
113
+ print(f"\nπŸ“Š RELATION TYPE COVERAGE SUMMARY:")
114
+ print("-" * 40)
115
+ for rel_type, status in results.items():
116
+ print(f" {rel_type:15} β†’ {status}")
117
+
118
+ def demonstrate_improved_consumed_by():
119
+ """Show the improvement in CONSUMED_BY relation handling."""
120
+ print("\n\n🎯 CONSUMED_BY Improvement Demonstration")
121
+ print("=" * 60)
122
+
123
+ # Load sample data
124
+ kg_path = '/Users/zekunwu/Desktop/agent_monitoring/huggingface/AgentGraph/backend/database/samples/knowledge_graphs/kg_algorithm_sample_16.json'
125
+
126
+ with open(kg_path, 'r') as f:
127
+ kg_data = json.load(f)
128
+
129
+ kg = kg_data['graph_data']
130
+ reconstructor = PromptReconstructor(kg)
131
+
132
+ # Find the specific CONSUMED_BY relation we were asked about
133
+ consumed_by_relations = [r for r in kg['relations'] if r['type'] == 'CONSUMED_BY']
134
+
135
+ if consumed_by_relations:
136
+ relation = consumed_by_relations[0] # Take the first one
137
+
138
+ print(f"πŸ“‹ Original relation:")
139
+ print(f" ID: {relation['id']}")
140
+ print(f" Type: {relation['type']}")
141
+ print(f" Source: {relation['source']} (Input)")
142
+ print(f" Target: {relation['target']} (Agent)")
143
+ print(f" Interaction: {relation.get('interaction_prompt', 'None')}")
144
+
145
+ # Get source and target entities
146
+ source_entity = next(e for e in kg['entities'] if e['id'] == relation['source'])
147
+ target_entity = next(e for e in kg['entities'] if e['id'] == relation['target'])
148
+
149
+ print(f"\nπŸ“‹ Entity details:")
150
+ print(f" Input: {source_entity['name']}")
151
+ print(f" Agent: {target_entity['name']} - {target_entity.get('description', 'No description')}")
152
+
153
+ # Reconstruct the prompt
154
+ result = reconstructor.reconstruct_relation_prompt(relation['id'])
155
+
156
+ if "error" not in result:
157
+ print(f"\nβœ… NEW IMPROVED PROMPT:")
158
+ print("=" * 50)
159
+ print(result['reconstructed_prompt'])
160
+ print("=" * 50)
161
+
162
+ print(f"\n🎯 Key improvements:")
163
+ print(" βœ… Clear system routing semantics")
164
+ print(" βœ… Proper input β†’ agent relationship")
165
+ print(" βœ… Explains the dispatch logic")
166
+ print(" βœ… Shows agent specialization")
167
+ print(" βœ… Indicates next workflow step")
168
+
169
+ if __name__ == "__main__":
170
+ test_consumed_by_relation()
171
+ test_all_relation_types()
172
+ demonstrate_improved_consumed_by()
173
+
174
+ print(f"\nπŸŽ‰ Testing completed! CONSUMED_BY relation handling has been significantly improved.")