Spaces:
Sleeping
Sleeping
File size: 5,944 Bytes
23ca2e7 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 |
#!/usr/bin/env python3
"""
Test script to verify the analysis functions work correctly without Streamlit.
"""
import json
import sys
import os
# Add src directory to path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
from structure_analysis import detect_summary_fields, classify_data_structure, get_hierarchy_summary
def analyze_with_llm(data, target_field="rotation_enabled"):
    """
    Analyze *data* and build a structured analysis suitable for LLM prompting.

    Works without requiring Ollama: detects summary fields, classifies the
    data structure, summarizes the hierarchy, extracts one representative
    sample object, counts objects carrying *target_field*, and recommends
    fields to focus on.

    Args:
        data: Parsed JSON payload — expected to be a dict, optionally with a
            'results' key whose value maps section names to lists/dicts.
            (Assumed shape; confirm against the structure_analysis helpers.)
        target_field: Field name to count and to match against sample keys
            (matched case-insensitively).

    Returns:
        dict with keys: summary_fields_detected, classification,
        hierarchy_summary, total_objects, sample_object, summary_sample,
        recommended_fields.
    """
    print(f"DEBUG: Starting analysis with target_field: {target_field}")
    print(f"DEBUG: Data type: {type(data)}")
    print(f"DEBUG: Data keys: {list(data.keys()) if isinstance(data, dict) else 'Not a dict'}")

    # Detect summary fields
    print("DEBUG: Detecting summary fields...")
    summary_fields = detect_summary_fields(data)
    print(f"DEBUG: Found summary fields: {summary_fields}")

    print("DEBUG: Classifying data structure...")
    classification = classify_data_structure(data)
    print(f"DEBUG: Classification result: {classification}")

    print("DEBUG: Getting hierarchy summary...")
    hierarchy_summary = get_hierarchy_summary(data)
    print(f"DEBUG: Hierarchy summary: {hierarchy_summary}")

    # Extract a single representative sample object from the first section
    # that yields one.
    # BUG FIX: the original only broke out of the INNER loop after finding a
    # sample inside a dict section, so later sections kept being scanned and
    # could overwrite (or even clear) an already-found sample_object.
    print("DEBUG: Extracting samples...")
    sample_object = {}
    if 'results' in data:
        print("DEBUG: Found 'results' key in data")
        for section_name, section in data['results'].items():
            print(f"DEBUG: Processing section '{section_name}': {type(section)}")
            if isinstance(section, list) and len(section) > 0:
                sample_object = section[0]
                print(f"DEBUG: Found sample object from list: {sample_object}")
                break
            elif isinstance(section, dict):
                found_in_section = False
                for key, value in section.items():
                    if isinstance(value, list) and len(value) > 0:
                        # Only keep dict samples; non-dict list heads yield {}.
                        sample_object = value[0] if isinstance(value[0], dict) else {}
                        print(f"DEBUG: Found sample object from dict list: {sample_object}")
                        found_in_section = True
                        break
                if found_in_section:
                    break  # stop scanning further sections once a sample is taken
    else:
        print("DEBUG: No 'results' key found in data")

    # Pull out a summary section if one exists.
    # BUG FIX: 'results' may hold a list, and list has no .get(); guard the
    # type before chaining .get() so we don't raise AttributeError.
    results = data.get('results', {})
    summary_sample = (results.get('summary', {}) if isinstance(results, dict) else {}) \
        or data.get('summary', {})
    print(f"DEBUG: Summary sample: {summary_sample}")

    # Count objects with target field
    def count_objects_with_field(obj, field_name):
        """Recursively count dicts (at any depth) that contain *field_name*."""
        count = 0
        if isinstance(obj, dict):
            if field_name in obj:
                count += 1
            for v in obj.values():
                count += count_objects_with_field(v, field_name)
        elif isinstance(obj, list):
            for item in obj:
                count += count_objects_with_field(item, field_name)
        return count

    print("DEBUG: Counting objects with target field...")
    total_objects = count_objects_with_field(data, target_field)
    print(f"DEBUG: Total objects with '{target_field}': {total_objects}")

    # Generate analysis
    print("DEBUG: Generating analysis...")
    analysis = {
        "summary_fields_detected": summary_fields[:10],
        "classification": classification,
        "hierarchy_summary": hierarchy_summary,
        "total_objects": total_objects,
        "sample_object": sample_object,
        "summary_sample": summary_sample,
        "recommended_fields": []
    }
    print(f"DEBUG: Initial analysis: {analysis}")

    # Recommend fields: summary fields first, then config fields, then any
    # sample keys related to the target field.
    print("DEBUG: Generating field recommendations...")
    if summary_fields:
        analysis["recommended_fields"].extend(summary_fields[:3])
        print(f"DEBUG: Added summary fields: {summary_fields[:3]}")
    if classification.get('config_fields'):
        analysis["recommended_fields"].extend(classification['config_fields'][:2])
        print(f"DEBUG: Added config fields: {classification['config_fields'][:2]}")
    if sample_object:
        # BUG FIX: lowercase BOTH sides — a target_field containing uppercase
        # could never match the lowercased keys in the original comparison.
        needle = target_field.lower()
        target_related = [k for k in sample_object.keys() if needle in k.lower()]
        analysis["recommended_fields"].extend(target_related)
        print(f"DEBUG: Added target-related fields: {target_related}")

    print(f"DEBUG: Final recommended fields: {analysis['recommended_fields']}")
    print("DEBUG: Analysis completed successfully")
    return analysis
def test_analysis_functions():
    """Exercise the structure-analysis helpers against sample data on disk."""
    # Load the fixture expected to sit next to this script.
    with open('test_data.json', 'r') as f:
        data = json.load(f)

    print("=== Testing Analysis Functions ===")
    print(f"Data loaded: {type(data)}")
    print(f"Data keys: {list(data.keys())}")
    print()

    # Run each helper in turn, echoing its result with its own label.
    checks = [
        ("detect_summary_fields", detect_summary_fields, "Summary fields found"),
        ("classify_data_structure", classify_data_structure, "Classification result"),
        ("get_hierarchy_summary", get_hierarchy_summary, "Hierarchy summary"),
    ]
    for helper_name, helper, label in checks:
        print(f"Testing {helper_name}...")
        print(f"{label}: {helper(data)}")
        print()

    # Finally run the end-to-end analysis, reporting any failure verbosely.
    print("Testing full analysis...")
    try:
        analysis_result = analyze_with_llm(data, "rotation_enabled")
        print("✅ Analysis completed successfully!")
        print(f"Analysis keys: {list(analysis_result.keys())}")
        print(f"Recommended fields: {analysis_result.get('recommended_fields', [])}")
        print(f"Total objects with 'rotation_enabled': {analysis_result.get('total_objects', 0)}")
    except Exception as e:
        print(f"❌ Analysis failed: {e}")
        import traceback
        traceback.print_exc()
# Run the self-test when this module is executed directly (not on import).
if __name__ == "__main__":
    test_analysis_functions()
|