File size: 5,496 Bytes
8b30412
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
#!/usr/bin/env python3
"""Test script for Gradio UI components."""

import asyncio
import logging
import sys

# Configure the root logger once at import time: INFO level, with a
# timestamped "time - logger name - level - message" record format.
# basicConfig with no handler argument attaches a StreamHandler (stderr).
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)


async def test_ui_components():
    """Exercise the chat-interface helper functions with sample data.

    Returns True when every check passes, False on any failure
    (including an import failure of the project package).
    """
    print("\n" + "=" * 60)
    print("TEST 1: UI Components")
    print("=" * 60)

    try:
        from chatassistant_retail.ui.chat_interface import (
            create_example_queries,
            format_chat_history,
            format_context_display,
            format_error_message,
            get_welcome_message,
        )

        print("\n✓ Testing chat interface functions...")

        # The welcome message should be non-empty text.
        greeting = get_welcome_message()
        print(f"  - Welcome message: {len(greeting)} characters")
        assert greeting

        # At least one canned example query must be provided.
        sample_queries = create_example_queries()
        print(f"  - Example queries: {len(sample_queries)} examples")
        assert sample_queries

        # A single user/assistant exchange collapses into one pair.
        conversation = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there!"},
        ]
        pairs = format_chat_history(conversation)
        print(f"  - Chat history formatting: {len(pairs)} pairs")
        assert len(pairs) == 1

        # A context holding one product should render to non-empty text.
        sample_context = {"products": [{"name": "Test", "sku": "SKU-123", "price": 99.99, "current_stock": 10}]}
        rendered_context = format_context_display(sample_context)
        print(f"  - Context display: {len(rendered_context)} characters")
        assert rendered_context

        # The error formatter must mention the word "Error".
        rendered_error = format_error_message("Test error")
        print(f"  - Error formatting: {len(rendered_error)} characters")
        assert "Error" in rendered_error

        print("\n✅ UI Components Test: PASSED")
        return True

    except Exception as exc:
        print(f"\n❌ UI Components Test: FAILED - {exc}")
        import traceback

        traceback.print_exc()
        return False


async def test_metrics_dashboard():
    """Exercise the metrics-dashboard helper functions with sample data.

    Returns True when every check passes, False on any failure
    (including an import failure of the project package).
    """
    print("\n" + "=" * 60)
    print("TEST 2: Metrics Dashboard")
    print("=" * 60)

    try:
        from chatassistant_retail.ui.metrics_dashboard import (
            format_activity_log,
            format_metrics_for_display,
            get_empty_metrics,
        )

        print("\n✓ Testing metrics dashboard functions...")

        # A fresh metrics dict starts with zero queries recorded.
        baseline = get_empty_metrics()
        print(f"  - Empty metrics: {len(baseline)} fields")
        assert baseline["total_queries"] == 0

        # The display formatter unpacks into four scalar values.
        sample_metrics = {
            "total_queries": 10,
            "avg_response_time": 1.5,
            "tool_calls_count": 3,
            "success_rate": 95.0,
        }
        q_total, q_avg, q_tools, q_success = format_metrics_for_display(sample_metrics)
        print(f"  - Metrics formatting: total={q_total}, avg={q_avg}, tools={q_tools}, success={q_success}")
        assert q_total == 10

        # One recorded activity entry should yield at least one log row.
        activity_metrics = {
            "recent_activity": [
                {"timestamp": "2025-12-05T20:00:00", "name": "test", "type": "function", "status": "success"}
            ]
        }
        log_rows = format_activity_log(activity_metrics)
        print(f"  - Activity log: {len(log_rows)} rows")
        assert log_rows

        print("\n✅ Metrics Dashboard Test: PASSED")
        return True

    except Exception as exc:
        print(f"\n❌ Metrics Dashboard Test: FAILED - {exc}")
        import traceback

        traceback.print_exc()
        return False


async def test_gradio_interface():
    """Verify the Gradio interface can be constructed and is launchable.

    Returns True when construction succeeds, False on any failure
    (including an import failure of the project package).
    """
    print("\n" + "=" * 60)
    print("TEST 3: Gradio Interface")
    print("=" * 60)

    try:
        from chatassistant_retail.ui import create_gradio_interface

        print("\n✓ Creating Gradio interface...")
        app = create_gradio_interface()

        print("  - Interface created successfully")
        print(f"  - Type: {type(app).__name__}")

        # A launchable Gradio object exposes a `launch` method.
        assert hasattr(app, "launch")
        print("  - Has launch method: ✓")

        print("\n✅ Gradio Interface Test: PASSED")
        print("\nNote: To fully test the UI, run: python -m chatassistant_retail")
        return True

    except Exception as exc:
        print(f"\n❌ Gradio Interface Test: FAILED - {exc}")
        import traceback

        traceback.print_exc()
        return False


async def main():
    """Run every test sequentially and print a summary.

    Returns True only when all tests pass.
    """
    print("\n" + "=" * 60)
    print("PHASE 5 GRADIO UI TESTING")
    print("=" * 60)

    # Run the suites in order; each awaited call completes before the next.
    suite = (
        ("UI Components", test_ui_components),
        ("Metrics Dashboard", test_metrics_dashboard),
        ("Gradio Interface", test_gradio_interface),
    )
    outcomes = {}
    for label, runner in suite:
        outcomes[label] = await runner()

    # Summary
    print("\n" + "=" * 60)
    print("TEST SUMMARY")
    print("=" * 60)

    num_passed = sum(outcomes.values())  # True counts as 1
    num_total = len(outcomes)

    for label, ok in outcomes.items():
        marker = "✅ PASSED" if ok else "❌ FAILED"
        print(f"{label:25} {marker}")

    print("\n" + "=" * 60)
    print(f"TOTAL: {num_passed}/{num_total} tests passed")
    print("=" * 60)

    return num_passed == num_total


if __name__ == "__main__":
    # Run the async suite and translate the boolean result into a process
    # exit code (0 = all passed, 1 = at least one failure) so CI can react.
    # sys.exit is used instead of the builtin exit(), which is a
    # site-module REPL convenience and not guaranteed in all run modes.
    success = asyncio.run(main())
    sys.exit(0 if success else 1)