File size: 3,615 Bytes
ead4c16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
"""
Quick test script to verify FLAN-T5 integration works correctly.
Tests the core analyzer without launching the full Gradio UI.
"""

import os
import sys

# Make the sibling ``src`` directory importable so the package under test
# can be found without installation.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))

# Default runtime configuration for a local test run; ``setdefault`` keeps
# any values the caller already exported in the environment.
for _name, _default in (
    ("ENVIRONMENT", "development"),
    ("LOG_LEVEL", "INFO"),
    ("ENABLE_METRICS", "false"),
):
    os.environ.setdefault(_name, _default)

def test_analyzer():
    """Run WritingAnalyzer end-to-end against FLAN-T5 and report pass/fail.

    Returns:
        bool: True when the model produced a non-empty revision that differs
        from the input text; False on any failure, including import errors.
    """
    bar = "=" * 80
    rule = "-" * 80

    print(bar)
    print("Testing FLAN-T5 Integration")
    print(bar)

    try:
        from writing_studio.core.analyzer import WritingAnalyzer
        from writing_studio.core.config import settings

        print("\n✓ Imports successful")
        print(f"✓ Default model: {settings.default_model}")
        print(f"✓ Max model length: {settings.max_model_length}")

        # Sample input carried over from the user's earlier session.
        test_text = """My career ended unexpectedly. The company downsized and I was let go."""

        print("\n" + bar)
        print("Initializing WritingAnalyzer...")
        print(bar)

        analyzer = WritingAnalyzer()

        print("✓ Analyzer initialized")
        print(f"✓ Model service: {type(analyzer.model_service).__name__}")
        print(f"✓ Current model: {analyzer.model_service._current_model_name}")
        print(f"✓ Task type: {analyzer.model_service._task_type}")

        print("\n" + bar)
        print("Test Input:")
        print(bar)
        print(test_text)

        print("\n" + bar)
        print("Generating AI revision with FLAN-T5...")
        print("(This will take ~60 seconds on first run - model downloading)")
        print(bar + "\n")

        original, revision, feedback, diff_html, metadata = analyzer.analyze_and_compare(
            test_text,
            prompt_pack="General",
        )

        print("\n" + bar)
        print("RESULTS")
        print(bar)

        print("\n📄 Original Text:")
        print(rule)
        print(original)

        print("\n🤖 AI-Revised Text (FLAN-T5):")
        print(rule)
        print(revision)

        print("\n📊 Rubric Feedback:")
        print(rule)
        print(feedback)

        print(f"\n⏱️  Processing Time: {metadata['duration']:.2f}s")
        print(f"🤖 Model Used: {metadata['model']}")
        print(f"📝 Prompt Pack: {metadata['prompt_pack']}")

        print("\n" + bar)
        print("Test Result:")
        print(bar)

        # Guard clause: a usable revision must be non-empty and must not
        # simply echo the input back.
        if revision == original or not revision:
            print("❌ FAIL: Revision is identical to original or empty")
            return False

        print("✅ SUCCESS: FLAN-T5 generated a revision!")
        print("✅ The revision is different from the original text")

        # Heuristic: a raw continuation would embed the whole prompt and run
        # much longer than it; anything else looks like a genuine revision.
        if test_text not in revision or len(revision) < len(test_text) * 2:
            print("✅ Revision appears to be a proper revision (not continuation)")

        return True

    except ImportError as e:
        print(f"❌ Import Error: {e}")
        print("Make sure all dependencies are installed: pip install -r requirements.txt")
        return False

    except Exception as e:
        print(f"❌ Error during testing: {e}")
        import traceback
        traceback.print_exc()
        return False

if __name__ == "__main__":
    success = test_analyzer()
    sys.exit(0 if success else 1)