File size: 3,898 Bytes
a2e1879
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
# llm_clients/manual.py
from typing import Generator, Any, Dict
from .base import LlmClient


class ManualClient(LlmClient):
    """
    A manual LLM client that prompts the user to type the "model output"
    by hand at the console.

    Useful for testing output guardrails: instead of calling a real model,
    the operator pastes the response they want the guardrails to evaluate.
    """

    def __init__(self, config: Dict[str, Any], system_prompt: str):
        """Initialize with the shared client config and system prompt."""
        super().__init__(config, system_prompt)
        print("✅ Manual LLM Client initialized for output testing.")

    def _print_banner(self, title: str, prompt: str) -> None:
        """Print the interactive-mode banner and input instructions."""
        print(f"\n{'='*60}")
        print(title)
        print(f"{'='*60}")
        print(f"💭 Input prompt: {prompt}")
        print("\n🤖 Please enter the LLM output you want to test with output guardrails:")
        print("(Press Enter twice to finish your input)\n")

    def _read_multiline_input(self) -> Any:
        """Collect console lines until two consecutive blank lines.

        Returns the stripped multiline text, or None if the user cancelled
        with Ctrl-C. (Return annotated Any: str | None, without widening the
        file's typing imports.)
        """
        lines = []
        blank_streak = 0
        while True:
            try:
                line = input()
            except KeyboardInterrupt:
                print("\n❌ Input cancelled by user.")
                return None
            if line == "":
                blank_streak += 1
                if blank_streak >= 2:
                    break
                # A single blank line is part of the message body.
                lines.append(line)
            else:
                blank_streak = 0
                lines.append(line)
        return "\n".join(lines).strip()

    def generate_content(self, prompt: str) -> str:
        """Prompt the user to manually enter a response and return it."""
        self._print_banner("📝 MANUAL OUTPUT MODE", prompt)
        response = self._read_multiline_input()
        if response is None:
            return "User cancelled input."
        if not response:
            response = "No output provided."
        print(f"\n✅ Captured output ({len(response)} characters)")
        return response

    def generate_content_stream(self, prompt: str) -> Generator[str, None, None]:
        """
        Prompt the user to manually enter a response and simulate streaming.

        Yields chunks whose concatenation equals the captured response
        exactly (including newlines and internal whitespace).
        """
        self._print_banner("📝 MANUAL OUTPUT MODE (Streaming)", prompt)
        full_response = self._read_multiline_input()
        if full_response is None:
            yield "User cancelled input."
            return
        if not full_response:
            full_response = "No output provided."

        print(f"\n✅ Captured output ({len(full_response)} characters)")
        print("\n🔄 Simulating streaming output for guardrail testing...")

        import time  # local import kept from the original; only needed here

        # BUG FIX: the previous implementation re-joined split() tokens with
        # single spaces, collapsing newlines so the streamed text did not
        # match the captured response. Stream fixed-size slices instead so
        # the chunks reproduce the response byte-for-byte.
        chunk_size = 8
        for start in range(0, len(full_response), chunk_size):
            yield full_response[start:start + chunk_size]
            time.sleep(0.1)  # small delay to simulate token-by-token arrival

    def _generate_content_impl(self, prompt: str) -> str:
        """Implementation hook for base class compatibility."""
        return self.generate_content(prompt)

    def _generate_content_stream_impl(self, prompt: str) -> Generator[str, None, None]:
        """Implementation hook for base class compatibility."""
        return self.generate_content_stream(prompt)