#!/usr/bin/env python3
"""
Debug script to analyze model output issues and test structured generation.
"""

import json
import requests
import argparse
from typing import Dict, Optional

# Defaults for the local model server; main() overrides these from the CLI
# flags so every test function talks to the same endpoint.
DEFAULT_MODEL_URL = "http://0.0.0.0:12333/v1/chat/completions"
DEFAULT_MODEL_NAME = "eval-agent"


def call_model(message: str, model_url: Optional[str] = None,
               model_name: Optional[str] = None, system: str = "", temperature: float = 0.1,
               max_tokens: int = 512) -> Optional[str]:
    """Call the model with specific parameters for debugging."""
    # Fall back to the module-level defaults, which main() keeps in sync
    # with the CLI arguments.
    model_url = model_url or DEFAULT_MODEL_URL
    model_name = model_name or DEFAULT_MODEL_NAME

    messages = []
    if system:
        messages.append({"role": "system", "content": system})
    messages.append({"role": "user", "content": message})
    
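    # The payload follows the OpenAI-compatible chat completions schema;
    # "stream": False requests a single JSON body rather than server-sent events.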
    payload = {
        "model": model_name,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "stream": False
    }
    
    try:
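        # Any failure (connection error, non-2xx status via raise_for_status,
        # or an unexpected response shape) is caught below and reported
        # instead of raised — deliberate for a throwaway debug tool.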
        response = requests.post(model_url, json=payload, timeout=60)
        response.raise_for_status()
        result = response.json()
        return result["choices"][0]["message"]["content"]
    except Exception as e:
        print(f"Error: {e}")
        return None


def test_structured_output():
    """Test various prompts to debug structured output issues."""
    
    print("🔍 DEBUGGING MODEL STRUCTURED OUTPUT")
    print("="*60)
    
    # Test cases with different complexity levels
    test_cases = [
        {
            "name": "Simple Structure Test",
            "prompt": "Please respond with: <think>test thought</think> <subaspect>test aspect</subaspect> <tool>test tool</tool>",
            "system": "",
            "temperature": 0.0
        },
        {
            "name": "VBench Format Test", 
            "prompt": "How well does the model generate objects?",
            "system": "You must respond in this exact format: <think>your reasoning</think> <subaspect>specific aspect</subaspect> <tool>evaluation tool</tool>",
            "temperature": 0.1
        },
        {
            "name": "Training Data Example",
            "prompt": "How accurately does the model generate specific object classes as described in the text prompt?",
            "system": """You are an expert in evaluating video generation models. You must respond in this exact format:

<think>Your detailed reasoning about what to evaluate</think> <subaspect>The specific aspect to focus on</subaspect> <tool>Object Class</tool>

Available tools: Object Class, Scene, Color, Spatial Relationship, Human Action, Dynamic Degree, Multiple Objects, Overall Consistency, Aesthetic Quality, Imaging Quality, Motion Smoothness, Subject Consistency, Background Consistency""",
            "temperature": 0.0
        }
    ]
    
    for i, test in enumerate(test_cases, 1):
        print(f"\n{i}. {test['name']}")
        print("-" * 40)
        print(f"Prompt: {test['prompt'][:100]}...")
        print(f"Temperature: {test['temperature']}")
        
        response = call_model(
            message=test['prompt'],
            system=test['system'],
            temperature=test['temperature']
        )
        
        if response:
            print(f"Response: {response}")
            
            # Analyze structure
            has_think = "<think>" in response and "</think>" in response
            has_subaspect = "<subaspect>" in response and "</subaspect>" in response  
            has_tool = "<tool>" in response and "</tool>" in response
            
            print(f"Structure Analysis:")
            print(f"  ✅ Has <think> tags: {has_think}")
            print(f"  ✅ Has <subaspect> tags: {has_subaspect}")
            print(f"  ✅ Has <tool> tags: {has_tool}")
            print(f"  ✅ All tags present: {has_think and has_subaspect and has_tool}")
            
            # Check for common errors
            errors = []
            if "<think>" in response and "</tool>" in response and "</think>" not in response:
                errors.append("Missing </think> closing tag")
            if "Object Class</tool>" in response:
                errors.append("Tool name in wrong tag")
            open_count = sum(tag in response for tag in ("<think>", "<subaspect>", "<tool>"))
            close_count = sum(tag in response for tag in ("</think>", "</subaspect>", "</tool>"))
            if open_count != close_count:
                errors.append("Mismatched opening/closing tags")
                
            if errors:
                print(f"  ❌ Errors found: {', '.join(errors)}")
        else:
            print("❌ No response received")


def test_temperature_effects():
    """Test how temperature affects structured output quality."""
    
    print("\n\n🌡️  TEMPERATURE EFFECTS ON STRUCTURED OUTPUT")
    print("="*60)
    
    prompt = "How accurately does the model generate specific object classes?"
    system = "Respond in format: <think>reasoning</think> <subaspect>aspect</subaspect> <tool>Object Class</tool>"
    
    temperatures = [0.0, 0.1, 0.3, 0.7, 1.0]
    
    for temp in temperatures:
        print(f"\nTemperature: {temp}")
        print("-" * 30)
        
        response = call_model(
            message=prompt,
            system=system,
            temperature=temp,
            max_tokens=200
        )
        
        if response:
            print(f"Response: {response[:150]}...")
            
            # Check if structure is maintained
            correct_structure = (
                "<think>" in response and "</think>" in response and
                "<subaspect>" in response and "</subaspect>" in response and
                "<tool>" in response and "</tool>" in response
            )
            print(f"Correct structure: {'✅' if correct_structure else '❌'}")
        else:
            print("❌ No response")


def analyze_training_sample():
    """Analyze a training sample to understand expected format."""
    
    print("\n\n📚 TRAINING DATA ANALYSIS")
    print("="*60)
    
    # Load a training sample
    try:
        with open("data/postprocess_20250819/ea_cot_dataset_10k.json", 'r') as f:
            data = json.load(f)
            
        sample = data[0]  # First sample
        
        print("Training Sample:")
        print(f"Instruction: {sample['instruction']}")
        print(f"Expected Output: {sample['output']}")
        
        # Test model with exact training example
        print("\n🧪 Testing with exact training example:")
        response = call_model(
            message=sample['instruction'],
            system=sample.get('system', ''),
            temperature=0.0
        )
        
        print(f"Model Response: {response}")
        
        # Compare
        expected = sample['output']
        if response and expected in response:
            print("✅ Model output matches training data!")
        else:
            print("❌ Model output differs from training data")
            
            # Detailed comparison
            if response:
                print("\nDetailed Analysis:")
                print(f"Expected think: {expected[expected.find('<think>')+7:expected.find('</think>')][:50]}...")
                print(f"Expected subaspect: {expected[expected.find('<subaspect>')+11:expected.find('</subaspect>')]}")
                print(f"Expected tool: {expected[expected.find('<tool>')+6:expected.find('</tool>')]}")
                
                if '<think>' in response:
                    think_content = response[response.find('<think>')+7:response.find('</think>')] if '</think>' in response else "INCOMPLETE"
                    print(f"Actual think: {think_content[:50]}...")
                    
    except Exception as e:
        print(f"Training data analysis failed: {e}")


def main():
    global DEFAULT_MODEL_URL, DEFAULT_MODEL_NAME

    parser = argparse.ArgumentParser(description="Debug model structured output issues")
    parser.add_argument("--model_url", default=DEFAULT_MODEL_URL)
    parser.add_argument("--model_name", default=DEFAULT_MODEL_NAME)

    args = parser.parse_args()
    # Propagate CLI overrides: the test functions below call call_model()
    # without an explicit URL/name, so they read these module-level defaults.
    DEFAULT_MODEL_URL = args.model_url
    DEFAULT_MODEL_NAME = args.model_name

    # Test connection first
    print("🔗 Testing connection...")
    response = call_model("Hello")
    if not response:
        print("❌ Cannot connect to model server")
        return

    print("✅ Connected successfully!")
    
    # Run all tests
    test_structured_output()
    test_temperature_effects()
    analyze_training_sample()
    
    print("\n\n💡 RECOMMENDATIONS:")
    print("="*60)
    print("1. Use temperature=0.0 or very low temperature for structured output")
    print("2. Include explicit format instructions in system prompt") 
    print("3. Consider retraining with more structured output examples")
    print("4. Add format validation in your evaluation pipeline")
    print("5. Use constrained generation or parsing to fix malformed output")


if __name__ == "__main__":
    main()