File size: 13,202 Bytes
217abc3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
"""Run manager for tracking test runs and managing output folders."""

import os
import json
import time
from datetime import datetime
from typing import Dict, Any, Optional, List
from pathlib import Path


class RunManager:
    """Manages test runs, folders, and logging.

    Each test run lives in a zero-padded numbered folder (``test_idNNNNN``)
    under ``base_dir`` containing a ``learning objectives`` folder, a
    ``questions`` folder (itself holding numbered ``q_run_NNN`` subfolders),
    and a ``log.log`` file.  When ``save_outputs`` is False nothing is
    written to disk and log messages only go to the console.
    """

    def __init__(self, base_dir: str = "results", save_outputs: bool = True):
        """Initialize the manager.

        Args:
            base_dir: Root directory under which run folders are created.
            save_outputs: When False, all filesystem writes are skipped.
        """
        self.base_dir = base_dir
        self.save_outputs = save_outputs
        self.current_run_id: Optional[str] = None
        self.current_run_dir: Optional[str] = None
        self.current_question_run_id: Optional[str] = None  # Track current question run ID
        self.log_file: Optional[str] = None
        self.run_start_time: Optional[float] = None
        self.last_objective_params: Optional[Dict[str, Any]] = None

        # Create base results directory
        if self.save_outputs:
            os.makedirs(self.base_dir, exist_ok=True)

    def _next_numbered_id(self, parent_dir: str, prefix: str, width: int) -> str:
        """Return the next sequential folder name ``<prefix><number>``.

        Scans existing subfolders of *parent_dir* whose names start with
        *prefix*, takes the highest numeric suffix, and returns prefix plus
        that number + 1, zero-padded to *width* digits.  Shared by run-id
        and question-run-id generation.  Safe to call when *parent_dir*
        does not exist (e.g. when ``save_outputs`` is False and the base
        directory was never created): returns the id for number 1.
        """
        if not os.path.isdir(parent_dir):
            return f"{prefix}{1:0{width}d}"

        numbers: List[int] = []
        for entry in os.listdir(parent_dir):
            if not entry.startswith(prefix):
                continue
            if not os.path.isdir(os.path.join(parent_dir, entry)):
                continue
            try:
                # Slice by prefix length (exact) rather than str.replace,
                # which would also strip a repeated prefix inside the name.
                numbers.append(int(entry[len(prefix):]))
            except ValueError:
                # Matches the prefix but has a non-numeric suffix; ignore.
                continue

        next_num = max(numbers) + 1 if numbers else 1
        return f"{prefix}{next_num:0{width}d}"

    def _get_next_run_id(self) -> str:
        """Generate the next unique run ID (``test_idNNNNN``).

        Fix: previously called os.listdir(base_dir) unconditionally and
        raised FileNotFoundError when base_dir did not exist (possible when
        save_outputs=False skipped the makedirs in __init__).
        """
        return self._next_numbered_id(self.base_dir, "test_id", 5)

    def _create_run_structure(self, run_id: str) -> str:
        """Create the folder structure for a run and return its path.

        When ``save_outputs`` is False no directories are created, but the
        would-be path is still returned so callers can record it.
        """
        run_dir = os.path.join(self.base_dir, run_id)
        if self.save_outputs:
            os.makedirs(run_dir, exist_ok=True)
            os.makedirs(os.path.join(run_dir, "learning objectives"), exist_ok=True)
            os.makedirs(os.path.join(run_dir, "questions"), exist_ok=True)
        return run_dir

    def _get_next_question_run_id(self) -> str:
        """Generate the next unique question run ID (``q_run_NNN``) for the
        current test run.  Defaults to ``q_run_001`` when no run is active."""
        if self.current_run_dir is None:
            return "q_run_001"
        questions_dir = os.path.join(self.current_run_dir, "questions")
        return self._next_numbered_id(questions_dir, "q_run_", 3)

    def _params_changed(self, new_params: Dict[str, Any]) -> bool:
        """Return True if objective generation parameters differ from the
        previous objective run (always True for the very first run)."""
        if self.last_objective_params is None:
            return True

        # Compare only the relevant keys; anything else in the dicts is ignored.
        keys_to_compare = ["files", "num_objectives", "num_runs", "model",
                          "incorrect_answer_model", "temperature"]
        return any(new_params.get(key) != self.last_objective_params.get(key)
                   for key in keys_to_compare)

    def start_objective_run(self, files: List[str], num_objectives: int, num_runs: str,
                           model: str, incorrect_answer_model: str, temperature: str) -> str:
        """
        Start a new objective generation run or continue the existing one.

        A new run folder (and log file) is created only when the generation
        parameters changed since the last call; otherwise logging continues
        into the current run.  Returns the run ID.
        """
        params = {
            "files": sorted(files),  # Sort for consistent comparison
            "num_objectives": num_objectives,
            "num_runs": num_runs,
            "model": model,
            "incorrect_answer_model": incorrect_answer_model,
            "temperature": temperature
        }

        # Check if we need a new run
        if self._params_changed(params):
            # Create new run
            self.current_run_id = self._get_next_run_id()
            self.current_run_dir = self._create_run_structure(self.current_run_id)
            self.log_file = os.path.join(self.current_run_dir, "log.log")
            self.last_objective_params = params

            # Log header
            self.log(f"=== New Learning Objectives Run: {self.current_run_id} ===", level="INFO")
            self.log(f"Inputs: {[os.path.basename(f) for f in files]}", level="INFO")
            self.log("Variables:", level="INFO")
            self.log(f"  Number of Learning Objectives per Run: {num_objectives}", level="INFO")
            self.log(f"  Number of Generation Runs: {num_runs}", level="INFO")
            self.log(f"  Model: {model}", level="INFO")
            self.log(f"  Model for Incorrect Answer Suggestions: {incorrect_answer_model}", level="INFO")
            self.log(f"  Temperature (0.0: Deterministic, 1.0: Creative): {temperature}", level="INFO")
            self.log("", level="INFO")  # Blank line
        else:
            # Continue existing run
            self.log("", level="INFO")  # Blank line
            self.log(f"=== Continuing Learning Objectives Run: {self.current_run_id} ===", level="INFO")

        self.run_start_time = time.time()
        return self.current_run_id

    def start_question_run(self, objectives_count: int, model: str,
                          temperature: str, num_questions: int, num_runs: int) -> str:
        """
        Start a question generation run (continues logging to the same run).

        If no objective run exists yet, a fresh test run folder is created.
        Also allocates the next ``q_run_NNN`` id.  Returns the run ID.
        """
        if self.current_run_id is None:
            # No objective run exists, create new run
            self.current_run_id = self._get_next_run_id()
            self.current_run_dir = self._create_run_structure(self.current_run_id)
            self.log_file = os.path.join(self.current_run_dir, "log.log")
            self.log(f"=== New Questions Run: {self.current_run_id} ===", level="INFO")
        else:
            self.log("", level="INFO")  # Blank line
            self.log("=== Generate Questions Run ===", level="INFO")

        # Get next question run ID for this test run
        self.current_question_run_id = self._get_next_question_run_id()
        self.log(f"Question Run ID: {self.current_question_run_id}", level="INFO")

        self.log("Variables:", level="INFO")
        self.log(f"  Number of Learning Objectives: {objectives_count}", level="INFO")
        self.log(f"  Number of Questions to Generate: {num_questions}", level="INFO")
        self.log(f"  Model: {model}", level="INFO")
        self.log(f"  Temperature (0.0: Deterministic, 1.0: Creative): {temperature}", level="INFO")
        self.log(f"  Number of Question Generation Runs: {num_runs}", level="INFO")
        self.log("", level="INFO")  # Blank line

        self.run_start_time = time.time()
        return self.current_run_id

    def log(self, message: str, level: str = "INFO"):
        """Write a log message to the console and, when enabled, append a
        timestamped line to the current run's log file."""
        # Always print to console
        print(f"[{level}] {message}")

        if not self.save_outputs or self.log_file is None:
            return

        timestamp = datetime.now().strftime("%m/%d %H:%M:%S")
        log_line = f"[{timestamp}][{level}] {message}\n"

        with open(self.log_file, "a", encoding="utf-8") as f:
            f.write(log_line)

    def end_run(self, run_type: str = "Learning Objectives"):
        """End the current run and log the total elapsed time.

        No-op when no run has been started.
        """
        if self.run_start_time is None:
            return

        elapsed = time.time() - self.run_start_time
        self.log(f"Total time for {run_type}: +{elapsed:.0f}s", level="INFO")
        self.log("", level="INFO")  # Blank line

    def save_objectives_outputs(self, best_in_group: str, all_grouped: str,
                               raw_ungrouped: str, params: Dict[str, Any]):
        """Save learning objectives outputs to files.

        Writes the three pre-serialized JSON strings, the input parameters,
        and a Markdown rendering of the best-in-group objectives into the
        run's "learning objectives" folder.  No-op when saving is disabled
        or no run is active.
        """
        if not self.save_outputs or self.current_run_dir is None:
            return

        obj_dir = os.path.join(self.current_run_dir, "learning objectives")

        # Save JSON outputs (already serialized by the caller)
        with open(os.path.join(obj_dir, "best_in_group.json"), "w", encoding="utf-8") as f:
            f.write(best_in_group)

        with open(os.path.join(obj_dir, "all_grouped.json"), "w", encoding="utf-8") as f:
            f.write(all_grouped)

        with open(os.path.join(obj_dir, "raw_ungrouped.json"), "w", encoding="utf-8") as f:
            f.write(raw_ungrouped)

        # Save input parameters
        with open(os.path.join(obj_dir, "input_parameters.json"), "w", encoding="utf-8") as f:
            json.dump(params, f, indent=2)

        # Save best-in-group learning objectives as Markdown.  Best-effort:
        # a malformed JSON string only costs the .md file, not the run.
        try:
            objectives_data = json.loads(best_in_group)
            md_content = "# Learning Objectives\n\n"
            for i, obj in enumerate(objectives_data, 1):
                learning_objective = obj.get("learning_objective", "")
                md_content += f"{i}. {learning_objective}\n"

            with open(os.path.join(obj_dir, "best_in_group.md"), "w", encoding="utf-8") as f:
                f.write(md_content)
        except Exception as e:
            self.log(f"Error creating markdown output: {e}", level="ERROR")

        self.log(f"Saved learning objectives outputs to {obj_dir}", level="INFO")

    def save_questions_outputs(self, best_ranked: str, all_grouped: str,
                              formatted_quiz: str, params: Dict[str, Any]):
        """Save questions outputs to files in a numbered ``q_run_NNN``
        subfolder of the current run's "questions" directory.  No-op when
        saving is disabled or no run is active."""
        if not self.save_outputs or self.current_run_dir is None:
            return

        # Create subfolder for this question run
        q_base_dir = os.path.join(self.current_run_dir, "questions")
        q_run_dir = os.path.join(q_base_dir, self.current_question_run_id if self.current_question_run_id else "q_run_001")
        os.makedirs(q_run_dir, exist_ok=True)

        # Save JSON outputs (already serialized by the caller)
        with open(os.path.join(q_run_dir, "best_ranked.json"), "w", encoding="utf-8") as f:
            f.write(best_ranked)

        with open(os.path.join(q_run_dir, "all_grouped.json"), "w", encoding="utf-8") as f:
            f.write(all_grouped)

        # Save formatted quiz as markdown
        with open(os.path.join(q_run_dir, "formatted_quiz.md"), "w", encoding="utf-8") as f:
            f.write(formatted_quiz)

        # Save input parameters
        with open(os.path.join(q_run_dir, "input_parameters.json"), "w", encoding="utf-8") as f:
            json.dump(params, f, indent=2)

        self.log(f"Saved questions outputs to {q_run_dir}", level="INFO")

    def get_current_run_id(self) -> Optional[str]:
        """Get the current run ID."""
        return self.current_run_id

    def get_current_run_dir(self) -> Optional[str]:
        """Get the current run directory."""
        return self.current_run_dir

    def get_current_question_run_id(self) -> Optional[str]:
        """Get the current question run ID."""
        return self.current_question_run_id

    def _latest_question_run_dir(self) -> Optional[str]:
        """Return the path of the newest ``q_run_NNN`` folder for the
        current run, or None when there is none.

        Zero-padded 3-digit names sort correctly lexicographically (up to
        q_run_999), so a plain sorted() suffices.
        """
        if self.current_run_dir is None:
            return None

        questions_dir = os.path.join(self.current_run_dir, "questions")
        if not os.path.exists(questions_dir):
            return None

        q_runs = sorted(
            d for d in os.listdir(questions_dir)
            if d.startswith("q_run_") and os.path.isdir(os.path.join(questions_dir, d))
        )
        if not q_runs:
            return None
        return os.path.join(questions_dir, q_runs[-1])

    def get_latest_formatted_quiz_path(self) -> Optional[str]:
        """Find the formatted_quiz.md from the latest question run, or None
        if no question run (or no quiz file) exists."""
        latest_dir = self._latest_question_run_dir()
        if latest_dir is None:
            return None

        quiz_path = os.path.join(latest_dir, "formatted_quiz.md")
        return quiz_path if os.path.exists(quiz_path) else None

    def save_edited_quiz(self, content: str, filename: str = "formatted_quiz_edited.md") -> Optional[str]:
        """Save edited quiz to the latest question run folder.

        Returns the output path, or None when saving is disabled or no
        question run exists yet.
        """
        if not self.save_outputs or self.current_run_dir is None:
            return None

        latest_dir = self._latest_question_run_dir()
        if latest_dir is None:
            return None

        output_path = os.path.join(latest_dir, filename)
        with open(output_path, "w", encoding="utf-8") as f:
            f.write(content)

        self.log(f"Saved edited quiz to {output_path}", level="INFO")
        return output_path


# Module-level singleton, created lazily by get_run_manager().
_run_manager = None

def get_run_manager() -> RunManager:
    """Return the shared RunManager, constructing it on first use."""
    global _run_manager
    if _run_manager is not None:
        return _run_manager
    _run_manager = RunManager()
    return _run_manager