File size: 13,417 Bytes
23d652c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
#!/usr/bin/env python3
"""
Script to normalize JSONL entries for LLM fine-tuning.
Filters out entries without text and normalizes content using OpenAI-compatible API.
"""

import json
import logging
import os
import sys
from pathlib import Path
from typing import Dict, Any, Optional
import time
import argparse

try:
    import openai
except ImportError:
    print("Error: openai package not found. Install with: pip install openai")
    sys.exit(1)

# Configure logging: messages go both to a file (normalize_log.txt in the
# current working directory) and to the console via StreamHandler.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('normalize_log.txt'),
        logging.StreamHandler()
    ]
)
# Module-level logger used throughout this script.
logger = logging.getLogger(__name__)

class JSONLNormalizer:
    """Filter and normalize JSONL text entries via an OpenAI-compatible chat API.

    Keeps per-run counters (newly normalized, previously processed, skipped,
    failed) that are reported in the final summary of process_jsonl().
    """

    def __init__(self, api_key: str, base_url: Optional[str] = None, model: str = "gpt-3.5-turbo"):
        """
        Initialize the normalizer with OpenAI-compatible API settings.

        Args:
            api_key: API key for the service
            base_url: Base URL for API (optional, defaults to OpenAI)
            model: Model name to use for normalization
        """
        self.client = openai.OpenAI(
            api_key=api_key,
            base_url=base_url
        )
        self.model = model
        # Run counters, summarized at the end of process_jsonl().
        self.processed_count = 0            # entries normalized in this run
        self.skipped_count = 0              # entries without usable text
        self.failed_count = 0               # API failures + JSON parse errors
        self.already_normalized_count = 0   # entries processed in a prior run

    def normalize_text(self, text: str, title: str = "", subtitle: str = "") -> Optional[str]:
        """
        Normalize text content using the API.

        Args:
            text: Main text content to normalize
            title: Article title for context
            subtitle: Article subtitle for context

        Returns:
            Normalized text, or None if the API call fails for any reason.
        """
        try:
            system_prompt = """You are an expert text editor helping to prepare content for LLM fine-tuning. 

Your task is to normalize and clean text while preserving its meaning and literary quality. Make these improvements:

1. Fix obvious typos and spelling errors
2. Normalize punctuation and spacing inconsistencies
3. Remove excessive whitespace and newlines (but preserve intentional line breaks for poetry/paragraphs)
4. Ensure proper capitalization
5. Fix encoding issues or strange characters
6. Maintain the original style and voice
7. Preserve intentional formatting (like poetry line breaks)
8. Remove any metadata or non-content text

Return ONLY the cleaned text, nothing else."""

            user_prompt = f"""Title: {title}
Subtitle: {subtitle}

Text to normalize:
{text}"""

            # Low temperature keeps the rewrite close to the original text.
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                temperature=0.1,
                max_tokens=4000
            )

            normalized_text = response.choices[0].message.content.strip()
            return normalized_text

        except Exception as e:
            # Deliberately broad: any API/transport failure marks this entry
            # as failed rather than aborting the whole run.
            logger.error(f"API normalization failed: {str(e)}")
            return None

    def is_valid_entry(self, entry: Dict[str, Any]) -> bool:
        """
        Check if entry has valid text content.

        Args:
            entry: JSONL entry dictionary

        Returns:
            True if entry has a non-empty string 'text' field
        """
        text = entry.get('text', '')
        return isinstance(text, str) and text.strip() != ''

    def is_already_normalized(self, entry: Dict[str, Any]) -> bool:
        """
        Check if entry has already been normalized.

        Args:
            entry: JSONL entry dictionary

        Returns:
            True if a previous run marked the entry as normalized or failed
        """
        return entry.get('_normalized', False) or entry.get('_normalization_failed', False)

    def process_jsonl(self, input_file: str, output_file: str, failed_file: str,
                     max_entries: Optional[int] = None, delay: float = 0.5,
                     force_reprocess: bool = False, append: bool = False):
        """
        Process the JSONL file and normalize entries.

        Args:
            input_file: Path to input JSONL file
            output_file: Path to output normalized JSONL file
            failed_file: Path to file for failed normalizations
            max_entries: Maximum number of entries to process (for testing)
            delay: Delay between API calls to avoid rate limits
            force_reprocess: If True, reprocess already normalized entries
            append: If True, append to existing output files instead of overwriting them
        """
        logger.info(f"Starting normalization of {input_file}")
        logger.info(f"Output file: {output_file} (mode: {'append' if append else 'overwrite'})")
        logger.info(f"Failed entries file: {failed_file} (mode: {'append' if append else 'overwrite'})")
        if force_reprocess:
            logger.info("Force reprocess enabled - will reprocess already normalized entries")

        # Determine file modes based on append flag
        output_mode = 'a' if append else 'w'
        failed_mode = 'a' if append else 'w'

        # Initialize so the final summary is safe even when the input file
        # is empty (the for loop would otherwise never bind line_num).
        line_num = 0

        with open(input_file, 'r', encoding='utf-8') as infile, \
             open(output_file, output_mode, encoding='utf-8') as outfile, \
             open(failed_file, failed_mode, encoding='utf-8') as failfile:

            for line_num, line in enumerate(infile, 1):
                try:
                    # Parse JSON line
                    entry = json.loads(line.strip())

                    # Skip entries without valid text
                    if not self.is_valid_entry(entry):
                        logger.debug(f"Line {line_num}: Skipping entry without text")
                        self.skipped_count += 1
                        continue

                    # Check if already normalized or failed (unless forcing reprocess)
                    if not force_reprocess and self.is_already_normalized(entry):
                        title = entry.get('title', '')
                        logger.debug(f"Line {line_num}: Entry '{title[:50]}...' already processed")

                        # Write to appropriate file based on previous result
                        if entry.get('_normalized', False):
                            outfile.write(json.dumps(entry, ensure_ascii=False) + '\n')
                        elif entry.get('_normalization_failed', False):
                            failfile.write(json.dumps(entry, ensure_ascii=False) + '\n')

                        self.already_normalized_count += 1

                        # Check max_entries limit after counting already normalized entries
                        if max_entries and (self.processed_count + self.already_normalized_count) >= max_entries:
                            logger.info(f"Reached maximum entries limit: {max_entries}")
                            break
                        continue

                    # Check max_entries limit before processing new entries
                    if max_entries and (self.processed_count + self.already_normalized_count) >= max_entries:
                        logger.info(f"Reached maximum entries limit: {max_entries}")
                        break

                    # Extract content for normalization
                    original_text = entry['text']
                    title = entry.get('title', '')
                    subtitle = entry.get('subtitle', '')

                    logger.info(f"Line {line_num}: Normalizing entry '{title[:50]}...'")

                    # Normalize the text
                    normalized_text = self.normalize_text(original_text, title, subtitle)

                    if normalized_text:
                        # Update entry with normalized text and bookkeeping
                        # markers so a re-run can skip it.
                        entry['text'] = normalized_text
                        entry['_original_length'] = len(original_text)
                        entry['_normalized_length'] = len(normalized_text)
                        entry['_normalized'] = True

                        # Write to output file
                        outfile.write(json.dumps(entry, ensure_ascii=False) + '\n')
                        self.processed_count += 1
                        logger.info(f"Line {line_num}: Successfully normalized")
                    else:
                        # Write failed entry to failed file
                        entry['_normalization_failed'] = True
                        failfile.write(json.dumps(entry, ensure_ascii=False) + '\n')
                        self.failed_count += 1
                        logger.warning(f"Line {line_num}: Normalization failed")

                    # Rate limiting delay (only for new API calls)
                    if delay > 0:
                        time.sleep(delay)

                except json.JSONDecodeError as e:
                    logger.error(f"Line {line_num}: JSON decode error: {str(e)}")
                    self.failed_count += 1
                except Exception as e:
                    logger.error(f"Line {line_num}: Unexpected error: {str(e)}")
                    self.failed_count += 1

                # Progress update every 10 input lines
                if line_num % 10 == 0:
                    total_processed = self.processed_count + self.already_normalized_count
                    logger.info(f"Progress: Processed {line_num} lines, "
                              f"Total processed: {total_processed}, "
                              f"Newly normalized: {self.processed_count}, "
                              f"Already processed: {self.already_normalized_count}, "
                              f"Skipped: {self.skipped_count}, "
                              f"Failed: {self.failed_count}")

        # Final summary
        total_processed = self.processed_count + self.already_normalized_count
        logger.info("=" * 50)
        logger.info("NORMALIZATION COMPLETE")
        logger.info(f"Total lines processed: {line_num}")
        logger.info(f"Total entries processed: {total_processed}")
        logger.info(f"Newly normalized: {self.processed_count}")
        logger.info(f"Already processed (skipped): {self.already_normalized_count}")
        logger.info(f"Skipped (no text): {self.skipped_count}")
        logger.info(f"Failed: {self.failed_count}")
        logger.info("=" * 50)

def main():
    """Command-line entry point: parse arguments, validate, run the normalizer."""
    arg_parser = argparse.ArgumentParser(description='Normalize JSONL entries for LLM fine-tuning')
    arg_parser.add_argument('input_file', help='Input JSONL file path')
    arg_parser.add_argument('-o', '--output', default='normalized_entries.jsonl',
                            help='Output file for normalized entries (default: normalized_entries.jsonl)')
    arg_parser.add_argument('-f', '--failed', default='failed_normalizations.jsonl',
                            help='Output file for failed entries (default: failed_normalizations.jsonl)')
    arg_parser.add_argument('-k', '--api-key', help='OpenAI API key (or set OPENAI_API_KEY env var)')
    arg_parser.add_argument('-u', '--base-url', help='Base URL for OpenAI-compatible API')
    arg_parser.add_argument('-m', '--model', default='gpt-3.5-turbo',
                            help='Model to use (default: gpt-3.5-turbo)')
    arg_parser.add_argument('--max-entries', type=int, help='Maximum entries to process (for testing)')
    arg_parser.add_argument('--delay', type=float, default=0.5,
                            help='Delay between API calls in seconds (default: 0.5)')
    arg_parser.add_argument('--force-reprocess', action='store_true',
                            help='Force reprocessing of already normalized entries')
    arg_parser.add_argument('--append', action='store_true',
                            help='Append to existing output files instead of overwriting them')

    opts = arg_parser.parse_args()

    # Resolve the API key: CLI flag wins, environment variable is the fallback.
    resolved_key = opts.api_key or os.getenv('OPENAI_API_KEY')
    if not resolved_key:
        logger.error("API key required. Use --api-key or set OPENAI_API_KEY environment variable")
        sys.exit(1)

    # Bail out early if the input file does not exist.
    if not Path(opts.input_file).exists():
        logger.error(f"Input file not found: {opts.input_file}")
        sys.exit(1)

    runner = JSONLNormalizer(api_key=resolved_key, base_url=opts.base_url, model=opts.model)

    # Run the normalization; a Ctrl-C is logged and exits cleanly, any other
    # error is logged and reported via a non-zero exit status.
    try:
        runner.process_jsonl(
            input_file=opts.input_file,
            output_file=opts.output,
            failed_file=opts.failed,
            max_entries=opts.max_entries,
            delay=opts.delay,
            force_reprocess=opts.force_reprocess,
            append=opts.append
        )
    except KeyboardInterrupt:
        logger.info("Process interrupted by user")
    except Exception as e:
        logger.error(f"Process failed: {str(e)}")
        sys.exit(1)


if __name__ == "__main__":
    main()