# SerendibLLM-PoemSong-Dataset / clean_lyrics.py
# Uploaded by Chamaka8 via huggingface_hub (commit 41cce5a, verified)
"""
clean_lyrics.py - Cleans raw scraped lyrics JSONL into training-ready JSONL.
Usage:
python3 clean_lyrics.py
python3 clean_lyrics.py --input my_raw.jsonl
python3 clean_lyrics.py --no-training
"""
import json
import re
import os
import argparse
# Default file names for each pipeline stage; each can be overridden by a
# CLI flag (--input / --output / --training).
RAW_FILE = "lyrics_raw.jsonl"          # raw scraped lyrics (input)
CLEAN_FILE = "lyrics_clean.jsonl"      # cleaned lyrics (output)
TRAINING_FILE = "sinhala_songs_training.jsonl"  # instruction-tuning rows (output)
# Sinhala marker phrases: any line containing one of these is treated as site
# boilerplate and dropped.  (Approximate meanings: "song lyrics", "the song's
# lyric text", "on this website", "yes, this ..." — verify with a Sinhala
# speaker before extending the list.)
BOILERPLATE = [
    'ගී පද',
    'ගීතයේ පද පෙළ',
    'මෙම වෙබ් අඩවියේ',
    'ඔව්, මෙම',
]
def is_sinhala(text):
    """Return True if *text* contains at least one character in the
    Sinhala Unicode block (U+0D80–U+0DFF)."""
    for ch in text:
        if '\u0d80' <= ch <= '\u0dff':
            return True
    return False
def sinhala_char_count(text):
    """Count how many characters of *text* fall in the Sinhala Unicode
    block (U+0D80–U+0DFF)."""
    count = 0
    for ch in text:
        if 0x0D80 <= ord(ch) <= 0x0DFF:
            count += 1
    return count
def clean_lyrics(text):
    """Normalize one raw scraped lyric body into clean lyric text.

    Pipeline: drop boilerplate lines, drop URL and non-Sinhala lines,
    strip trailing bare "title" lines, collapse runs of blank lines,
    and trim surrounding whitespace.
    """
    # 1. Remove any line containing a known site-boilerplate phrase.
    lines = [l for l in text.split('\n')
             if not any(marker in l for marker in BOILERPLATE)]
    # 2. Remove URL lines, then keep only Sinhala lines.  Whitespace-only
    #    lines survive so verse separation is preserved for step 4.
    lines = [l for l in lines if 'http' not in l and 'www.' not in l]
    lines = [l for l in lines if is_sinhala(l) or l.strip() == '']
    # 3. Strip trailing blank lines and short bare "title" lines — short
    #    lines without verse markers ('...' or '//').  NOTE: this removes
    #    *every* trailing short line, not just one, until it hits a long
    #    or marked line.
    while lines:
        last = lines[-1].strip()  # strip() already removes '\r' (the old
                                  # extra rstrip('\r') was redundant)
        if last == '' or (len(last) < 50 and '...' not in last
                          and '//' not in last):
            lines.pop()
        else:
            break
    # 4. Collapse 3+ consecutive newlines (2+ blank lines) into a single
    #    blank-line paragraph break.
    text = re.sub(r'\n{3,}', '\n\n', '\n'.join(lines))
    return text.strip()
def to_training_examples(item):
    """Expand one cleaned song record into several Alpaca-style
    instruction/response training rows (one per prompt variant)."""
    lyrics = item['content']
    song_title = item['title']
    singer = item.get('artist', '')
    prompts = [
        'සිංහල ආදර ගීතයක් ලියන්න',
        'Give me a Sinhala love song',
        'Create a meaningful Sinhala love song',
        'සිංහල ගීතයක් ලියන්න',
        "'" + song_title + "' වැනි සිංහල ගීතයක් ලියන්න",
    ]
    # Add an artist-style prompt only when a real artist name is present.
    if singer and singer != 'Unknown':
        prompts.append(singer + ' ගේ ශෛලියෙන් සිංහල ගීතයක් ලියන්න')
    header = (
        'Below is an instruction that describes a task. '
        'Write a response that appropriately completes the request.\n\n'
    )
    rows = []
    for prompt in prompts:
        full_text = (header + '### Instruction:\n' + prompt
                     + '\n\n### Response:\n' + lyrics)
        rows.append({
            'text': full_text,
            'instruction': prompt,
            'output': lyrics,
            'source': item.get('source', ''),
            'type': item.get('type', 'song'),
            'title': song_title,
        })
    return rows
def main(raw_file, clean_file, training_file, skip_training):
    """Run the cleaning pipeline.

    Reads one JSON object per line from ``raw_file``, cleans each song's
    lyrics, writes kept songs to ``clean_file``, and — unless
    ``skip_training`` — expands each kept song into instruction-tuning
    examples appended to ``training_file``.  Prints a summary at the end.
    """
    if not os.path.exists(raw_file):
        print('Input file not found: ' + raw_file)
        return
    total = kept = skipped = training_count = 0
    # try/finally guarantees the training file is closed even if cleaning
    # raises mid-loop (the original leaked the handle on error).
    f_train = None if skip_training else open(training_file, 'w', encoding='utf-8')
    try:
        with open(raw_file, 'r', encoding='utf-8') as f_in, \
                open(clean_file, 'w', encoding='utf-8') as f_clean:
            for line in f_in:
                line = line.strip()
                if not line:
                    continue
                total += 1
                try:
                    item = json.loads(line)
                except json.JSONDecodeError:
                    # NOTE: 'total' counts non-blank records, so "line N"
                    # here is the record index, not the physical file line.
                    print(' Skipping malformed JSON on line ' + str(total))
                    skipped += 1
                    continue
                cleaned = clean_lyrics(item.get('content', ''))
                # Reject songs with too little Sinhala text to train on.
                if sinhala_char_count(cleaned) < 50:
                    skipped += 1
                    print(' Skipped (too short): ' + item.get('title', '')[:50])
                    continue
                item['content'] = cleaned
                f_clean.write(json.dumps(item, ensure_ascii=False) + '\n')
                kept += 1
                if f_train:
                    for ex in to_training_examples(item):
                        f_train.write(json.dumps(ex, ensure_ascii=False) + '\n')
                        training_count += 1
    finally:
        if f_train:
            f_train.close()
    print('\n' + '=' * 50)
    print(' CLEANING COMPLETE')
    print('=' * 50)
    print(' Input songs: ' + str(total))
    print(' Kept: ' + str(kept))
    print(' Skipped: ' + str(skipped))
    print(' Clean JSONL: ' + clean_file)
    if not skip_training:
        print(' Training examples: ' + str(training_count) + ' -> ' + training_file)
    print('=' * 50)
if __name__ == '__main__':
    # CLI entry point: every flag is optional and falls back to the
    # module-level default file names.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--input', default=RAW_FILE)
    arg_parser.add_argument('--output', default=CLEAN_FILE)
    arg_parser.add_argument('--training', default=TRAINING_FILE)
    arg_parser.add_argument('--no-training', action='store_true')
    opts = arg_parser.parse_args()
    main(opts.input, opts.output, opts.training, opts.no_training)