File size: 13,766 Bytes
1c70d34 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 | #!/usr/bin/env python3
"""
Data loader for Quillan multimodal training
Handles JSONL fine-tuning data, song lyrics, and knowledge files
"""
import json
import os
import glob
import torch
from typing import List, Dict, Any, Tuple
import random
from PIL import Image
import numpy as np
class QuillanDataset:
def __init__(self, data_dir: str = "."):
self.data_dir = data_dir
self.samples = []
self.image_files = []
self.audio_files = []
self.video_files = []
self.tokenizer = None # Initialize tokenizer
self.load_all_data()
def set_tokenizer(self, tokenizer):
"""Set the tokenizer for proper text tokenization"""
self.tokenizer = tokenizer
def load_all_data(self):
"""Load all available datasets including multimedia"""
print("π Loading Quillan multimodal datasets...")
# 1. Load text data
self.load_jsonl_data()
self.load_song_lyrics()
self.load_knowledge_files()
# 2. Load multimedia data
self.load_image_files()
self.load_audio_files()
self.load_video_files()
print(f"β
Loaded {len(self.samples)} text samples")
print(f"β
Loaded {len(self.image_files)} image files")
print(f"β
Loaded {len(self.audio_files)} audio files")
print(f"β
Loaded {len(self.video_files)} video files")
print(f"β
Total training samples: {len(self.samples) + len(self.image_files) + len(self.audio_files) + len(self.video_files)}")
def load_image_files(self):
"""Load image files from Main images folder"""
# Go up two levels: from Quillan-v4.2-model to Quillan
images_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "Main images")
if os.path.exists(images_dir):
print(f"πΌοΈ Loading images from {images_dir}")
image_extensions = ['*.png', '*.jpg', '*.jpeg', '*.bmp', '*.tiff']
for ext in image_extensions:
pattern = os.path.join(images_dir, ext)
self.image_files.extend(glob.glob(pattern))
print(f"β
Found {len(self.image_files)} image files")
else:
print(f"β οΈ Main images directory not found at {images_dir}")
def load_audio_files(self):
"""Load audio files from Mp3 files folder"""
audio_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "Mp3 files")
if os.path.exists(audio_dir):
print(f"π΅ Loading audio from {audio_dir}")
audio_extensions = ['*.mp3', '*.wav', '*.flac', '*.aac', '*.ogg']
for ext in audio_extensions:
pattern = os.path.join(audio_dir, ext)
self.audio_files.extend(glob.glob(pattern))
print(f"β
Found {len(self.audio_files)} audio files")
else:
print(f"β οΈ Mp3 files directory not found at {audio_dir}")
def load_video_files(self):
"""Load video files from Lyric Videos folder"""
video_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "Lyric Videos")
if os.path.exists(video_dir):
print(f"π¬ Loading videos from {video_dir}")
video_extensions = ['*.mp4', '*.avi', '*.mov', '*.mkv', '*.webm']
for ext in video_extensions:
pattern = os.path.join(video_dir, ext)
self.video_files.extend(glob.glob(pattern))
print(f"β
Found {len(self.video_files)} video files")
else:
print(f"β οΈ Lyric Videos directory not found at {video_dir}")
def load_jsonl_data(self):
"""Load the fine-tuning JSONL dataset - handles both old and new formats"""
jsonl_path = os.path.join(self.data_dir, "Quillan_finetune_full_dataset.jsonl")
if os.path.exists(jsonl_path):
print(f"π Loading JSONL data from {jsonl_path}")
with open(jsonl_path, 'r', encoding='utf-8') as f:
for line_num, line in enumerate(f, 1):
try:
line = line.strip()
if not line:
continue
data = json.loads(line)
# Handle new simplified format with direct "text" field
if 'text' in data:
text = data['text']
if text and len(text.strip()) > 10:
self.samples.append({
'text': text,
'source': 'jsonl',
'line': line_num
})
# Handle old complex format with Output_Sections
elif 'Output_Sections' in data and 'Final output' in data['Output_Sections']:
text = data['Output_Sections']['Final output']
if text and len(text.strip()) > 10:
self.samples.append({
'text': text,
'source': 'jsonl',
'line': line_num
})
except (json.JSONDecodeError, KeyError) as e:
# Skip problematic lines but continue loading
continue
loaded_count = len([s for s in self.samples if s['source'] == 'jsonl'])
if loaded_count > 0:
print(f"β
Loaded {loaded_count} JSONL samples")
else:
print("β οΈ No valid JSONL samples loaded, trying other sources...")
def load_song_lyrics(self):
"""Load song lyrics from the Songs Lyrics directory in parent folder"""
lyrics_dir = os.path.join(os.path.dirname(os.path.abspath(self.data_dir)), "Songs Lyrics")
if os.path.exists(lyrics_dir):
print(f"π΅ Loading song lyrics from {lyrics_dir}")
lyrics_files = glob.glob(os.path.join(lyrics_dir, "*.md"))
for file_path in lyrics_files:
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
if content and len(content.strip()) > 50:
self.samples.append({
'text': content,
'source': 'lyrics',
'file': os.path.basename(file_path)
})
except Exception as e:
print(f"β οΈ Error loading {file_path}: {e}")
print(f"β
Loaded {len([s for s in self.samples if s['source'] == 'lyrics'])} lyric files")
def load_knowledge_files(self):
"""Load knowledge files from Quillan Knowledge files directory in parent folder"""
knowledge_dir = os.path.join(os.path.dirname(os.path.abspath(self.data_dir)), "Quillan Knowledge files")
if os.path.exists(knowledge_dir):
print(f"π§ Loading knowledge files from {knowledge_dir}")
knowledge_files = glob.glob(os.path.join(knowledge_dir, "*.md"))
for file_path in knowledge_files:
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
if content and len(content.strip()) > 100:
self.samples.append({
'text': content,
'source': 'knowledge',
'file': os.path.basename(file_path)
})
except Exception as e:
print(f"β οΈ Error loading {file_path}: {e}")
print(f"β
Loaded {len([s for s in self.samples if s['source'] == 'knowledge'])} knowledge files")
def get_training_batch(self, batch_size: int = 4, seq_len: int = 512) -> Dict[str, torch.Tensor]:
"""Get a training batch with proper multimodal inputs using real data"""
if len(self.samples) == 0:
raise ValueError("No data loaded!")
# Sample random texts
batch_samples = random.sample(self.samples, min(batch_size, len(self.samples)))
# Create text inputs
texts = [sample['text'][:seq_len*4] for sample in batch_samples] # Allow longer for tokenization
# Tokenize texts
if self.tokenizer:
# Use real tokenizer
text_ids = [self.tokenizer.encode(text, max_length=seq_len) for text in texts]
else:
# Fallback to mock tokenization
text_ids = []
for text in texts:
tokens = [min(ord(c), 999) for c in text] # Clamp to vocab size
if len(tokens) < seq_len:
tokens.extend([0] * (seq_len - len(tokens))) # Pad
else:
tokens = tokens[:seq_len] # Truncate
text_ids.append(tokens)
text_tensor = torch.tensor(text_ids, dtype=torch.long)
# Load real multimedia data when available
image_tensor = self._load_batch_images(batch_size)
audio_tensor = self._load_batch_audio(batch_size)
video_tensor = self._load_batch_video(batch_size)
return {
'text_tokens': text_tensor,
'image': image_tensor,
'audio': audio_tensor,
'video': video_tensor,
'raw_texts': texts
}
def _load_batch_images(self, batch_size: int) -> torch.Tensor:
"""Load a batch of real images with timeout protection, or generate random"""
if len(self.image_files) == 0:
return torch.randn(batch_size, 3, 256, 256)
batch_images = []
for _ in range(batch_size):
img_path = random.choice(self.image_files)
try:
# Quick check if file exists and has content
if not os.path.exists(img_path) or os.path.getsize(img_path) < 100:
batch_images.append(torch.randn(3, 256, 256))
continue
# Load with faster resize - use BILINEAR instead of LANCZOS
img = Image.open(img_path).convert('RGB')
img = img.resize((128, 128), Image.Resampling.BILINEAR) # Faster
# Pad to expected size
img_full = Image.new('RGB', (256, 256), (0, 0, 0))
img_full.paste(img, (64, 64)) # Center the 128x128 image
img_tensor = torch.tensor(np.array(img_full), dtype=torch.float32).permute(2, 0, 1) / 255.0
batch_images.append(img_tensor)
except Exception:
# Silent fail - don't spam warnings, just use random
batch_images.append(torch.randn(3, 256, 256))
return torch.stack(batch_images)
def _load_batch_audio(self, batch_size: int) -> torch.Tensor:
"""Load a batch of real audio, or generate random if no audio files"""
if len(self.audio_files) == 0:
# Fallback to random audio matching model expectations (2048 length)
return torch.randn(batch_size, 1, 2048)
batch_audio = []
for _ in range(batch_size):
# For now, just generate random audio (would need librosa for real audio processing)
# This is a placeholder - real implementation would load audio files
# Using correct length: 2048 to match model expectations
batch_audio.append(torch.randn(1, 2048))
return torch.stack(batch_audio)
def _load_batch_video(self, batch_size: int) -> torch.Tensor:
"""Load a batch of real video frames, or generate random if no video files"""
if len(self.video_files) == 0:
# Fallback to random video matching model expectations (8,32,32)
return torch.randn(batch_size, 3, 8, 32, 32)
batch_video = []
for _ in range(batch_size):
# For now, just generate random video (would need opencv for real video processing)
# This is a placeholder - real implementation would load video frames
# Using correct dimensions: (8,32,32) to match model expectations
batch_video.append(torch.randn(3, 8, 32, 32))
return torch.stack(batch_video)
def get_dataset_stats(self):
"""Get statistics about the loaded dataset"""
stats = {
'total_samples': len(self.samples),
'sources': {}
}
for sample in self.samples:
source = sample['source']
if source not in stats['sources']:
stats['sources'][source] = 0
stats['sources'][source] += 1
return stats
if __name__ == "__main__":
# Test the data loader
dataset = QuillanDataset()
stats = dataset.get_dataset_stats()
print("\nπ Dataset Statistics:")
print(f"Total samples: {stats['total_samples']}")
for source, count in stats['sources'].items():
print(f" {source}: {count}")
# Test batch generation
batch = dataset.get_training_batch(batch_size=2)
print(f"\nπ― Batch shapes:")
for key, tensor in batch.items():
if isinstance(tensor, torch.Tensor):
print(f" {key}: {tensor.shape}")
else:
print(f" {key}: {type(tensor)}")
|