# summarizer_tool.py
# Standard library, third-party, and Hugging Face imports used throughout this file.
# pipeline is a high-level API for running models on various tasks;
# the Auto* classes load specific pre-trained models and their preprocessors.
import io  # In-memory audio conversions
import logging
import os
import re
import tempfile
from dataclasses import dataclass
from typing import Any

import numpy as np  # Numerical operations on audio/image data
import soundfile as sf
import torch
from accelerate import Accelerator
from datasets import Audio, load_dataset
from PIL import Image
from pydub import AudioSegment
from torch.optim import AdamW  # PyTorch's AdamW (the transformers version is deprecated)
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (
    AutoFeatureExtractor,
    AutoModelForSequenceClassification,
    AutoModelForSpeechSeq2Seq,
    AutoTokenizer,
    DataCollatorWithPadding,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    pipeline,
)
import evaluate
from gtts import gTTS
from langchain.chains import RetrievalQA
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFacePipeline
from langchain_community.vectorstores import FAISS

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- Configuration for text classification training ---
MODEL_CHECKPOINT = "bert-base-uncased"  # Or "distilbert-base-uncased" for a smaller model
DATASET_NAME = "glue"   # Example: GLUE benchmark; any text-classification dataset works
SUBSET_NAME = "sst2"    # Example: SST-2 (Stanford Sentiment Treebank)
TRAIN_SPLIT = "train"
NUM_TRAIN_EPOCHS = 3    # Number of epochs for training
BATCH_SIZE = 8
LEARNING_RATE = 2e-5
MAX_SEQ_LENGTH = 128
SAVE_MODEL_EVERY_EPOCH = False  # Set to True to save a checkpoint after each epoch


# --- 1. Load Dataset in Streaming Mode ---
def load_dataset_streaming(dataset_name, subset_name=None, split="train"):
    logging.info(f"Loading dataset '{dataset_name}' (subset: {subset_name}) in streaming mode for split '{split}'...")
    try:
        if subset_name:
            dataset = load_dataset(dataset_name, subset_name, split=split, streaming=True)
        else:
            dataset = load_dataset(dataset_name, split=split, streaming=True)
        logging.info(f"Dataset '{dataset_name}' loaded successfully in streaming mode.")
        return dataset
    except Exception as e:
        logging.error(f"Error loading dataset '{dataset_name}': {e}")
        return None


# --- 2. Data Preprocessing Function ---
def preprocess_function(examples, tokenizer, max_seq_length):
    """Tokenizes and prepares text data for the model."""
    return tokenizer(examples["sentence"], truncation=True, padding="max_length", max_length=max_seq_length)
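
# Example (hedged sketch) of what preprocess_function produces: with padding="max_length" and
# max_length=8, a short sentence such as "a great movie" maps to fixed-length fields, i.e.
# tokens "[CLS] a great movie [SEP] [PAD] [PAD] [PAD]" with attention_mask [1, 1, 1, 1, 1, 0, 0, 0].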


# --- 3. Main Training Loop ---
def train_model_with_streaming_data():
    accelerator = Accelerator()
    logging.info(f"Accelerator device: {accelerator.device}")

    logging.info(f"Loading dataset 'glue' (subset: sst2) in streaming mode for split '{TRAIN_SPLIT}'...")
    train_dataset = load_dataset('glue', 'sst2', split=TRAIN_SPLIT,
                                 streaming=True)  # <-- This line defines 'train_dataset'
    logging.info("Dataset 'glue' loaded successfully in streaming mode.")

    # ... inside train_model_with_streaming_data function ...

    tokenizer = AutoTokenizer.from_pretrained(MODEL_CHECKPOINT)
    model = AutoModelForSequenceClassification.from_pretrained(MODEL_CHECKPOINT, num_labels=2)

    # Tokenize on the fly; the dataset's 'label' column is renamed to 'labels', which is the
    # field name the model expects when computing the loss.
    def process_function(examples):
        tokenized_examples = tokenizer(examples["sentence"], truncation=True)
        tokenized_examples["labels"] = examples["label"]
        return tokenized_examples

    # map() on an IterableDataset applies the transformation lazily as examples are streamed.
    tokenized_train_dataset = train_dataset.map(
        process_function, batched=True, remove_columns=["sentence", "idx", "label"]
    )

    # DataCollatorWithPadding pads each batch dynamically and converts 'labels' to tensors,
    # which is what BertForSequenceClassification expects.
    data_collator_for_classification = DataCollatorWithPadding(tokenizer=tokenizer)

    # Return PyTorch tensors when iterating the streamed dataset.
    tokenized_train_dataset = tokenized_train_dataset.with_format("torch")

    # DataLoader over the streaming dataset. IterableDataset has no len(), and shuffling uses
    # a buffer: examples are drawn randomly from a rolling window of buffer_size items.
    train_dataloader = DataLoader(
        tokenized_train_dataset.shuffle(seed=42, buffer_size=10_000),
        batch_size=BATCH_SIZE,
        collate_fn=data_collator_for_classification,
    )

    optimizer = AdamW(model.parameters(), lr=LEARNING_RATE)

    # Prepare model, optimizer, and dataloader for the Accelerator created above.
    model, optimizer, train_dataloader = accelerator.prepare(
        model, optimizer, train_dataloader
    )

    model.train()
    for epoch in range(NUM_TRAIN_EPOCHS):
        logging.info(f"Starting Epoch {epoch + 1}/{NUM_TRAIN_EPOCHS}")
        # For streaming datasets, set_epoch reshuffles the buffer deterministically each epoch.
        if hasattr(train_dataloader.dataset, "set_epoch"):
            train_dataloader.dataset.set_epoch(epoch)

        progress_bar = tqdm(train_dataloader, desc=f"Epoch {epoch + 1}",
                            disable=not accelerator.is_local_main_process)
        for i, batch in enumerate(progress_bar):
            # The raw 'label' column was removed during map(); only 'labels' reaches the model.
            if "label" in batch:
                batch.pop("label")
            outputs = model(**batch)
            loss = outputs.loss
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()

            progress_bar.set_postfix({"loss": loss.item()})
            if i % 100 == 0:
                logging.info(f"Step {i}, Loss: {loss.item()}")

        logging.info(f"Epoch {epoch + 1} finished. Last batch loss: {loss.item():.4f}")

        if SAVE_MODEL_EVERY_EPOCH:
            output_dir = f"./model_epoch_{epoch + 1}"
            accelerator.wait_for_everyone()
            unwrapped_model = accelerator.unwrap_model(model)
            unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save)
            tokenizer.save_pretrained(output_dir)
            logging.info(f"Model saved to {output_dir}")

    logging.info("Training complete.")

    # --- Saving the final model ---
    final_output_dir = "./final_model"
    accelerator.wait_for_everyone()
    unwrapped_model = accelerator.unwrap_model(model)
    unwrapped_model.save_pretrained(final_output_dir, save_function=accelerator.save)
    tokenizer.save_pretrained(final_output_dir)
    logging.info(f"Final model saved to {final_output_dir}")

    # You can then load this model for inference
    # from transformers import pipeline
    # classifier = pipeline("sentiment-analysis", model=final_output_dir, tokenizer=final_output_dir)
    # print(classifier("This movie was great!"))
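

# A hedged sketch of evaluating the saved model on a small, non-streaming validation split
# (the streamed training loop above never sees a validation set). Everything below is
# illustrative: the helper name and the 200-example slice are assumptions, and the function
# is defined here but never called automatically.
def evaluate_on_validation(model_dir: str = "./final_model", num_examples: int = 200):
    eval_tokenizer = AutoTokenizer.from_pretrained(model_dir)
    eval_model = AutoModelForSequenceClassification.from_pretrained(model_dir)
    eval_model.eval()
    # The GLUE SST-2 validation split is small, so it can be loaded eagerly (non-streaming).
    eval_dataset = load_dataset("glue", "sst2", split=f"validation[:{num_examples}]")
    correct = 0
    for example in eval_dataset:
        inputs = eval_tokenizer(example["sentence"], truncation=True, return_tensors="pt")
        with torch.no_grad():
            logits = eval_model(**inputs).logits
        correct += int(logits.argmax(dim=-1).item() == example["label"])
    accuracy = correct / len(eval_dataset)
    logging.info(f"Validation accuracy on {len(eval_dataset)} examples: {accuracy:.3f}")
    return accuracy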


if __name__ == "__main__":
    train_model_with_streaming_data()

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- Global Cache for Pipelines ---
# This prevents reloading the same model multiple times
_pipeline_cache = {}


def get_pipeline(task_name, model_name=None, **kwargs):
    """

    Retrieves a Hugging Face pipeline, caching it for efficiency.

    """
    cache_key = f"{task_name}-{model_name}-{hash(frozenset(kwargs.items()))}"
    if cache_key not in _pipeline_cache:
        logging.info(f"Loading pipeline for task '{task_name}' with model '{model_name}'...")
        if model_name:
            _pipeline_cache[cache_key] = pipeline(task_name, model=model_name, **kwargs)
        else:
            _pipeline_cache[cache_key] = pipeline(task_name, **kwargs)  # Uses default model for task
        logging.info(f"Pipeline '{task_name}' loaded.")
    return _pipeline_cache[cache_key]
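
# Example (hedged sketch): repeated calls with the same task/model reuse the cached object,
# so the underlying model is only loaded once. Left commented out so importing this file
# does not download models.
# sentiment = get_pipeline("sentiment-analysis", model_name="distilbert-base-uncased-finetuned-sst-2-english")
# again = get_pipeline("sentiment-analysis", model_name="distilbert-base-uncased-finetuned-sst-2-english")
# assert sentiment is again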


class AIToolDispatcher:
    def __init__(self):
        logging.info("Initializing AI Tool Dispatcher...")
        # Define default models for various tasks.
        # You can change these to other models from Hugging Face Hub.
        self.default_models = {
            "text-classification": "distilbert-base-uncased-finetuned-sst-2-english",
            "sentiment-analysis": "distilbert-base-uncased-finetuned-sst-2-english",
            # Same as text-classification for sentiment
            "summarization": "sshleifer/distilbart-cnn-12-6",
            "text-generation": "gpt2",
            "translation_en_to_fr": "Helsinki-NLP/opus-mt-en-fr",
            "image-classification": "google/vit-base-patch16-224",
            "object-detection": "facebook/detr-resnet-50",
            "automatic-speech-recognition": "openai/whisper-tiny.en",  # For English ASR
            # "question-answering": "distilbert-base-uncased-distilled-squad", # Example for QA
            # "fill-mask": "bert-base-uncased", # Example for fill-mask
            # "token-classification": "dbmdz/bert-large-cased-finetuned-conll03-eng", # Example for NER
        }
        logging.info("Dispatcher initialized.")

    def _get_task_pipeline(self, task: str, model_name: str = None):
        """Helper to get a cached pipeline for a specific task."""
        final_model_name = model_name if model_name else self.default_models.get(task)
        if not final_model_name:
            raise ValueError(f"No default model specified for task '{task}'. Please provide `model_name`.")
        return get_pipeline(task, model_name=final_model_name)

    def process_text(self, text: str, task: str = "text-classification", **kwargs):
        """Processes text input for a given NLP task."""
        if not isinstance(text, str):
            raise TypeError("Text input must be a string.")

        logging.info(f"Processing text for task: {task}")
        task_pipeline = self._get_task_pipeline(task)
        result = task_pipeline(text, **kwargs)
        return result

    def process_image(self, image_path: str, task: str = "image-classification", **kwargs):
        """Processes image file input for a given computer vision task."""
        if not os.path.exists(image_path):
            raise FileNotFoundError(f"Image file not found: {image_path}")

        logging.info(f"Processing image for task: {task}")

        try:
            image = Image.open(image_path)
        except Exception as e:
            raise ValueError(f"Could not open image file: {e}")

        task_pipeline = self._get_task_pipeline(task)
        result = task_pipeline(image, **kwargs)
        return result

    def process_audio(self, audio_path: str, task: str = "automatic-speech-recognition", **kwargs):
        """Processes audio file input for a given audio task."""
        if not os.path.exists(audio_path):
            raise FileNotFoundError(f"Audio file not found: {audio_path}")

        logging.info(f"Processing audio for task: {task}")

        # Whisper models expect audio in a specific format (16kHz, mono, float32)
        # We'll use pydub and soundfile to convert if necessary
        try:
            audio = AudioSegment.from_file(audio_path)
            # Convert to mono, 16kHz
            audio = audio.set_channels(1).set_frame_rate(16000)

            # Export to a format soundfile can read in memory
            buffer = io.BytesIO()
            audio.export(buffer, format="wav")
            buffer.seek(0)

            # Read with soundfile to get numpy array and sample rate
            array, sampling_rate = sf.read(buffer)

            # Ensure it's float32
            if array.dtype != np.float32:
                array = array.astype(np.float32)

        except Exception as e:
            logging.error(f"Error processing audio file for conversion: {e}")
            raise ValueError(f"Could not prepare audio file: {e}")

        task_pipeline = self._get_task_pipeline(task)
        # The ASR pipeline accepts a dict carrying the raw waveform and its sampling rate.
        result = task_pipeline({"raw": array, "sampling_rate": sampling_rate}, **kwargs)
        return result

    def process_input(self, input_data: str, task: str = None, **kwargs):
        """

        Main entry point for the AI tool. Tries to determine input type and

        dispatches to the appropriate processing function.



        Args:

            input_data (str): Can be raw text or a file path (for image/audio/video).

            task (str, optional): The specific AI task to perform (e.g., "summarization",

                                  "object-detection", "automatic-speech-recognition").

                                  REQUIRED for non-text inputs.

                                  For text, it defaults to "text-classification".

            **kwargs: Additional arguments to pass to the specific pipeline.



        Returns:

            dict or list: The result from the AI model.

        """
        if not isinstance(input_data, str):
            raise TypeError("Input data must be a string (raw text or file path).")

        # --- Modality Detection ---
        if os.path.exists(input_data):
            # It's a file path
            file_extension = input_data.split('.')[-1].lower()

            if file_extension in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tiff']:
                if not task:
                    task = "image-classification"
                if task not in ("image-classification", "object-detection"):
                    logging.warning(
                        f"Task '{task}' may not be suitable for image input. Proceeding with it anyway.")
                return self.process_image(input_data, task=task, **kwargs)

            elif file_extension in ['mp3', 'wav', 'ogg', 'flac', 'm4a']:
                if not task:
                    task = "automatic-speech-recognition"
                if task != "automatic-speech-recognition" and not task.startswith("audio-"):
                    logging.warning(
                        f"Task '{task}' may not be suitable for audio input. Proceeding with it anyway.")
                return self.process_audio(input_data, task=task, **kwargs)

            elif file_extension in ['mp4', 'avi', 'mov', 'mkv']:
                # Video processing is complex and often involves frame extraction + image/audio processing
                # For a true "all-in-one" model, this would need dedicated video models or
                # a pipeline to extract frames and audio, then pass to image/audio models.
                # This example will skip full video processing for simplicity, but you can build upon it.
                logging.warning("Video processing is highly complex and not fully implemented in this example. "
                                "You'd typically extract frames/audio and process them separately.")
                raise NotImplementedError("Full video processing is beyond this generalized example.")
            else:
                raise ValueError(f"Unsupported file type: .{file_extension}. Or specify task for this file.")
        else:
            # Assume raw text if the string is not an existing file path
            if not task:
                task = "text-classification"  # Default task for text
            if task not in self.default_models and not kwargs.get('model'):
                raise ValueError(
                    f"Unknown text task: '{task}'. Please choose a supported text task or provide a model name.")
            return self.process_text(input_data, task=task, **kwargs)


# --- Example Usage ---
if __name__ == "__main__":
    dispatcher = AIToolDispatcher()

    # --- Text Examples ---
    print("\n--- Text Examples ---")
    text_input = "The new movie was absolutely fantastic! I highly recommend it."
    print(f"Input: '{text_input}'")
    print(f"Sentiment: {dispatcher.process_input(text_input, task='sentiment-analysis')}")

    text_to_summarize = (
        "Artificial intelligence (AI) is intelligence—perceiving, synthesizing, and inferring information—demonstrated by machines, as opposed to intelligence displayed by animals or humans. Example tasks in which AI is used include speech recognition, computer vision, translation, and others. AI applications include advanced web search engines, recommendation systems, understanding human speech, self-driving cars, and competing at the highest level in strategic game systems.")
    print(f"\nInput: '{text_to_summarize}'")
    print(f"Summary: {dispatcher.process_input(text_to_summarize, task='summarization', max_length=50, min_length=10)}")

    text_to_generate = "In a galaxy far, far away, a brave knight"
    print(f"\nInput: '{text_to_generate}'")
    generated_text = dispatcher.process_input(text_to_generate, task='text-generation', max_new_tokens=50,
                                              num_return_sequences=1)
    print(f"Generated Text: {generated_text[0]['generated_text']}")

    # --- Image Examples ---
    print("\n--- Image Examples ---")
    # To run image examples, you need to have image files.
    # Let's create dummy image files for demonstration purposes if they don't exist.
    # In a real scenario, replace these with actual paths to your images.
    dummy_image_path_1 = "dummy_cat_image.jpg"
    dummy_image_path_2 = "dummy_building_image.png"

    if not os.path.exists(dummy_image_path_1):
        print(f"Creating a dummy image file at {dummy_image_path_1} for demonstration.")
        try:
            # Minimal image creation with PIL
            img = Image.new('RGB', (60, 30), color='red')
            img.save(dummy_image_path_1)
            # You would replace this with a real image path to get meaningful results
            # e.g., dummy_image_path_1 = "path/to/your/cat_picture.jpg"
            # NOTE: For actual object detection/image classification, you NEED real, diverse images.
            # A plain red image will yield uninteresting results.
        except ImportError:
            print(
                "Pillow not installed. Skipping dummy image creation. Please install Pillow and provide real image paths.")
            dummy_image_path_1 = None
        except Exception as e:
            print(f"Could not create dummy image: {e}. Skipping image examples.")
            dummy_image_path_1 = None

    if dummy_image_path_1 and os.path.exists(dummy_image_path_1):
        print(f"\nImage Input: {dummy_image_path_1}")
        try:
            print(f"Image Classification: {dispatcher.process_input(dummy_image_path_1, task='image-classification')}")
        except Exception as e:
            print(f"Error during image classification: {e}")

        # Object detection might require larger or more complex images
        # The dummy image is too simple for meaningful object detection.
        # You'll need real images for this.
        # print(f"\nImage Input for Object Detection: {dummy_image_path_1}")
        # print(f"Object Detection: {dispatcher.process_input(dummy_image_path_1, task='object-detection')}")

    # --- Audio Examples ---
    print("\n--- Audio Examples ---")
    # To run audio examples, you need to have audio files.
    # Let's create a dummy audio file for demonstration.
    # NOTE: Requires `pydub` and `soundfile`, and `ffmpeg` installed on your system.
    dummy_audio_path = "dummy_audio.wav"

    if not os.path.exists(dummy_audio_path):
        print(
            f"Creating a dummy audio file at {dummy_audio_path} for demonstration (requires pydub, soundfile, ffmpeg).")
        try:
            from pydub.generators import Sine

            sine_wave = Sine(440).to_audio_segment(duration=1000)  # 1 second of 440 Hz tone
            sine_wave.export(dummy_audio_path, format="wav")
            # For actual ASR, you NEED real speech audio.
            # e.g., dummy_audio_path = "path/to/your/speech_recording.mp3"
        except ImportError:
            print(
                "pydub not installed. Skipping dummy audio creation. Please install pydub, soundfile, and ffmpeg (system-wide) and provide real audio paths.")
            dummy_audio_path = None
        except Exception as e:
            print(f"Could not create dummy audio: {e}. Skipping audio examples.")
            dummy_audio_path = None

    if dummy_audio_path and os.path.exists(dummy_audio_path):
        print(f"\nAudio Input: {dummy_audio_path}")
        try:
            # Whisper-tiny.en is an English-only model.
            # If your audio contains different language, use a multilingual Whisper model like "openai/whisper-tiny".
            transcription = dispatcher.process_input(dummy_audio_path, task='automatic-speech-recognition')
            print(f"Audio Transcription: {transcription['text']}")
        except NotImplementedError:
            print("Skipping audio due to unimplemented feature or missing dependencies.")
        except Exception as e:
            print(f"Error during audio transcription: {e}")

    # --- Clean up dummy files ---
    if dummy_image_path_1 and os.path.exists(dummy_image_path_1) and "dummy_cat_image.jpg" in dummy_image_path_1:
        os.remove(dummy_image_path_1)
    if dummy_audio_path and os.path.exists(dummy_audio_path) and "dummy_audio.wav" in dummy_audio_path:
        os.remove(dummy_audio_path)

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- Configuration ---
MODEL_CHECKPOINT = "openai/whisper-tiny.en"  # Using English-specific tiny model
# For larger, multilingual models, use "openai/whisper-tiny", "openai/whisper-base", etc.
LANGUAGE_ABBREVIATION = "en"  # Language of the dataset
DATASET_NAME = "common_voice"
DATASET_CONFIG = "ja"  # Example for Japanese, change to "en" for English Common Voice
SPLIT_TRAIN = "train"
SPLIT_VALID = "validation"  # Or "test"
BATCH_SIZE = 16  # Adjust based on your GPU VRAM
GRADIENT_ACCUMULATION_STEPS = 1  # Increase for larger effective batch size if VRAM is limited
LEARNING_RATE = 1e-5
NUM_TRAIN_EPOCHS = 3  # Start small, increase for better results
OUTPUT_DIR = "./whisper-tiny-finetuned"
MAX_INPUT_LENGTH_IN_S = 30.0  # Max duration of audio in seconds the model can handle
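
# Worked example for the settings above: the effective batch size is
# BATCH_SIZE * GRADIENT_ACCUMULATION_STEPS * number_of_devices, e.g. 16 * 2 * 1 = 32 samples
# per optimizer step if GRADIENT_ACCUMULATION_STEPS were raised to 2 on a single GPU.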


# --- 1. Load Feature Extractor, Tokenizer, and Model ---
def load_asr_components(model_checkpoint: str):
    logging.info(f"Loading feature extractor, tokenizer, and model for {model_checkpoint}...")
    feature_extractor = AutoFeatureExtractor.from_pretrained(model_checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, language=LANGUAGE_ABBREVIATION, task="transcribe")
    model = AutoModelForSpeechSeq2Seq.from_pretrained(model_checkpoint)

    # Disable forced decoder ids and suppressed tokens so fine-tuning controls generation.
    model.config.forced_decoder_ids = None
    model.config.suppress_tokens = []

    # The pretrained Whisper config already defines decoder_start_token_id (<|startoftranscript|>);
    # do not overwrite it with the tokenizer's bos token. Only align the pad token id.
    model.config.pad_token_id = tokenizer.pad_token_id

    logging.info("Components loaded.")
    return feature_extractor, tokenizer, model


# --- 2. Load and Preprocess Dataset ---
def prepare_dataset(
        dataset_name: str,
        dataset_config: str,
        split: str,
        feature_extractor,
        tokenizer,
        max_input_length_in_s: float,
):
    logging.info(f"Loading dataset '{dataset_name}' with config '{dataset_config}' split '{split}'...")
    # Load dataset with 'audio' feature type for automatic resampling
    dataset = load_dataset(dataset_name, dataset_config, split=split, trust_remote_code=True)

    # Cast audio column to a specific sampling rate (usually 16kHz for ASR models)
    dataset = dataset.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate))

    # Filter out examples that are too long (optional, but good for performance)
    dataset = dataset.filter(
        lambda example: example["audio"]["array"].shape[0] < max_input_length_in_s * feature_extractor.sampling_rate,
        num_proc=os.cpu_count()  # Use multiple processes for faster filtering
    )

    logging.info(f"Dataset loaded and casted. Number of examples: {len(dataset)}")

    # Preprocessing function
    def prepare_example(example):
        # Load and resample audio (handled by .cast_column("audio", Audio(...)))
        audio = example["audio"]

        # Compute log-mel spectrograms from the audio waveform
        input_features = feature_extractor(
            audio["array"], sampling_rate=audio["sampling_rate"]
        ).input_features[0]

        # Tokenize target text (transcription)
        # Normalize text: lowercase, remove punctuation (adjust as needed for your data)
        sentence = example["sentence"]

        # Simple text normalization (customize heavily based on dataset and language)
        # Example for English Common Voice: remove common noise, punctuation etc.
        # This normalization MUST match your model's expected input for the target text.
        text_normalized = re.sub(r"[^\w\s]", "", sentence).lower()

        labels = tokenizer(text_normalized).input_ids

        return {"input_features": input_features, "labels": labels}

    # Apply preprocessing
    logging.info("Applying preprocessing to dataset...")
    # Use map with batched=False as audio processing is per example
    processed_dataset = dataset.map(
        prepare_example,
        remove_columns=dataset.column_names,  # Remove original columns, keep only processed features and labels
        num_proc=os.cpu_count() if os.cpu_count() else 1  # Use multiple processes for faster preprocessing
    )
    logging.info("Preprocessing complete.")
    return processed_dataset


# --- 3. Data Collator ---
# Pad input features and labels to max length within a batch
@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    feature_extractor: Any
    tokenizer: Any

    def __call__(self, features):
        # Pad the log-mel input features with the feature extractor...
        input_features = [{"input_features": feature["input_features"]} for feature in features]
        batch = self.feature_extractor.pad(input_features, return_tensors="pt")

        # ...and pad the tokenized transcriptions with the tokenizer.
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        labels_batch = self.tokenizer.pad(label_features, return_tensors="pt")

        # Replace padding with -100 so padded positions are ignored by the loss
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels

        return batch
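

# Why -100: it is the default ignore_index of torch.nn.CrossEntropyLoss, which the model uses
# internally, so padded label positions contribute nothing to the loss. A hedged toy
# illustration (not executed during training):
# loss_fn = torch.nn.CrossEntropyLoss()   # ignore_index defaults to -100
# logits = torch.randn(3, 10)             # 3 positions over a 10-token vocabulary
# labels = torch.tensor([4, 7, -100])     # the last position is ignored
# print(loss_fn(logits, labels))          # loss computed over the first two positions only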


# --- 4. Define Evaluation Metric ---
def compute_metrics(pred, tokenizer):
    metric = evaluate.load("wer")  # Load Word Error Rate metric
    pred_ids = pred.predictions
    label_ids = pred.label_ids

    # Replace -100 in labels as we did in the DataCollator
    label_ids[label_ids == -100] = tokenizer.pad_token_id

    pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
    label_str = tokenizer.batch_decode(label_ids, skip_special_tokens=True)

    wer = 100 * metric.compute(predictions=pred_str, references=label_str)
    return {"wer": wer}


# --- Main Fine-tuning Function ---
def fine_tune_asr_model():
    feature_extractor, tokenizer, model = load_asr_components(MODEL_CHECKPOINT)
    if None in [feature_extractor, tokenizer, model]:
        return

    # Load and preprocess datasets (using a small fraction for demonstration)
    # For full training, remove .select(range(...))
    try:
        train_dataset = prepare_dataset(
            DATASET_NAME, DATASET_CONFIG, SPLIT_TRAIN, feature_extractor, tokenizer, MAX_INPUT_LENGTH_IN_S
        ).select(range(500))  # Take first 500 examples for quick demo

        eval_dataset = prepare_dataset(
            DATASET_NAME, DATASET_CONFIG, SPLIT_VALID, feature_extractor, tokenizer, MAX_INPUT_LENGTH_IN_S
        ).select(range(100))  # Take first 100 examples for quick demo
    except Exception as e:
        logging.error(f"Error preparing dataset: {e}")
        logging.info("Make sure the dataset name/config is correct and you have internet access.")
        logging.info("Also ensure the 'audio' and 'sentence' columns exist in your chosen dataset.")
        return

    # Initialize data collator
    data_collator = DataCollatorSpeechSeq2SeqWithPadding(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # Initialize training arguments
    training_args = Seq2SeqTrainingArguments(
        output_dir=OUTPUT_DIR,
        per_device_train_batch_size=BATCH_SIZE,
        gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
        learning_rate=LEARNING_RATE,
        num_train_epochs=NUM_TRAIN_EPOCHS,
        predict_with_generate=True,  # Essential for ASR: generate sequences during evaluation
        generation_max_length=225,  # Max length of a generated transcription in tokens (not seconds)
        fp16=torch.cuda.is_available(),  # Use mixed precision if a GPU is available
        evaluation_strategy="steps",  # Must match the save strategy for load_best_model_at_end
        save_strategy="steps",
        save_steps=500,  # Save a checkpoint every 500 steps
        eval_steps=500,  # Evaluate every 500 steps
        logging_steps=25,  # Log every 25 steps
        report_to=["tensorboard"],  # Log to TensorBoard for visualization
        load_best_model_at_end=True,  # Reload the best checkpoint according to the eval metric
        metric_for_best_model="wer",  # Use WER to select the best model
        greater_is_better=False,  # Lower WER is better
        push_to_hub=False,  # Set to True to push the model to the Hugging Face Hub (requires login)
    )

    # Initialize Trainer
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        tokenizer=tokenizer,  # Trainer has no feature_extractor argument; it is saved separately below
        data_collator=data_collator,
        compute_metrics=lambda p: compute_metrics(p, tokenizer),
    )

    # Train the model
    logging.info("Starting model training...")
    trainer.train()
    logging.info("Training complete. Saving final model.")

    # Save the final model
    trainer.save_model(OUTPUT_DIR)
    tokenizer.save_pretrained(OUTPUT_DIR)
    feature_extractor.save_pretrained(OUTPUT_DIR)
    logging.info(f"Fine-tuned ASR model saved to {OUTPUT_DIR}")

    logging.info("\n--- How to use the fine-tuned model ---")
    logging.info(f"from transformers import pipeline")
    logging.info(
        f"transcriber = pipeline('automatic-speech-recognition', model='{OUTPUT_DIR}', tokenizer='{OUTPUT_DIR}', feature_extractor='{OUTPUT_DIR}')")
    logging.info(f"print(transcriber('path/to/your/audio.wav'))")


if __name__ == "__main__":
    # Ensure you have ffmpeg installed system-wide for audio processing!
    # (Refer to previous instructions if you haven't)
    fine_tune_asr_model()

# --- Pipeline Cache ---
_pipeline_cache = {}


def get_pipeline(task, model_name=None, **kwargs):
    key = f"{task}-{model_name}-{hash(frozenset(kwargs.items()))}"
    if key not in _pipeline_cache:
        _pipeline_cache[key] = pipeline(task, model=model_name, **kwargs) if model_name else pipeline(task, **kwargs)
    return _pipeline_cache[key]


# --- Default Models ---
def_models = {
    "sentiment-analysis": "distilbert-base-uncased-finetuned-sst-2-english",
    "summarization": "sshleifer/distilbart-cnn-12-6",
    "text-generation": "gpt2",
    "translation_en_to_fr": "Helsinki-NLP/opus-mt-en-fr",
    "image-classification": "google/vit-base-patch16-224",
    "object-detection": "facebook/detr-resnet-50",
    "automatic-speech-recognition": "openai/whisper-tiny.en"
}


# --- RAG Components ---
def build_rag_qa(pdf_path):
    loader = PyPDFLoader(pdf_path)
    docs = loader.load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    split_docs = splitter.split_documents(docs)
    embeddings = HuggingFaceEmbeddings()
    vectorstore = FAISS.from_documents(split_docs, embeddings)
    # LangChain expects an LLM wrapper rather than a raw transformers pipeline, so wrap it.
    llm = HuggingFacePipeline(pipeline=get_pipeline("text-generation", model_name=def_models["text-generation"]))
    return RetrievalQA.from_chain_type(llm=llm, retriever=vectorstore.as_retriever())
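
# Example usage (hedged sketch; assumes a local "report.pdf" exists):
# qa_chain = build_rag_qa("report.pdf")
# print(qa_chain.run("What are the key findings of this document?"))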


# --- Main Dispatcher ---
class AllInOneDispatcher:
    def __init__(self):
        self.memory = []

    def _pipe(self, task, model=None):
        return get_pipeline(task, model_name=model or def_models.get(task))

    def _is_file(self, path):
        return os.path.exists(path)

    def handle_text(self, text, task="sentiment-analysis", **kwargs):
        result = self._pipe(task)(text, **kwargs)
        self.memory.append((task, text, result))
        return result

    def handle_image(self, path, task="image-classification", **kwargs):
        image = Image.open(path)
        result = self._pipe(task)(image, **kwargs)
        self.memory.append((task, path, result))
        return result

    def handle_audio(self, path, task="automatic-speech-recognition", **kwargs):
        audio = AudioSegment.from_file(path).set_channels(1).set_frame_rate(16000)
        buf = io.BytesIO()
        audio.export(buf, format="wav")
        buf.seek(0)
        data, sr = sf.read(buf)
        if data.dtype != np.float32:
            data = data.astype(np.float32)
        # The ASR pipeline accepts a dict carrying the raw waveform and its sampling rate.
        result = self._pipe(task)({"raw": data, "sampling_rate": sr}, **kwargs)
        self.memory.append((task, path, result))
        return result

    def handle_video(self, path):
        import cv2
        frames, audio_file = [], "extracted_audio.wav"
        cap = cv2.VideoCapture(path)
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            # OpenCV decodes frames as BGR; convert to RGB before building a PIL image.
            frames.append(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
            if len(frames) >= 5:
                break
        cap.release()
        os.system(f"ffmpeg -i {path} -q:a 0 -map a {audio_file} -y")
        # handle_image expects a file path, so run the pipeline directly on the first frame.
        image_result = self._pipe("image-classification")(frames[0])
        audio_result = self.handle_audio(audio_file)
        os.remove(audio_file)
        return {"image": image_result, "audio": audio_result}

    def handle_pdf(self, path):
        qa = build_rag_qa(path)
        return qa.run("Summarize this document")

    def handle_tts(self, text, lang='en'):
        tts = gTTS(text=text, lang=lang)
        temp_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3").name
        tts.save(temp_path)
        return temp_path

    def process(self, input_data, task=None, **kwargs):
        if self._is_file(input_data):
            ext = input_data.split('.')[-1].lower()
            # Fall back to each handler's default task when none is given.
            if ext in ['jpg', 'jpeg', 'png']:
                return self.handle_image(input_data, task or "image-classification", **kwargs)
            elif ext in ['mp3', 'wav']:
                return self.handle_audio(input_data, task or "automatic-speech-recognition", **kwargs)
            elif ext in ['mp4', 'mov']:
                return self.handle_video(input_data)
            elif ext == 'pdf':
                return self.handle_pdf(input_data)
            else:
                raise ValueError(f"Unsupported file type: {ext}")
        else:
            if task == "tts":
                return self.handle_tts(input_data)
            return self.handle_text(input_data, task or "sentiment-analysis", **kwargs)


# --- Example Usage ---
if __name__ == "__main__":
    ai = AllInOneDispatcher()
    print("Text:", ai.process("The weather is great today!", task="sentiment-analysis"))
    print("Summarize:", ai.process("Artificial intelligence is a broad field...", task="summarization"))
    print("TTS path:", ai.process("This is a test speech.", task="tts"))