File size: 2,516 Bytes
fc7b4a9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
from src.preprocessing.preprocessor import single_preprocessing
from src.spectttra.spectttra_trainer import spectttra_predict
from src.llm2vectrain.model import load_llm2vec_model
from src.llm2vectrain.llm2vec_trainer import l2vec_single_train, load_pca_model
from src.models.mlp import build_mlp, load_config
from pathlib import Path
from src.utils.config_loader import DATASET_NPZ
from src.utils.dataset import instance_scaler

from pathlib import Path
import numpy as np
import torch


def predict_pipeline(audio, lyrics: str):
    """
    Run the full inference pipeline for a single sample: preprocessing,
    feature extraction (SpecTTTra for audio, LLM2Vec + PCA for lyrics),
    per-instance scaling, feature fusion, and MLP classification.

    Parameters
    ----------
    audio : audio_object
        Audio object file (per the demo below, the name of a song placed
        in ``data/raw``).

    lyrics : str
        Lyrics string.

    Returns
    -------
    dict
        ``"probability"`` — classifier output probability,
        ``"label"`` — numerical class label,
        ``"prediction"`` — ``"AI-Generated"`` or ``"Human-Composed"``.
    """
    # Load the LLM2Vec embedding model used for the lyrics branch.
    llm2vec_model = load_llm2vec_model()

    # Preprocess both modalities into model-ready form.
    audio, lyrics = single_preprocessing(audio, lyrics)

    # Extract features from each modality.
    audio_features = spectttra_predict(audio)
    lyrics_features = l2vec_single_train(llm2vec_model, lyrics)

    # Reduce the lyrics embedding with the saved PCA model.
    reduced_lyrics = load_pca_model(lyrics_features)

    # Scale both vectors using per-instance Z-score normalization.
    audio_features, reduced_lyrics = instance_scaler(audio_features, reduced_lyrics)

    # Fuse modalities: concatenate audio + reduced lyrics feature-wise.
    results = np.concatenate([audio_features, reduced_lyrics], axis=1)

    # ---- Load MLP Classifier ----
    config = load_config("config/model_config.yml")
    classifier = build_mlp(input_dim=results.shape[1], config=config)

    # Load trained weights (path must match where the model was saved).
    model_path = "models/mlp/mlp_multimodal.pth"
    classifier.load_model(model_path)
    classifier.model.eval()  # inference mode: disable dropout/batch-norm updates

    # Run prediction on the fused feature vector.
    probability, prediction, label = classifier.predict_single(results)

    # NOTE(review): prediction == 0 is mapped to "AI-Generated" — confirm this
    # matches the label encoding used at training time.
    return {
        "probability": probability,
        "label": label,
        "prediction": "AI-Generated" if prediction == 0 else "Human-Composed",
    }


if __name__ == "__main__":
    # Example usage (replace with real inputs, place song inside data/raw.)
    audio = "sample"
    lyrics = "Some lyrics text here"
    print(predict_pipeline(audio, lyrics))