{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "markdown", "source": [ "## Donwload depedency" ], "metadata": { "id": "iLN3LdAwEiit" } }, { "cell_type": "code", "source": [ "!pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118\n", "!pip install --upgrade librosa pydub" ], "metadata": { "id": "1jyjtthXDZ14" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "## Download dataset and processing features" ], "metadata": { "id": "7s9Bv3o6Eoxt" } }, { "cell_type": "code", "source": [ "!mkdir /content/datasets\n", "!wget https://os.unil.cloud.switch.ch/fma/fma_small.zip\n", "!wget https://huggingface.co/datasets/johaness14/ProcessedFeaturesFMASmall/resolve/main/ProcessedFeaturesFMASmall.zip?download=true -O ProcessedFeaturesFMASmall.zip\n", "!unzip /content/fma_small.zip\n", "!unzip /content/ProcessedFeaturesFMASmall.zip\n", "!mv /content/fma_small /content/datasets\n", "!mv /content/ProcessedFeaturesFMASmall /content/datasets" ], "metadata": { "id": "I9amJ6FgDuAm" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "## Download pre-trained models" ], "metadata": { "id": "ieLp8mMbaBEw" } }, { "cell_type": "code", "source": [ "!mkdir /content/models\n", "!wget https://huggingface.co/johaness14/AI_MiniDJ/resolve/main/mixability_dataset.pkl?download=true -O mixability_dataset.pkl\n", "!wget https://huggingface.co/johaness14/AI_MiniDJ/resolve/main/scaler.pkl?download=true -O scaler.pkl\n", "!wget https://huggingface.co/johaness14/AI_MiniDJ/resolve/main/model_checkpoint.pth?download=true -O model_checkpoint.pth\n", "!mv /content/mixability_dataset.pkl /content/models\n", "!mv /content/scaler.pkl /content/models\n", "!mv /content/model_checkpoint.pth /content/models" ], "metadata": { "id": "-BsPkA3CaDvo" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "## Import library" ], "metadata": { "id": "h_byAuaXEyk9" } }, { "cell_type": "code", "source": [ "import torch\n", "import torch.nn as nn\n", "\n", "import os\n", "import glob\n", "import random\n", "import pickle\n", "import librosa\n", "import numpy as np\n", "from tqdm import tqdm\n", "from pydub import AudioSegment\n", "from IPython.display import Audio, display" ], "metadata": { "id": "5VrUtRMiDgUO" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "## Configure the path" ], "metadata": { "id": "NaedUn7IGGVf" } }, { "cell_type": "code", "source": [ "# --- Path ---\n", "BASE_PATH = \"./datasets\"\n", "FEATURES_PATH = os.path.join(BASE_PATH, \"ProcessedFeaturesFMASmall\")\n", "FMA_AUDIO_PATH = os.path.join(BASE_PATH, \"fma_small\")\n", "MIX_OUTPUT_PATH = os.path.join(BASE_PATH, \"Mix_Results\")\n", "os.makedirs(MIX_OUTPUT_PATH, exist_ok=True)\n", "\n", "MODELS_PATH = \"./models\"\n", "SCALER_FILE = os.path.join(MODELS_PATH, \"scaler.pkl\")\n", "CHECKPOINT_FILE = os.path.join(MODELS_PATH, \"model_checkpoint.pth\")\n", "\n", "# --- Setup Device ---\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "print(f\"Use device: {device}\")" ], "metadata": { "id": "eNtvwZYsEeLU" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "## Model" ], "metadata": { "id": "wNBTmjxsGKTf" } }, { "cell_type": "code", "source": [ "# Redefine model architecture\n", "class MixabilityScorer(nn.Module):\n", " def __init__(self, input_features=7):\n", " super(MixabilityScorer, self).__init__()\n", " self.network = nn.Sequential(\n", " nn.Linear(input_features, 64), nn.ReLU(), nn.Dropout(0.5),\n", " nn.Linear(64, 32), nn.ReLU(),\n", " nn.Linear(32, 16), nn.ReLU(),\n", " nn.Linear(16, 1), nn.Sigmoid()\n", " )\n", "\n", " def forward(self, x):\n", " return self.network(x)\n", "\n", "# Initialize model and load checkpoint\n", "model = MixabilityScorer().to(device)\n", "checkpoint = torch.load(CHECKPOINT_FILE, map_location=device)\n", "model.load_state_dict(checkpoint['model_state_dict'])\n", "model.eval()\n", "print(\"Model loaded from checkpoint successfully.\")\n", "\n", "# Load scaler\n", "with open(SCALER_FILE, 'rb') as f:\n", " scaler = pickle.load(f)\n", "print(\"Scaler loaded successfully.\")\n", "\n", "# Load all features into memory for hunting\n", "print(\"Loading all feature data into memory for hunting...\")\n", "all_feature_files = glob.glob(os.path.join(FEATURES_PATH, '*.npy'))\n", "all_features_data = [np.load(f, allow_pickle=True).item()\n", " for f in tqdm(all_feature_files, desc=\"Loading Features\")]\n", "print(f\"{len(all_features_data)} feature data ready.\")" ], "metadata": { "id": "IslYzyNtE72X" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# Helper function to check key compatibility\n", "def are_keys_compatible(key1, key2):\n", " \"\"\"Check if two musical keys are compatible for mixing\"\"\"\n", " key_map = {'C': 1, 'C#': 2, 'D': 3, 'D#': 4, 'E': 5, 'F': 6,\n", " 'F#': 7, 'G': 8, 'G#': 9, 'A': 10, 'A#': 11, 'B': 12}\n", "\n", " def parse_key(key_str):\n", " parts = key_str.split()\n", " return key_map.get(parts[0], 0), parts[1]\n", "\n", " note1, mode1 = parse_key(key1)\n", " note2, mode2 = parse_key(key2)\n", "\n", " # Return 0 if invalid keys\n", " if note1 == 0 or note2 == 0:\n", " return 0.0\n", "\n", " # Same key and mode\n", " if note1 == note2 and mode1 == mode2:\n", " return 1.0\n", "\n", " # Adjacent keys (semitone apart)\n", " if abs(note1 - note2) == 1 or abs(note1 - note2) == 11:\n", " return 1.0\n", "\n", " # Relative major/minor keys\n", " if mode1 != mode2 and (note1 - note2) % 12 == 3 or (note2 - note1) % 12 == 3:\n", " return 1.0\n", "\n", " return 0.0\n", "\n", "\n", "def find_audio_path(track_id, root_path):\n", " \"\"\"Find audio file path based on track ID\"\"\"\n", " subfolder = track_id[:3]\n", " return os.path.join(root_path, subfolder, f\"{track_id}.mp3\")\n", "\n", "\n", "def run_mixing_engine(track_id1, track_id2):\n", " \"\"\"Execute mixing engine to create crossfaded mix of two tracks\"\"\"\n", " print(\" High score! Running Mixing Engine...\")\n", "\n", " try:\n", " # Load feature data for both tracks\n", " f1 = next(item for item in all_features_data if item[\"track_id\"] == track_id1)\n", " f2 = next(item for item in all_features_data if item[\"track_id\"] == track_id2)\n", "\n", " # Load audio files\n", " y1, sr1 = librosa.load(find_audio_path(track_id1, FMA_AUDIO_PATH), sr=None)\n", " y2, sr2 = librosa.load(find_audio_path(track_id2, FMA_AUDIO_PATH), sr=sr1)\n", "\n", " # Extract BPM and beat timing data\n", " bpm1, beats1 = float(f1['bpm']), f1['beat_times']\n", " bpm2, beats2 = float(f2['bpm']), f2['beat_times']\n", "\n", " # Time-stretch second track to match first track's BPM\n", " stretch_rate = bpm1 / bpm2\n", " y2_stretched = librosa.effects.time_stretch(y2, rate=stretch_rate)\n", " beats2_stretched = beats2 / stretch_rate\n", "\n", " # Calculate transition parameters\n", " song1_duration = librosa.get_duration(y=y1, sr=sr1)\n", " beat_duration_s = 60.0 / bpm1\n", " transition_duration_s = beat_duration_s * 8 # 8 beats transition\n", " transition_start_point_s1 = song1_duration - transition_duration_s\n", "\n", " # Check if song is long enough for transition\n", " if transition_start_point_s1 < beats1.min():\n", " return False\n", "\n", " # Find beat-aligned cut points\n", " last_beat_s1 = beats1[beats1 < transition_start_point_s1].max()\n", " if not beats2_stretched.any():\n", " return False\n", " first_beat_s2 = beats2_stretched[0]\n", "\n", " # Trim audio at beat boundaries\n", " trim_sample_s1 = librosa.time_to_samples(last_beat_s1, sr=sr1)\n", " trim_sample_s2 = librosa.time_to_samples(first_beat_s2, sr=sr1)\n", " y1_trimmed = y1[:trim_sample_s1]\n", " y2_synced = y2_stretched[trim_sample_s2:]\n", "\n", " # Convert to AudioSegment for crossfading\n", " song1_segment = AudioSegment(\n", " np.int16(y1_trimmed * 32767).tobytes(),\n", " frame_rate=sr1, sample_width=2, channels=y1_trimmed.ndim\n", " )\n", "\n", " song2_segment = AudioSegment(\n", " np.int16(y2_synced * 32767).tobytes(),\n", " frame_rate=sr1, sample_width=2, channels=y2_synced.ndim\n", " )\n", "\n", " # Create crossfaded mix\n", " crossfade_duration_ms = int(transition_duration_s * 1000)\n", " final_mix = song1_segment.append(song2_segment, crossfade=crossfade_duration_ms)\n", "\n", " # Export final mix\n", " output_filename = f\"AI_REC_{track_id1}_to_{track_id2}.mp3\"\n", " output_filepath = os.path.join(MIX_OUTPUT_PATH, output_filename)\n", " final_mix.export(output_filepath, format=\"mp3\")\n", "\n", " print(f\" AI MIX RECOMMENDATION CREATED: {output_filepath}\")\n", " display(Audio(output_filepath))\n", " return True\n", "\n", " except Exception as e:\n", " print(f\" Failed to run mixing engine: {e}\")\n", " return False" ], "metadata": { "id": "iM5g5QSSFVBy" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "## Hunting mode with AI" ], "metadata": { "id": "yK_gekX8GiDB" } }, { "cell_type": "code", "source": [ "# AI Hunting Mode - Search for highly compatible track pairs\n", "print(\"\\n>>> Part 3: Starting Hunting Mode...\")\n", "print(\"AI will randomly search until finding 2 highly compatible pairs.\")\n", "\n", "found_pairs_count = 0\n", "attempts = 0\n", "max_attempts = 10000\n", "\n", "while found_pairs_count < 2 and attempts < max_attempts:\n", " attempts += 1\n", "\n", " # Randomly select two tracks from loaded feature data\n", " f1, f2 = random.sample(all_features_data, 2)\n", "\n", " try:\n", " # Extract BPM and calculate key compatibility\n", " bpm1, bpm2 = float(f1['bpm']), float(f2['bpm'])\n", " key_compat_score = are_keys_compatible(f1['key'], f2['key'])\n", "\n", " # Create feature vector for prediction\n", " feature_vector = np.array([[\n", " bpm1, bpm2, key_compat_score,\n", " float(f1['rms']), float(f2['rms']),\n", " float(f1['spectral_centroid']), float(f2['spectral_centroid'])\n", " ]])\n", "\n", " # Scale features and predict compatibility\n", " feature_vector_scaled = scaler.transform(feature_vector)\n", " input_tensor = torch.tensor(feature_vector_scaled, dtype=torch.float32).to(device)\n", "\n", " with torch.no_grad():\n", " score = model(input_tensor).item()\n", "\n", " # Check for high compatibility score\n", " if score > 0.95:\n", " print(\"\\n\" + \"=\"*50)\n", " print(f\"COMPATIBLE PAIR FOUND! (Attempt #{attempts})\")\n", " print(f\" Track IDs: {f1['track_id']} vs {f2['track_id']}\")\n", " print(f\" AI Prediction Score: {score:.4f}\")\n", "\n", " # Display feature analysis\n", " print(\" --- Raw Feature Analysis ---\")\n", " bpm_diff_percent = abs(bpm1 - bpm2) / max(bpm1, bpm2) * 100\n", " print(f\" BPM: {bpm1:.2f} vs {bpm2:.2f} (Diff: {bpm_diff_percent:.2f}%)\")\n", " print(f\" Key: {f1['key']} vs {f2['key']} (Compatible: {key_compat_score == 1.0})\")\n", " print(\" \" + \"-\"*28)\n", "\n", " # Run mixing engine and increment counter on success\n", " if run_mixing_engine(f1['track_id'], f2['track_id']):\n", " found_pairs_count += 1\n", "\n", " except Exception as e:\n", " # Continue on data errors\n", " continue\n", "\n", "print(\"\\n>>> HUNTING COMPLETE! <<<\")" ], "metadata": { "id": "a7G_BkGfGCDO" }, "execution_count": null, "outputs": [] } ] }