// daw-audio-workstation / components/AudioUpload.tsx
// (Deployed as part of "AudioForge: AI-Powered Digital Audio Workstation", commit 46c3324)
"use client";
import { useState, useCallback, useRef } from "react";
import { Upload, X, FileAudio, Loader2 } from "lucide-react";
import { useAudioStore } from "@/store/audio-store";
import { audioProcessingAPI } from "@/lib/api/audio-processing-api";
/**
 * Props for the {@link AudioUpload} drag-and-drop upload component.
 */
interface AudioUploadProps {
/** Called after the file has been decoded and added to a track. */
onFileUploaded?: (file: File, audioBuffer: AudioBuffer) => void;
/** Called when validation, decoding, or track creation fails. */
onError?: (error: Error) => void;
/** `accept` attribute forwarded to the hidden file input. Defaults to "audio/*". */
accept?: string;
maxSize?: number; // in MB — files larger than this are rejected before decoding
}
/**
 * Drag-and-drop / click-to-browse audio file uploader.
 *
 * On a successful upload it: validates size, decodes the file with the
 * shared AudioEngine's AudioContext, creates a new track + timeline clip,
 * loads the buffer into the track node, and kicks off a background
 * metadata analysis request (tempo/key/format) that patches the track
 * when it resolves. Backend failure falls back to the locally decoded
 * AudioBuffer's metadata, so the upload itself never depends on the API.
 */
export function AudioUpload({
  onFileUploaded,
  onError,
  accept = "audio/*",
  maxSize = 100,
}: AudioUploadProps) {
  const [isDragging, setIsDragging] = useState(false);
  const [isProcessing, setIsProcessing] = useState(false);
  const [uploadedFile, setUploadedFile] = useState<File | null>(null);
  const fileInputRef = useRef<HTMLInputElement>(null);
  const { audioEngine, addTrack, addAudioClip, updateTrack } = useAudioStore();

  const processFile = useCallback(
    async (file: File) => {
      if (!audioEngine) {
        onError?.(new Error("Audio engine not initialized"));
        return;
      }
      setIsProcessing(true);
      setUploadedFile(file);
      try {
        // Validate file size before paying the cost of decoding.
        const fileSizeMB = file.size / (1024 * 1024);
        if (fileSizeMB > maxSize) {
          throw new Error(`File size exceeds ${maxSize}MB limit`);
        }

        // Decode audio file on the engine's context so the sample rate
        // matches the rest of the session.
        const arrayBuffer = await file.arrayBuffer();
        const audioContext = audioEngine.getContext();
        const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);

        // Create a new track named after the file (extension stripped).
        const trackId = addTrack({
          name: file.name.replace(/\.[^/.]+$/, ""),
          type: "audio",
          volume: 1,
          pan: 0,
          muted: false,
          solo: false,
        });

        // Add clip to timeline at the session origin.
        addAudioClip({
          trackId,
          startTime: 0,
          duration: audioBuffer.duration,
          audioBuffer,
          name: file.name,
        });

        // Load into track node (may be absent if the engine hasn't
        // materialized the track yet — presumably safe to skip; verify).
        const trackNode = audioEngine.getTrack(trackId);
        if (trackNode) {
          await trackNode.loadAudioBuffer(audioBuffer);
        }

        onFileUploaded?.(file, audioBuffer);

        // Background: analyze audio metadata via backend. Deliberately
        // fire-and-forget (void) — the upload must not block on, or fail
        // because of, the analysis service. The chain handles its own
        // rejection below.
        void audioProcessingAPI.analyzeAudio(file, "full")
          .then((analysis) => {
            console.log("Audio analysis complete:", analysis);
            const metadata = {
              duration: analysis.metadata?.duration || audioBuffer.duration,
              sampleRate: analysis.metadata?.sample_rate || audioBuffer.sampleRate,
              channels: analysis.metadata?.channels || audioBuffer.numberOfChannels,
              format: analysis.metadata?.format,
              bpm: analysis.tempo?.bpm,
              key: analysis.key?.key,
            };
            updateTrack(trackId, {
              metadata: metadata as any // Type assertion needed until store types catch up fully
            });
          })
          .catch((err) => {
            console.warn("Audio analysis failed, using local defaults:", err);
            // Even if backend fails, we have basic info from AudioBuffer
            updateTrack(trackId, {
              metadata: {
                duration: audioBuffer.duration,
                sampleRate: audioBuffer.sampleRate,
                channels: audioBuffer.numberOfChannels
              } as any
            });
          });
      } catch (error) {
        const err = error instanceof Error ? error : new Error(String(error));
        console.error("File processing error:", err);
        onError?.(err);
        setUploadedFile(null);
      } finally {
        setIsProcessing(false);
      }
    },
    [audioEngine, addTrack, addAudioClip, updateTrack, onFileUploaded, onError, maxSize]
  );

  const handleDragOver = useCallback((e: React.DragEvent) => {
    e.preventDefault();
    e.stopPropagation();
    setIsDragging(true);
  }, []);

  const handleDragLeave = useCallback((e: React.DragEvent) => {
    e.preventDefault();
    e.stopPropagation();
    setIsDragging(false);
  }, []);

  const handleDrop = useCallback(
    (e: React.DragEvent) => {
      e.preventDefault();
      e.stopPropagation();
      setIsDragging(false);
      const file = e.dataTransfer.files[0];
      if (file && file.type.startsWith("audio/")) {
        void processFile(file);
      } else if (file) {
        // Fix: previously a non-audio drop was silently ignored, leaving
        // the user with no feedback. Surface it through onError instead.
        onError?.(new Error(`Unsupported file type: ${file.type || "unknown"}`));
      }
    },
    [processFile, onError]
  );

  const handleFileSelect = useCallback(
    (e: React.ChangeEvent<HTMLInputElement>) => {
      const file = e.target.files?.[0];
      // Fix: reset the input's value so selecting the SAME file again
      // re-fires the change event (browsers suppress onChange when the
      // value is unchanged). Previously only handleClear reset it.
      e.target.value = "";
      if (file) {
        void processFile(file);
      }
    },
    [processFile]
  );

  const handleClick = useCallback(() => {
    fileInputRef.current?.click();
  }, []);

  const handleClear = useCallback(() => {
    setUploadedFile(null);
    if (fileInputRef.current) {
      fileInputRef.current.value = "";
    }
  }, []);

  return (
    <div className="w-full">
      <input
        ref={fileInputRef}
        type="file"
        accept={accept}
        onChange={handleFileSelect}
        className="hidden"
        disabled={isProcessing}
      />
      <div
        onDragOver={handleDragOver}
        onDragLeave={handleDragLeave}
        onDrop={handleDrop}
        onClick={handleClick}
        className={`
          relative border-2 border-dashed rounded-lg p-8
          transition-all duration-200 cursor-pointer
          ${
            isDragging
              ? "border-blue-500 bg-blue-500/10"
              : "border-gray-700 hover:border-gray-600 bg-gray-900/50"
          }
          ${isProcessing ? "opacity-50 cursor-wait" : ""}
        `}
      >
        {isProcessing ? (
          <div className="flex flex-col items-center justify-center gap-3">
            <Loader2 className="w-8 h-8 animate-spin text-blue-500" />
            <p className="text-sm text-gray-400">Processing audio file...</p>
          </div>
        ) : uploadedFile ? (
          <div className="flex items-center justify-between gap-4">
            <div className="flex items-center gap-3 flex-1 min-w-0">
              <FileAudio className="w-6 h-6 text-blue-500 flex-shrink-0" />
              <div className="min-w-0 flex-1">
                <p className="text-sm font-medium text-white truncate">
                  {uploadedFile.name}
                </p>
                <p className="text-xs text-gray-400">
                  {(uploadedFile.size / (1024 * 1024)).toFixed(2)} MB
                </p>
              </div>
            </div>
            <button
              onClick={(e) => {
                // Don't let the clear click bubble up and reopen the picker.
                e.stopPropagation();
                handleClear();
              }}
              className="p-1 hover:bg-gray-800 rounded transition-colors"
            >
              <X className="w-4 h-4 text-gray-400" />
            </button>
          </div>
        ) : (
          <div className="flex flex-col items-center justify-center gap-3">
            <Upload className="w-10 h-10 text-gray-500" />
            <div className="text-center">
              <p className="text-sm font-medium text-gray-300">
                Drag & drop audio file here
              </p>
              <p className="text-xs text-gray-500 mt-1">
                or click to browse (max {maxSize}MB)
              </p>
            </div>
          </div>
        )}
      </div>
    </div>
  );
}