"use client";

import { useState } from "react";
import {
  CheckCircle2,
  FileAudio,
  FileMusic,
  Loader2,
  Music2,
  Play,
  XCircle,
  Zap,
} from "lucide-react";

import {
  audioProcessingAPI,
  AgenticWorkflowResult,
} from "@/lib/api/audio-processing-api";
import { useAudioStore } from "@/store/audio-store";
| interface AgenticWorkflowPanelProps { | |
| audioFile: File | null; | |
| onWorkflowComplete?: (result: AgenticWorkflowResult) => void; | |
| } | |
| type WorkflowType = "full-production" | "stem-only" | "midi-only" | "analyze"; | |
| export function AgenticWorkflowPanel({ | |
| audioFile, | |
| onWorkflowComplete, | |
| }: AgenticWorkflowPanelProps) { | |
| const [isProcessing, setIsProcessing] = useState(false); | |
| const [result, setResult] = useState<AgenticWorkflowResult | null>(null); | |
| const [error, setError] = useState<string | null>(null); | |
| const [selectedWorkflow, setSelectedWorkflow] = | |
| useState<WorkflowType>("full-production"); | |
| const [progress, setProgress] = useState<string>(""); | |
| const { audioEngine, addTrack, addAudioClip } = useAudioStore(); | |
| const workflows = [ | |
| { | |
| id: "full-production" as WorkflowType, | |
| name: "Full Production", | |
| description: "Analyze, separate stems, and extract MIDI", | |
| icon: Zap, | |
| }, | |
| { | |
| id: "stem-only" as WorkflowType, | |
| name: "Stem Separation Only", | |
| description: "Separate audio into individual stems", | |
| icon: Music2, | |
| }, | |
| { | |
| id: "midi-only" as WorkflowType, | |
| name: "MIDI Extraction Only", | |
| description: "Extract MIDI notes from audio", | |
| icon: FileMusic, | |
| }, | |
| { | |
| id: "analyze" as WorkflowType, | |
| name: "Audio Analysis", | |
| description: "Analyze tempo, key, and other properties", | |
| icon: FileAudio, | |
| }, | |
| ]; | |
| const handleRunWorkflow = async () => { | |
| if (!audioFile) { | |
| setError("Please upload an audio file first"); | |
| return; | |
| } | |
| setIsProcessing(true); | |
| setError(null); | |
| setResult(null); | |
| setProgress("Initializing workflow..."); | |
| try { | |
| const workflowOptions = { | |
| stem_model: "demucs", | |
| stems: ["vocals", "drums", "bass", "other"], | |
| format: "wav", | |
| midi_method: "basic-pitch", | |
| threshold: 0.5, | |
| min_note_duration: 0.1, | |
| }; | |
| setProgress("Processing audio file..."); | |
| const workflowResult = await audioProcessingAPI.runAgenticWorkflow( | |
| audioFile, | |
| selectedWorkflow, | |
| workflowOptions | |
| ); | |
| setResult(workflowResult); | |
| // Automatically load results into DAW | |
| await loadResultsIntoDAW(workflowResult); | |
| setProgress("Complete!"); | |
| onWorkflowComplete?.(workflowResult); | |
| } catch (err) { | |
| const errorMessage = | |
| err instanceof Error ? err.message : "Workflow execution failed"; | |
| setError(errorMessage); | |
| setProgress(""); | |
| console.error("Workflow error:", err); | |
| } finally { | |
| setIsProcessing(false); | |
| } | |
| }; | |
| const loadResultsIntoDAW = async (result: AgenticWorkflowResult) => { | |
| if (!audioEngine) return; | |
| // Load stems if available | |
| if (result.stems && result.stems.stems) { | |
| setProgress("Loading stems into DAW..."); | |
| for (const [stemName, stemInfo] of Object.entries(result.stems.stems)) { | |
| try { | |
| const blob = await audioProcessingAPI.downloadFile(stemInfo.path); | |
| const arrayBuffer = await blob.arrayBuffer(); | |
| const audioContext = audioEngine.getContext(); | |
| const audioBuffer = await audioContext.decodeAudioData(arrayBuffer); | |
| const trackId = addTrack({ | |
| name: `${stemName.charAt(0).toUpperCase() + stemName.slice(1)} Stem`, | |
| type: "audio", | |
| volume: 1, | |
| pan: 0, | |
| muted: false, | |
| solo: false, | |
| color: getStemColor(stemName), | |
| }); | |
| addAudioClip({ | |
| trackId, | |
| startTime: 0, | |
| duration: audioBuffer.duration, | |
| audioBuffer, | |
| name: stemInfo.filename, | |
| }); | |
| const trackNode = audioEngine.getTrack(trackId); | |
| if (trackNode) { | |
| await trackNode.loadAudioBuffer(audioBuffer); | |
| } | |
| } catch (err) { | |
| console.error(`Failed to load stem ${stemName}:`, err); | |
| } | |
| } | |
| } | |
| // Load MIDI if available | |
| if (result.midi && result.midi.midi_file) { | |
| setProgress("Loading MIDI into DAW..."); | |
| // TODO: Implement MIDI loading | |
| console.log("MIDI available:", result.midi.midi_file); | |
| } | |
| }; | |
| const getStemColor = (stemName: string): string => { | |
| const colors: Record<string, string> = { | |
| vocals: "#ef4444", | |
| drums: "#f59e0b", | |
| bass: "#10b981", | |
| other: "#6366f1", | |
| }; | |
| return colors[stemName] || "#6b7280"; | |
| }; | |
| return ( | |
| <div className="bg-gray-900 rounded-lg p-4 space-y-4"> | |
| <div className="flex items-center gap-2"> | |
| <Zap className="w-5 h-5 text-yellow-500" /> | |
| <h3 className="text-lg font-semibold text-white">Agentic Workflow</h3> | |
| </div> | |
| {error && ( | |
| <div className="bg-red-500/10 border border-red-500/50 rounded p-3 flex items-center gap-2"> | |
| <XCircle className="w-4 h-4 text-red-500" /> | |
| <p className="text-sm text-red-400">{error}</p> | |
| </div> | |
| )} | |
| {result && ( | |
| <div className="bg-green-500/10 border border-green-500/50 rounded p-3 space-y-2"> | |
| <div className="flex items-center gap-2"> | |
| <CheckCircle2 className="w-4 h-4 text-green-500" /> | |
| <p className="text-sm font-medium text-green-400">Workflow Complete!</p> | |
| </div> | |
| <div className="text-xs text-gray-400 space-y-1"> | |
| <p>Steps completed: {result.steps.join(", ")}</p> | |
| {result.analysis && ( | |
| <p> | |
| Tempo: {result.analysis.tempo?.bpm || "N/A"} BPM | Key:{" "} | |
| {result.analysis.key?.key || "N/A"} | |
| </p> | |
| )} | |
| {result.stems && ( | |
| <p>Stems: {Object.keys(result.stems.stems).length} separated</p> | |
| )} | |
| {result.midi && <p>MIDI extracted successfully</p>} | |
| </div> | |
| </div> | |
| )} | |
| <div className="space-y-3"> | |
| <div> | |
| <label className="text-xs font-medium text-gray-400 mb-2 block"> | |
| Workflow Type | |
| </label> | |
| <div className="grid grid-cols-1 gap-2"> | |
| {workflows.map((workflow) => { | |
| const Icon = workflow.icon; | |
| return ( | |
| <label | |
| key={workflow.id} | |
| className={` | |
| flex items-start gap-3 p-3 rounded-lg border cursor-pointer transition-all | |
| ${ | |
| selectedWorkflow === workflow.id | |
| ? "border-blue-500 bg-blue-500/10" | |
| : "border-gray-700 bg-gray-800 hover:border-gray-600" | |
| } | |
| `} | |
| > | |
| <input | |
| type="radio" | |
| name="workflow" | |
| value={workflow.id} | |
| checked={selectedWorkflow === workflow.id} | |
| onChange={(e) => | |
| setSelectedWorkflow(e.target.value as WorkflowType) | |
| } | |
| disabled={isProcessing} | |
| className="mt-0.5" | |
| /> | |
| <div className="flex-1"> | |
| <div className="flex items-center gap-2"> | |
| <Icon className="w-4 h-4 text-gray-400" /> | |
| <span className="text-sm font-medium text-white"> | |
| {workflow.name} | |
| </span> | |
| </div> | |
| <p className="text-xs text-gray-500 mt-1"> | |
| {workflow.description} | |
| </p> | |
| </div> | |
| </label> | |
| ); | |
| })} | |
| </div> | |
| </div> | |
| {isProcessing && progress && ( | |
| <div className="bg-gray-800 rounded p-2"> | |
| <p className="text-xs text-gray-400">{progress}</p> | |
| </div> | |
| )} | |
| <button | |
| onClick={handleRunWorkflow} | |
| disabled={isProcessing || !audioFile} | |
| className="w-full bg-yellow-600 hover:bg-yellow-700 disabled:bg-gray-700 disabled:cursor-not-allowed text-white px-4 py-2 rounded transition-colors flex items-center justify-center gap-2" | |
| > | |
| {isProcessing ? ( | |
| <> | |
| <Loader2 className="w-4 h-4 animate-spin" /> | |
| Processing... | |
| </> | |
| ) : ( | |
| <> | |
| <Play className="w-4 h-4" /> | |
| Run Workflow | |
| </> | |
| )} | |
| </button> | |
| </div> | |
| </div> | |
| ); | |
| } | |