// HomePilot frontend — src/ui/CreatorStudioEditor.tsx
// Synced to HF Space by HomePilot Deploy Bot ("chore(hf): sync HomePilot to HF Space", commit 23b413b)
import React, { useEffect, useState, useCallback } from "react";
import {
ArrowLeft,
Play,
Pause,
SkipBack,
SkipForward,
Plus,
RefreshCw,
Check,
Loader2,
ImageIcon,
Monitor,
Edit3,
Wand2,
Save,
X,
Settings,
FileText,
Sparkles,
AlertCircle,
Download,
Tv,
Smartphone,
Presentation,
Camera,
Palette,
Star,
Lock,
Shield,
Film,
MoreHorizontal,
Sliders,
ChevronDown,
} from "lucide-react";
import { useTVModeStore } from "./studio/stores/tvModeStore";
import type { TVScene } from "./studio/stores/tvModeStore";
import { TVModeContainer } from "./studio/components/TVMode/TVModeContainer";
import CreatorStudioSettings, {
CREATOR_STUDIO_PARAM_DEFAULTS,
type CreatorStudioGenerationParams,
} from "./CreatorStudioSettings";
// Types
// Generation lifecycle for a single scene's media.
type SceneStatus = "pending" | "generating" | "ready" | "error";
// Target platform/format presets (aspect ratio is baked into the name).
type PlatformPreset = "youtube_16_9" | "shorts_9_16" | "slides_16_9";
// Content rating for the project (safe-for-work vs mature).
type ContentRating = "sfw" | "mature";
// A single scene: narration plus prompts and any generated media URLs.
type Scene = {
id: string;
videoId: string;
// Zero-based position within the project's scene list.
idx: number;
narration: string;
imagePrompt: string;
negativePrompt: string;
// Media URLs stay null until the corresponding asset has been generated.
imageUrl: string | null;
videoUrl: string | null;
audioUrl: string | null;
status: SceneStatus;
durationSec: number;
createdAt: number;
updatedAt: number;
};
// A Creator Studio project as the UI sees it.
type Project = {
id: string;
title: string;
logline: string;
status: "draft" | "in_review" | "approved" | "archived";
platformPreset: PlatformPreset;
contentRating: ContentRating;
// "key:value" tags also encode settings (goal:, visual:, tone:, gen:*, llm:, ...).
tags?: string[];
createdAt: number;
updatedAt: number;
metadata?: {
story_outline?: StoryOutline;
// "video" enables video generation for the project; "slideshow" is images only.
generationMode?: "video" | "slideshow";
};
};
// One scene as described by the AI-generated outline (snake_case = backend shape).
type SceneOutline = {
scene_number: number;
title: string;
description: string;
narration: string;
image_prompt: string;
negative_prompt: string;
duration_sec: number;
};
// Full AI-generated story outline returned by the backend.
type StoryOutline = {
title: string;
logline: string;
visual_style: string;
tone: string;
story_arc: {
beginning: string;
rising_action: string;
climax: string;
falling_action: string;
resolution: string;
};
scenes: SceneOutline[];
};
// A selectable model (LLM, image, or video) for the settings dropdowns.
type AvailableModel = {
id: string;
name: string;
provider?: string;
};
// Props for CreatorStudioEditor. Most generation-related props are
// wizard-provided defaults; project tags may override them per-project.
interface CreatorStudioEditorProps {
projectId: string;
backendUrl: string;
apiKey?: string;
onExit: () => void;
autoGenerateFirst?: boolean;
targetSceneCount?: number;
defaultLLMModel?: string;
imageProvider?: string;
imageModel?: string;
imageWidth?: number;
imageHeight?: number;
imageSteps?: number;
imageCfg?: number;
/** Video model for AI video generation */
videoModel?: string;
/** Enable video generation after image generation */
enableVideoGeneration?: boolean;
}
/**
* CreatorStudioEditor - Professional editor for Creator Studio projects
* Styled like Play Story mode but enhanced for creators
*/
export function CreatorStudioEditor({
projectId,
backendUrl,
apiKey,
onExit,
autoGenerateFirst = false,
targetSceneCount = 8,
defaultLLMModel = "",
imageProvider = "comfyui",
imageModel,
imageWidth = 1344,
imageHeight = 768,
imageSteps,
imageCfg,
videoModel,
enableVideoGeneration = false,
}: CreatorStudioEditorProps) {
// Trimmed API key; empty string means "send no x-api-key header".
const authKey = (apiKey || "").trim();
// One-shot guards so wizard-driven auto-generation runs at most once per mount.
const [hasAutoTriggered, setHasAutoTriggered] = useState(false);
const [hasAutoGeneratedOutline, setHasAutoGeneratedOutline] = useState(false);
// TV Mode store
const tvModeActive = useTVModeStore((s) => s.isActive);
const enterTVMode = useTVModeStore((s) => s.enterTVMode);
const updateSceneImageByIdx = useTVModeStore((s) => s.updateSceneImageByIdx);
// State
const [project, setProject] = useState<Project | null>(null);
const [scenes, setScenes] = useState<Scene[]>([]);
const [loading, setLoading] = useState(true);
// Story outline state
const [storyOutline, setStoryOutline] = useState<StoryOutline | null>(null);
const [isGeneratingOutline, setIsGeneratingOutline] = useState(false);
const [showOutlinePanel, setShowOutlinePanel] = useState(false);
// Scene editor state
const [showSceneEditor, setShowSceneEditor] = useState(false);
const [editingScene, setEditingScene] = useState<Scene | null>(null);
const [editNarration, setEditNarration] = useState("");
const [editImagePrompt, setEditImagePrompt] = useState("");
const [editNegativePrompt, setEditNegativePrompt] = useState("");
const [isSavingScene, setIsSavingScene] = useState(false);
// Model selection state
const [availableLLMModels, setAvailableLLMModels] = useState<AvailableModel[]>([]);
const [availableImageModels, setAvailableImageModels] = useState<AvailableModel[]>([]);
const [selectedLLMModel, setSelectedLLMModel] = useState<string>(defaultLLMModel);
const [selectedImageModel, setSelectedImageModel] = useState<string>(imageModel || "");
const [selectedVideoModel, setSelectedVideoModel] = useState<string>(videoModel || "");
const [settingsLLMModel, setSettingsLLMModel] = useState<string>(defaultLLMModel);
const [error, setError] = useState<string | null>(null);
// Playback state (index into `scenes`, play/pause, and TTS activity).
const [currentSceneIndex, setCurrentSceneIndex] = useState(0);
const [isPlaying, setIsPlaying] = useState(false);
const [isTTSSpeaking, setIsTTSSpeaking] = useState(false);
const [isSaving, setIsSaving] = useState(false);
const [lastSaved, setLastSaved] = useState<Date | null>(null);
// Single-flight flags for the various generation pipelines.
const [isGeneratingScene, setIsGeneratingScene] = useState(false);
const [isGeneratingImage, setIsGeneratingImage] = useState(false);
const [isGeneratingVideo, setIsGeneratingVideo] = useState(false);
const [hoveredSceneIdx, setHoveredSceneIdx] = useState<number | null>(null);
const [canPlayWebm, setCanPlayWebm] = useState<boolean>(true);
// Batch generation state (generates all scenes from outline)
const [isBatchGenerating, setIsBatchGenerating] = useState(false);
const [batchProgress, setBatchProgress] = useState<{ current: number; total: number; phase: 'scene' | 'image' | 'video' }>({ current: 0, total: 0, phase: 'scene' });
// Project Settings Modal state
const [showSettingsModal, setShowSettingsModal] = useState(false);
const [settingsTitle, setSettingsTitle] = useState("");
const [settingsLogline, setSettingsLogline] = useState("");
const [settingsPlatform, setSettingsPlatform] = useState<PlatformPreset>("youtube_16_9");
const [settingsGoal, setSettingsGoal] = useState<"Entertain" | "Educate" | "Inspire">("Educate");
const [settingsVisualStyle, setSettingsVisualStyle] = useState<"Cinematic" | "Digital Art" | "Anime">("Cinematic");
const [settingsTones, setSettingsTones] = useState<string[]>(["Documentary", "Calm"]);
const [settingsSceneCount, setSettingsSceneCount] = useState(8);
const [settingsSceneDuration, setSettingsSceneDuration] = useState(5);
const [settingsLockIdentity, setSettingsLockIdentity] = useState(true);
const [settingsContentRating, setSettingsContentRating] = useState<ContentRating>("sfw");
const [settingsEnableVideo, setSettingsEnableVideo] = useState(false); // Enable video generation capability
const [isSavingSettings, setIsSavingSettings] = useState(false);
// Model override state for Project Settings
const [settingsImageModel, setSettingsImageModel] = useState<string>("");
const [settingsVideoModel, setSettingsVideoModel] = useState<string>("");
const [availableVideoModels, setAvailableVideoModels] = useState<AvailableModel[]>([]);
const [loadingModels, setLoadingModels] = useState(false);
// Generation parameters state (advanced customization)
const [genParams, setGenParams] = useState<CreatorStudioGenerationParams>(CREATOR_STUDIO_PARAM_DEFAULTS);
// Project-level capability: can this project generate videos?
// IMPORTANT: do NOT rely only on enableVideoGeneration (wizard-only, transient)
// This derives video capability from project metadata/tags for existing projects
const projectWantsVideo = Boolean(
project?.metadata?.generationMode === "video" ||
project?.tags?.includes("mode:video") ||
project?.tags?.includes("projectType:video") ||
project?.tags?.includes("projectType:video_series") ||
enableVideoGeneration === true // backward compatibility with wizard prop
);
// API helpers
// GET a backend endpoint and resolve with its parsed JSON payload.
// Rejects with an Error carrying the HTTP status (plus body text when readable).
const fetchApi = useCallback(
  async <T,>(path: string): Promise<T> => {
    const base = backendUrl.replace(/\/+$/, "");
    const res = await fetch(`${base}${path}`, {
      method: "GET",
      headers: {
        "Content-Type": "application/json",
        ...(authKey ? { "x-api-key": authKey } : {}),
      },
    });
    if (res.ok) return (await res.json()) as T;
    const detail = await res.text().catch(() => "");
    throw new Error(`HTTP ${res.status}${detail ? `: ${detail}` : ""}`);
  },
  [backendUrl, authKey]
);
// POST a JSON body to a backend endpoint and resolve with its parsed JSON payload.
// Rejects with an Error carrying the HTTP status (plus body text when readable).
const postApi = useCallback(
  async <T,>(path: string, body: any): Promise<T> => {
    const base = backendUrl.replace(/\/+$/, "");
    const res = await fetch(`${base}${path}`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        ...(authKey ? { "x-api-key": authKey } : {}),
      },
      body: JSON.stringify(body),
    });
    if (res.ok) return (await res.json()) as T;
    const detail = await res.text().catch(() => "");
    throw new Error(`HTTP ${res.status}${detail ? `: ${detail}` : ""}`);
  },
  [backendUrl, authKey]
);
// PATCH a JSON body to a backend endpoint and resolve with its parsed JSON payload.
// Rejects with an Error carrying the HTTP status (plus body text when readable).
const patchApi = useCallback(
  async <T,>(path: string, body: any): Promise<T> => {
    const base = backendUrl.replace(/\/+$/, "");
    const res = await fetch(`${base}${path}`, {
      method: "PATCH",
      headers: {
        "Content-Type": "application/json",
        ...(authKey ? { "x-api-key": authKey } : {}),
      },
      body: JSON.stringify(body),
    });
    if (res.ok) return (await res.json()) as T;
    const detail = await res.text().catch(() => "");
    throw new Error(`HTTP ${res.status}${detail ? `: ${detail}` : ""}`);
  },
  [backendUrl, authKey]
);
// DELETE a backend resource and resolve with its parsed JSON payload.
// Rejects with an Error carrying the HTTP status (plus body text when readable).
const deleteApi = useCallback(
  async <T,>(path: string): Promise<T> => {
    const base = backendUrl.replace(/\/+$/, "");
    const res = await fetch(`${base}${path}`, {
      method: "DELETE",
      headers: {
        "Content-Type": "application/json",
        ...(authKey ? { "x-api-key": authKey } : {}),
      },
    });
    if (res.ok) return (await res.json()) as T;
    const detail = await res.text().catch(() => "");
    throw new Error(`HTTP ${res.status}${detail ? `: ${detail}` : ""}`);
  },
  [backendUrl, authKey]
);
// ---------- API <-> UI normalization helpers ----------
// Backend may return snake_case (image_url/video_url). UI uses camelCase (imageUrl/videoUrl).
// Maps a raw backend scene payload onto the UI's Scene shape, accepting either
// naming convention and filling safe defaults for missing fields.
const normalizeScene = useCallback((raw: any): Scene => {
  // The annotated return type checks this literal directly; the previous
  // `as Scene` assertion was redundant and would have masked missing fields.
  return {
    id: raw.id,
    videoId: raw.videoId ?? raw.video_id ?? "",
    idx: raw.idx ?? raw.index ?? raw.scene_index ?? 0,
    narration: raw.narration ?? "",
    imagePrompt: raw.imagePrompt ?? raw.image_prompt ?? "",
    negativePrompt: raw.negativePrompt ?? raw.negative_prompt ?? "",
    imageUrl: raw.imageUrl ?? raw.image_url ?? null,
    videoUrl: raw.videoUrl ?? raw.video_url ?? null,
    audioUrl: raw.audioUrl ?? raw.audio_url ?? null,
    status: raw.status ?? "pending",
    durationSec: raw.durationSec ?? raw.duration_sec ?? 5,
    createdAt: raw.createdAt ?? raw.created_at ?? 0,
    updatedAt: raw.updatedAt ?? raw.updated_at ?? 0,
  };
}, []);
// Normalize a raw scene array and order it by scene index; non-arrays yield [].
const normalizeScenes = useCallback((arr: any[]): Scene[] => {
  if (!Array.isArray(arr)) return [];
  const normalized = arr.map(normalizeScene);
  normalized.sort((a, b) => a.idx - b.idx);
  return normalized;
}, [normalizeScene]);
// Convert UI patch (camelCase) to backend-friendly payload (include snake_case too)
const toScenePatch = useCallback((patch: any) => {
  // camelCase keys the backend also wants mirrored as snake_case.
  const mirrored: Record<string, string> = {
    imageUrl: "image_url",
    videoUrl: "video_url",
    audioUrl: "audio_url",
    imagePrompt: "image_prompt",
    negativePrompt: "negative_prompt",
    durationSec: "duration_sec",
  };
  const out: any = { ...patch };
  for (const [camel, snake] of Object.entries(mirrored)) {
    if (camel in out) out[snake] = out[camel];
  }
  return out;
}, []);
// Authoritative refresh from backend (source of truth)
const refreshScenes = useCallback(async () => {
  try {
    const payload = await fetchApi<{ scenes: any[] }>(`/studio/videos/${projectId}/scenes`);
    setScenes(normalizeScenes((payload as any).scenes));
  } catch (e) {
    // Non-critical - log but don't alert
    console.warn('[CreatorStudioEditor] Failed to refresh scenes:', e);
  }
}, [fetchApi, projectId, normalizeScenes]);
// Proxy video URL through backend for correct Content-Type headers
// This ensures WebM videos play correctly in browsers
const proxyVideoUrl = useCallback((rawUrl: string | null | undefined): string | null => {
  if (!rawUrl) return null;
  // Only ComfyUI localhost URLs need proxying; everything else passes through.
  const isLocalComfy =
    rawUrl.startsWith('http://localhost:8188/') ||
    rawUrl.startsWith('http://127.0.0.1:8188/');
  if (!isLocalComfy) return rawUrl;
  const base = backendUrl.replace(/\/+$/, '');
  return `${base}/studio/media?url=${encodeURIComponent(rawUrl)}`;
}, [backendUrl]);
// Detect when "videoUrl" is actually an animated image (e.g. .webp, .gif)
// Some Comfy workflows output animated WebP instead of WebM when ffmpeg is unavailable
const isAnimatedImageUrl = useCallback((u: string | null | undefined): boolean => {
  if (!u) return false;
  // Substring match covers direct URLs and ComfyUI view?filename=... patterns
  const lowered = u.toLowerCase();
  return [".webp", ".gif"].some((ext) => lowered.includes(ext));
}, []);
// Detect if URL is a WebM video
const isWebmUrl = useCallback((u: string | null | undefined): boolean => {
  return Boolean(u && u.toLowerCase().includes(".webm"));
}, []);
// Sync outline with current scenes (keeps outline in sync with actual scene data)
const syncOutlineWithScenes = useCallback(async () => {
  if (!projectId) return;
  try {
    console.log('[CreatorStudioEditor] Syncing outline with scenes...');
    const res = await postApi<{ ok: boolean; outline: any; scene_count: number }>(
      `/studio/videos/${projectId}/sync-outline`,
      {}
    );
    if (!res.ok || !res.outline) return;
    setStoryOutline(res.outline);
    console.log(`[CreatorStudioEditor] Outline synced: ${res.scene_count} scenes`);
  } catch (e: any) {
    // Non-critical - don't alert user
    console.warn('[CreatorStudioEditor] Failed to sync outline:', e.message);
  }
}, [projectId, postApi]);
// Generate AI-powered story outline
const generateStoryOutline = useCallback(async () => {
  if (!project || isGeneratingOutline) return;
  setIsGeneratingOutline(true);
  console.log('[CreatorStudioEditor] Generating story outline...');
  const payload = {
    target_scenes: targetSceneCount,
    scene_duration: 5,
    ollama_model: selectedLLMModel || undefined,
  };
  try {
    const res = await postApi<{ ok: boolean; outline: StoryOutline; model_used: string }>(
      `/studio/videos/${projectId}/generate-outline`,
      payload
    );
    if (res.ok && res.outline) {
      setStoryOutline(res.outline);
      console.log('[CreatorStudioEditor] Story outline generated:', res.outline.title);
    }
  } catch (e: any) {
    console.error('[CreatorStudioEditor] Failed to generate outline:', e);
    alert(`Failed to generate outline: ${e.message}`);
  } finally {
    setIsGeneratingOutline(false);
  }
}, [project, projectId, targetSceneCount, selectedLLMModel, isGeneratingOutline, postApi]);
// Load existing story outline
const loadStoryOutline = useCallback(async () => {
  try {
    const res = await fetchApi<{ ok: boolean; outline: StoryOutline | null }>(
      `/studio/videos/${projectId}/outline`
    );
    if (res.ok && res.outline) setStoryOutline(res.outline);
  } catch {
    // A missing outline is a normal state for new projects.
    console.log('[CreatorStudioEditor] No existing outline found');
  }
}, [projectId, fetchApi]);
// Open scene editor
const openSceneEditor = useCallback((scene: Scene) => {
  // Seed the edit fields from the scene, then reveal the modal.
  setEditingScene(scene);
  setEditNarration(scene.narration || "");
  setEditImagePrompt(scene.imagePrompt || "");
  setEditNegativePrompt(scene.negativePrompt || "");
  setShowSceneEditor(true);
}, []);
// Save scene edits
const saveSceneEdits = useCallback(async () => {
  if (!editingScene) return;
  setIsSavingScene(true);
  const edits = {
    narration: editNarration,
    imagePrompt: editImagePrompt,
    negativePrompt: editNegativePrompt,
  };
  try {
    await patchApi(`/studio/videos/${projectId}/scenes/${editingScene.id}`, edits);
    // Mirror the saved fields into local scene state
    setScenes((prev) =>
      prev.map((s) => (s.id === editingScene.id ? { ...s, ...edits } : s))
    );
    setLastSaved(new Date());
    setShowSceneEditor(false);
    setEditingScene(null);
  } catch (e: any) {
    console.error('[CreatorStudioEditor] Failed to save scene:', e);
    alert(`Failed to save scene: ${e.message}`);
  } finally {
    setIsSavingScene(false);
  }
}, [editingScene, editNarration, editImagePrompt, editNegativePrompt, projectId, patchApi]);
// Parse tags from project
// Decodes the "key:value" tag conventions back into typed settings values.
const parseTagsFromProject = useCallback((proj: Project) => {
  const tags = proj.tags || [];
  // Defaults mirror the wizard's initial choices.
  let goal: "Entertain" | "Educate" | "Inspire" = "Educate";
  let visualStyle: "Cinematic" | "Digital Art" | "Anime" = "Cinematic";
  const tones: string[] = [];
  let lockIdentity = true;
  let sceneCount = 8;
  let sceneDuration = 5;
  for (const tag of tags) {
    if (tag.startsWith("goal:")) {
      const g = tag.slice("goal:".length);
      if (g === "entertain") goal = "Entertain";
      else if (g === "educate") goal = "Educate";
      else if (g === "inspire") goal = "Inspire";
    } else if (tag.startsWith("visual:")) {
      const v = tag.slice("visual:".length).replace(/_/g, " ").toLowerCase();
      if (v === "cinematic") visualStyle = "Cinematic";
      else if (v === "digital art") visualStyle = "Digital Art";
      else if (v === "anime") visualStyle = "Anime";
    } else if (tag.startsWith("tone:")) {
      const t = tag.slice("tone:".length).replace(/_/g, " ");
      tones.push(t.charAt(0).toUpperCase() + t.slice(1));
    } else if (tag === "lock:identity") {
      lockIdentity = true;
    } else if (tag.startsWith("scenes:")) {
      sceneCount = parseInt(tag.slice("scenes:".length), 10) || 8;
    } else if (tag.startsWith("duration:")) {
      sceneDuration = parseInt(tag.slice("duration:".length), 10) || 5;
    }
  }
  return { goal, visualStyle, tones, lockIdentity, sceneCount, sceneDuration };
}, []);
// Parse generation params from project tags
// Reads gen:<key>=<value> tags back into a CreatorStudioGenerationParams,
// falling back to CREATOR_STUDIO_PARAM_DEFAULTS for anything missing.
const parseGenParamsFromTags = useCallback((tags: string[] | undefined | null): CreatorStudioGenerationParams => {
  const t = tags || [];
  const next = { ...CREATOR_STUDIO_PARAM_DEFAULTS };
  const get = (k: string) => t.find(x => x.startsWith(`gen:${k}=`))?.split("=")[1];
  // Guard numeric tags: a malformed value would previously become NaN and
  // clobber the default; keep the default instead.
  const getNum = (k: string, fallback: number): number => {
    const n = Number(get(k) ?? fallback);
    return Number.isFinite(n) ? n : fallback;
  };
  next.enabled = get("enabled") === "1";
  next.steps = getNum("steps", next.steps);
  next.cfgScale = getNum("cfg", next.cfgScale);
  next.creativity = getNum("creativity", next.creativity);
  next.lockSeed = get("seedlock") === "1";
  next.seed = getNum("seed", next.seed);
  return next;
}, []);
// Open settings modal
// Hydrates every settings field from the current project (tags encode most
// options), restores the locally-persisted negative prompt, refreshes the
// model dropdown lists, then shows the modal.
const openSettingsModal = useCallback(async () => {
  if (!project) return;
  setSettingsTitle(project.title || "");
  setSettingsLogline(project.logline || "");
  setSettingsPlatform(project.platformPreset || "youtube_16_9");
  setSettingsContentRating(project.contentRating || "sfw");
  const parsed = parseTagsFromProject(project);
  setSettingsGoal(parsed.goal);
  setSettingsVisualStyle(parsed.visualStyle);
  setSettingsTones(parsed.tones.length > 0 ? parsed.tones : ["Documentary", "Calm"]);
  setSettingsSceneCount(parsed.sceneCount);
  setSettingsSceneDuration(parsed.sceneDuration);
  setSettingsLockIdentity(parsed.lockIdentity);
  // Set the LLM model from current selection or project tags
  const tags = (project as any).tags || [];
  const llmTag = tags.find((t: string) => t.startsWith("llm:"));
  setSettingsLLMModel(llmTag ? llmTag.replace("llm:", "") : selectedLLMModel);
  // Initialize video generation enabled state from tags or enableVideoGeneration prop
  const hasVideoMode = tags.includes("mode:video") ||
    tags.includes("projectType:video") ||
    tags.includes("projectType:video_series") ||
    project?.metadata?.generationMode === "video" ||
    enableVideoGeneration === true;
  setSettingsEnableVideo(hasVideoMode);
  // Initialize generation params from project tags
  const parsedGenParams = parseGenParamsFromTags(tags);
  // Also restore negative prompt from localStorage
  try {
    const key = `creatorstudio:genparams:${projectId}:neg`;
    const stored = localStorage.getItem(key);
    if (stored) {
      const neg = JSON.parse(stored);
      parsedGenParams.useCustomNegativePrompt = Boolean(neg?.use);
      parsedGenParams.customNegativePrompt = String(neg?.text || "");
    }
  } catch {}
  setGenParams(parsedGenParams);
  // Initialize image/video model overrides from project tags or props
  const imageModelTag = tags.find((t: string) => t.startsWith("imageModel:"));
  setSettingsImageModel(imageModelTag?.replace("imageModel:", "") || imageModel || "");
  const videoModelTag = tags.find((t: string) => t.startsWith("videoModel:"));
  setSettingsVideoModel(videoModelTag?.replace("videoModel:", "") || videoModel || "");
  // Fetch available models so the dropdowns are populated.
  // Use Promise.allSettled so one failing endpoint no longer discards the
  // results of the others (Promise.all rejected wholesale before).
  setLoadingModels(true);
  const [llmRes, imgRes, vidRes] = await Promise.allSettled([
    fetchApi<{ models: { id: string; name?: string }[] }>('/models?provider=ollama'),
    fetchApi<{ models: string[] }>('/models?provider=comfyui&model_type=image'),
    fetchApi<{ models: string[] }>('/models?provider=comfyui&model_type=video'),
  ]);
  if (llmRes.status === 'fulfilled' && llmRes.value.models) {
    setAvailableLLMModels(llmRes.value.models.map(m => ({ id: m.id, name: m.name || m.id })));
  } else if (llmRes.status === 'rejected') {
    console.log('[CreatorStudioEditor] Failed to fetch LLM models for settings:', llmRes.reason);
  }
  if (imgRes.status === 'fulfilled' && imgRes.value.models) {
    setAvailableImageModels(imgRes.value.models.map(m => ({ id: m, name: m })));
  } else if (imgRes.status === 'rejected') {
    console.log('[CreatorStudioEditor] Failed to fetch image models for settings:', imgRes.reason);
  }
  if (vidRes.status === 'fulfilled' && vidRes.value.models) {
    setAvailableVideoModels(vidRes.value.models.map(m => ({ id: m, name: m })));
  } else if (vidRes.status === 'rejected') {
    console.log('[CreatorStudioEditor] Failed to fetch video models for settings:', vidRes.reason);
  }
  setLoadingModels(false);
  setShowSettingsModal(true);
}, [project, projectId, parseTagsFromProject, parseGenParamsFromTags, selectedLLMModel, imageModel, videoModel, enableVideoGeneration, fetchApi]);
// Toggle tone in settings
const toggleSettingsTone = useCallback((tone: string) => {
  setSettingsTones((prev) =>
    prev.includes(tone) ? prev.filter((t) => t !== tone) : [...prev, tone]
  );
}, []);
// Build tags from settings
// Serializes the settings modal state into the "key:value" tag convention.
const buildTagsFromSettings = useCallback(() => {
  const tags: string[] = [];
  const slug = (s: string) => s.toLowerCase().replace(/ /g, "_");
  // Video generation mode - critical for "Make Video" button visibility
  tags.push(`mode:${settingsEnableVideo ? "video" : "slideshow"}`);
  if (settingsGoal) tags.push(`goal:${settingsGoal.toLowerCase()}`);
  if (settingsVisualStyle) tags.push(`visual:${slug(settingsVisualStyle)}`);
  for (const t of settingsTones) tags.push(`tone:${slug(t)}`);
  if (settingsLockIdentity) tags.push("lock:identity");
  tags.push(`scenes:${settingsSceneCount}`, `duration:${settingsSceneDuration}`);
  if (settingsLLMModel) tags.push(`llm:${settingsLLMModel}`);
  // Model overrides - allows per-project model selection
  if (settingsImageModel) tags.push(`imageModel:${settingsImageModel}`);
  if (settingsVideoModel) tags.push(`videoModel:${settingsVideoModel}`);
  return tags;
}, [settingsEnableVideo, settingsGoal, settingsVisualStyle, settingsTones, settingsLockIdentity, settingsSceneCount, settingsSceneDuration, settingsLLMModel, settingsImageModel, settingsVideoModel]);
// Build tags including generation params
const buildGenTags = useCallback((p: CreatorStudioGenerationParams) => {
  // Base settings tags minus any stale gen:* entries, then fresh gen:* values.
  const base = buildTagsFromSettings().filter((t) => !t.startsWith("gen:"));
  const flag = (b: boolean) => (b ? "1" : "0");
  return [
    ...base,
    `gen:enabled=${flag(p.enabled)}`,
    `gen:steps=${p.steps}`,
    `gen:cfg=${p.cfgScale}`,
    `gen:creativity=${p.creativity}`,
    `gen:seedlock=${flag(p.lockSeed)}`,
    `gen:seed=${p.seed}`,
  ];
}, [buildTagsFromSettings]);
// Save project settings
const saveProjectSettings = useCallback(async () => {
  if (!project) return;
  setIsSavingSettings(true);
  const title = settingsTitle.trim();
  const logline = settingsLogline.trim();
  try {
    const tags = buildGenTags(genParams);
    await patchApi(`/studio/videos/${projectId}`, {
      title,
      logline,
      platformPreset: settingsPlatform,
      contentRating: settingsContentRating,
      tags,
    });
    // Persist negative prompt locally (can be long, so not in tags)
    try {
      localStorage.setItem(
        `creatorstudio:genparams:${projectId}:neg`,
        JSON.stringify({
          use: genParams.useCustomNegativePrompt,
          text: genParams.customNegativePrompt,
        })
      );
    } catch {}
    // Mirror the saved values into local project state
    setProject((prev) =>
      prev
        ? {
            ...prev,
            title,
            logline,
            platformPreset: settingsPlatform,
            contentRating: settingsContentRating,
            tags,
          }
        : null
    );
    // Update selected models to match settings
    setSelectedLLMModel(settingsLLMModel);
    if (settingsImageModel) setSelectedImageModel(settingsImageModel);
    if (settingsVideoModel) setSelectedVideoModel(settingsVideoModel);
    setLastSaved(new Date());
    setShowSettingsModal(false);
    console.log('[CreatorStudioEditor] Project settings saved');
  } catch (e: any) {
    console.error('[CreatorStudioEditor] Failed to save settings:', e);
    alert(`Failed to save settings: ${e.message}`);
  } finally {
    setIsSavingSettings(false);
  }
}, [project, projectId, settingsTitle, settingsLogline, settingsPlatform, settingsContentRating, settingsLLMModel, settingsImageModel, settingsVideoModel, genParams, buildGenTags, patchApi]);
// Delete scene
const deleteScene = useCallback(async (sceneId: string) => {
  if (scenes.length <= 1) {
    alert("Cannot delete the last scene.");
    return;
  }
  if (!window.confirm("Delete this scene? This cannot be undone.")) {
    return;
  }
  try {
    console.log('[CreatorStudioEditor] Deleting scene:', sceneId);
    await deleteApi<{ ok: boolean }>(`/studio/videos/${projectId}/scenes/${sceneId}`);
    const removedAt = scenes.findIndex((s) => s.id === sceneId);
    // Drop the scene and compact indices so idx always matches array position
    setScenes((prev) =>
      prev.filter((s) => s.id !== sceneId).map((s, i) => ({ ...s, idx: i }))
    );
    // Keep the viewer pointed at the same (or previous) scene
    if (removedAt >= 0 && removedAt <= currentSceneIndex) {
      setCurrentSceneIndex((prev) => Math.max(0, prev - 1));
    }
    setLastSaved(new Date());
    console.log('[CreatorStudioEditor] Scene deleted successfully');
  } catch (e: any) {
    console.error('[CreatorStudioEditor] Failed to delete scene:', e);
    alert(`Failed to delete scene: ${e.message}`);
  }
}, [projectId, scenes, currentSceneIndex, deleteApi]);
// Fetch available models
// LLM and image model lists are fetched independently so one failure
// doesn't block the other; failures are logged, not surfaced.
const fetchAvailableModels = useCallback(async () => {
  // LLM (Ollama) models
  try {
    const llmData = await fetchApi<{ models: { id: string; name?: string }[] }>(
      '/models?provider=ollama'
    );
    if (llmData.models) {
      const models = llmData.models.map(m => ({ id: m.id, name: m.name || m.id }));
      setAvailableLLMModels(models);
      // Auto-select when nothing is chosen yet; prefer llama3:8b if present
      if (!selectedLLMModel && models.length > 0) {
        const preferred = models.find(m => m.id === "llama3:8b") || models[0];
        setSelectedLLMModel(preferred.id);
        setSettingsLLMModel(preferred.id);
      }
    }
  } catch (e) {
    console.log('[CreatorStudioEditor] Failed to fetch LLM models:', e);
  }
  // Image (ComfyUI) models
  try {
    const imgData = await fetchApi<{ models: string[] }>(
      '/models?provider=comfyui&model_type=image'
    );
    if (imgData.models) {
      setAvailableImageModels(imgData.models.map(m => ({ id: m, name: m })));
    }
  } catch (e) {
    console.log('[CreatorStudioEditor] Failed to fetch image models:', e);
  }
}, [fetchApi, selectedLLMModel]);
// Convert Creator Studio scene to TV Mode scene format
const sceneToTVScene = useCallback((scene: Scene): TVScene => {
  const hasImage = Boolean(scene.imageUrl);
  return {
    idx: scene.idx,
    narration: scene.narration || "",
    image_prompt: scene.imagePrompt || "",
    negative_prompt: scene.negativePrompt || "",
    duration_s: scene.durationSec || 5,
    tags: {},
    image_url: scene.imageUrl || null,
    // TV Mode only distinguishes ready vs pending
    status: scene.status === "ready" ? "ready" : "pending",
    imageStatus: hasImage ? "ready" : "pending",
  };
}, []);
// Enter TV Mode with current scenes
const handleEnterTVMode = useCallback(() => {
  if (!project || scenes.length === 0) return;
  enterTVMode(projectId, project.title, scenes.map(sceneToTVScene), currentSceneIndex);
}, [project, projectId, scenes, currentSceneIndex, enterTVMode, sceneToTVScene]);
// Editor playback with TTS - speak scene narration when playing.
// The advance-or-stop logic was previously duplicated in four branches;
// it is factored into a single local advance() helper (behavior unchanged).
useEffect(() => {
  if (!isPlaying || scenes.length === 0) {
    // Stop any ongoing speech when not playing
    if (window.SpeechService?.stopSpeaking) {
      window.SpeechService.stopSpeaking();
    }
    setIsTTSSpeaking(false);
    return;
  }
  // Advance to the next scene, or stop playback at the end.
  const advance = () => {
    if (currentSceneIndex < scenes.length - 1) {
      setCurrentSceneIndex((i) => i + 1);
    } else {
      setIsPlaying(false); // End of scenes
    }
  };
  const currentScene = scenes[currentSceneIndex];
  if (!currentScene?.narration) {
    // No narration, advance to next scene after a short delay
    const timer = setTimeout(advance, 2000);
    return () => clearTimeout(timer);
  }
  // Speak the current scene's narration
  const svc = window.SpeechService;
  if (!svc?.speak) {
    // Fallback if no TTS - use fixed timer
    const timer = setTimeout(advance, 5000);
    return () => clearTimeout(timer);
  }
  console.log(`[Editor] Speaking scene ${currentSceneIndex + 1} narration...`);
  setIsTTSSpeaking(true);
  svc.speak(currentScene.narration, {
    onStart: () => setIsTTSSpeaking(true),
    onEnd: () => {
      setIsTTSSpeaking(false);
      // Auto-advance to next scene after narration finishes
      advance();
    },
    onError: () => {
      setIsTTSSpeaking(false);
      // On error, advance anyway after delay
      setTimeout(advance, 2000);
    },
  });
  return () => {
    svc.stopSpeaking?.();
  };
}, [isPlaying, currentSceneIndex, scenes]);
// Generate image for a scene
// Calls the backend /chat "imagine" mode with the scene's image prompt, then
// persists the returned image URL via PATCH and mirrors it into local state.
// `force` bypasses the single-flight guard (e.g. for batch generation).
// NOTE(review): failures are only logged, never surfaced to the user — confirm intended.
const generateImageForScene = useCallback(
async (sceneId: string, imagePrompt: string, force: boolean = false) => {
if (isGeneratingImage && !force) {
console.log('[CreatorStudioEditor] Already generating image, skipping');
return;
}
setIsGeneratingImage(true);
console.log('[CreatorStudioEditor] Generating image for scene:', sceneId);
try {
// When images come from ComfyUI the chat provider is still ollama (the LLM side).
const llmProvider = imageProvider === 'comfyui' ? 'ollama' : imageProvider;
// Apply generation parameters if enabled
const effectiveSteps = genParams.enabled ? genParams.steps : imageSteps;
const effectiveCfg = genParams.enabled ? genParams.cfgScale : imageCfg;
// Get scene-level negative prompt if available
const scene = scenes.find(s => s.id === sceneId);
const sceneNeg = scene?.negativePrompt || "";
// Custom negative prompt (when enabled) is appended after the scene's own.
const combinedNegativePrompt = (genParams.enabled && genParams.useCustomNegativePrompt && genParams.customNegativePrompt.trim())
? [sceneNeg, genParams.customNegativePrompt.trim()].filter(Boolean).join(", ")
: sceneNeg || undefined;
// Send explicit width/height from wizard selection to backend
// This ensures the resolution selected in Step 2 is actually used
const data = await postApi<{ media?: { images?: string[] } }>(
'/chat',
{
message: `imagine ${imagePrompt}`,
mode: 'imagine',
provider: llmProvider,
imgModel: selectedImageModel || imageModel || undefined,
// Pass explicit resolution from wizard (if available)
// Backend will use these values directly instead of computing from aspect ratio
...(imageWidth ? { imgWidth: imageWidth } : {}),
...(imageHeight ? { imgHeight: imageHeight } : {}),
imgSteps: effectiveSteps,
imgCfg: effectiveCfg,
negativePrompt: combinedNegativePrompt,
imgSeed: genParams.enabled && genParams.lockSeed ? genParams.seed : undefined,
creativity: genParams.enabled ? genParams.creativity : undefined,
promptRefinement: false,
}
);
// Only the first returned image is used for the scene.
const imageUrl = data?.media?.images?.[0];
if (imageUrl) {
console.log('[CreatorStudioEditor] Image generated:', imageUrl);
// Persist to backend first, then mirror into local state.
await patchApi(`/studio/videos/${projectId}/scenes/${sceneId}`, toScenePatch({
imageUrl,
status: 'ready',
}));
setScenes((prev) =>
prev.map((s) =>
s.id === sceneId ? { ...s, imageUrl, status: 'ready' as SceneStatus } : s
)
);
setLastSaved(new Date());
} else {
console.warn('[CreatorStudioEditor] No image returned from backend');
}
} catch (e: any) {
console.error('[CreatorStudioEditor] Failed to generate image:', e);
} finally {
setIsGeneratingImage(false);
}
},
[projectId, imageProvider, imageModel, selectedImageModel, imageWidth, imageHeight, imageSteps, imageCfg, postApi, patchApi, isGeneratingImage, toScenePatch, genParams, scenes]
);
// Generate video for a scene (converts image to video)
// Calls the backend `/chat` endpoint in 'animate' mode with the scene's
// image URL embedded in the message text, then persists the returned video
// URL (proxied for correct Content-Type headers) on the scene record and
// mirrors it into local state. A state-based lock ensures only one video
// generation runs at a time.
const generateVideoForScene = useCallback(
  async (sceneId: string, imageUrl: string, prompt: string) => {
    // Re-entrancy guard: skip if a video job is already running.
    if (isGeneratingVideo) {
      console.log('[CreatorStudioEditor] Already generating video, skipping');
      return;
    }
    // Nothing to animate without a source image.
    if (!imageUrl) {
      console.warn('[CreatorStudioEditor] No image URL to animate');
      return;
    }
    setIsGeneratingVideo(true);
    console.log('[CreatorStudioEditor] Generating video for scene:', sceneId);
    try {
      // IMPORTANT:
      // Backend animate mode detects the reference image via URL in the message (in this project version).
      // Backend also expects vidModel (not videoModel).
      const data = await postApi<{ media?: any }>(
        '/chat',
        {
          message: `${prompt || 'Animate this scene with subtle motion'} ${imageUrl}`,
          mode: 'animate',
          provider: 'ollama',
          vidModel: selectedVideoModel || videoModel || undefined,
          // Apply generation parameters only when the user enabled them.
          vidSeed: genParams.enabled && genParams.lockSeed ? genParams.seed : undefined,
          vidSteps: genParams.enabled ? genParams.steps : undefined,
          vidCfg: genParams.enabled ? genParams.cfgScale : undefined,
          creativity: genParams.enabled ? genParams.creativity : undefined,
        }
      );
      // Backend returns media.video_url (NOT always media.videos[]).
      const rawVideoUrl =
        data?.media?.video_url ||
        data?.media?.videos?.[0] ||
        null;
      if (rawVideoUrl) {
        // Proxy the URL for correct Content-Type headers (WebM playback)
        const proxiedVideoUrl = proxyVideoUrl(rawVideoUrl) || rawVideoUrl;
        console.log('[CreatorStudioEditor] Video generated:', rawVideoUrl, '-> proxied:', proxiedVideoUrl);
        // Persist first, then mirror into local state so UI and backend agree.
        await patchApi(`/studio/videos/${projectId}/scenes/${sceneId}`, toScenePatch({
          videoUrl: proxiedVideoUrl,
          status: 'ready',
        }));
        setScenes((prev) =>
          prev.map((s) =>
            s.id === sceneId ? { ...s, videoUrl: proxiedVideoUrl, status: 'ready' as SceneStatus } : s
          )
        );
        setLastSaved(new Date());
      } else {
        console.warn('[CreatorStudioEditor] No video returned from backend');
      }
    } catch (e: any) {
      console.error('[CreatorStudioEditor] Failed to generate video:', e);
    } finally {
      setIsGeneratingVideo(false);
    }
  },
  [projectId, videoModel, selectedVideoModel, postApi, patchApi, isGeneratingVideo, toScenePatch, proxyVideoUrl, genParams]
);
// Strip the generated video off a scene (after user confirmation) so it
// falls back to showing the still image. The change is persisted via the
// scenes PATCH endpoint and mirrored into local state.
const removeVideoForScene = useCallback(async (sceneId: string) => {
  const confirmed = window.confirm("Remove the video for this scene and keep the image?");
  if (!confirmed) return;
  try {
    const patch = toScenePatch({ videoUrl: null, status: 'ready' });
    await patchApi(`/studio/videos/${projectId}/scenes/${sceneId}`, patch);
    setScenes((prev) =>
      prev.map((s) =>
        s.id === sceneId ? { ...s, videoUrl: null, status: 'ready' as SceneStatus } : s
      )
    );
    setLastSaved(new Date());
  } catch (e: any) {
    console.error('[CreatorStudioEditor] Failed to remove video:', e);
    alert(`Failed to remove video: ${e.message}`);
  }
}, [projectId, patchApi, toScenePatch]);
// Generate scene from outline
// Ask the backend to materialize outline scene `sceneIndex` as a real scene
// record, append it to local state, select it, and kick off (fire-and-forget)
// image generation for it.
const generateSceneFromOutline = useCallback(async (sceneIndex: number) => {
  // Guard against missing outline or an out-of-range index.
  if (!storyOutline || sceneIndex >= storyOutline.scenes.length) return;
  setIsGeneratingScene(true);
  try {
    const data = await postApi<{ ok: boolean; scene: Scene }>(
      `/studio/videos/${projectId}/scenes/generate-from-outline?scene_index=${sceneIndex}`,
      {}
    );
    if (data.ok && data.scene) {
      setScenes((prev) => [...prev, data.scene]);
      // Select the newly appended scene (old length == new scene's index).
      setCurrentSceneIndex(scenes.length);
      setLastSaved(new Date());
      // Not awaited: image generation continues in the background.
      generateImageForScene(data.scene.id, data.scene.imagePrompt);
    }
  } catch (e: any) {
    console.error('[CreatorStudioEditor] Failed to generate scene from outline:', e);
    alert(`Failed to generate scene: ${e.message}`);
  } finally {
    setIsGeneratingScene(false);
  }
}, [projectId, storyOutline, scenes.length, postApi, generateImageForScene]);
// Generate ALL scenes from outline in sequence (batch generation)
// Three-phase pipeline driven by the story outline:
//   1) create every scene record (no media yet, fast),
//   2) generate one image per created scene,
//   3) optionally animate each image into a video (when projectWantsVideo).
// Individual failures are logged and skipped so one bad scene does not abort
// the whole batch; progress is surfaced to the UI via batchProgress.
const generateAllScenesFromOutline = useCallback(async () => {
  if (!storyOutline || !storyOutline.scenes || storyOutline.scenes.length === 0) {
    console.log('[CreatorStudioEditor] No outline scenes to generate');
    return;
  }
  const totalScenes = storyOutline.scenes.length;
  console.log(`[CreatorStudioEditor] Starting batch generation of ${totalScenes} scenes`);
  setIsBatchGenerating(true);
  setBatchProgress({ current: 0, total: totalScenes, phase: 'scene' });
  // Local list of successfully created scenes; phases 2/3 iterate this array
  // instead of React state to avoid stale-closure reads of `scenes`.
  const generatedScenes: Scene[] = [];
  try {
    // Phase 1: Generate all scenes first (without images for speed)
    for (let i = 0; i < totalScenes; i++) {
      setBatchProgress({ current: i + 1, total: totalScenes, phase: 'scene' });
      console.log(`[CreatorStudioEditor] Generating scene ${i + 1}/${totalScenes}`);
      try {
        const data = await postApi<{ ok: boolean; scene: Scene }>(
          `/studio/videos/${projectId}/scenes/generate-from-outline?scene_index=${i}`,
          {}
        );
        if (data.ok && data.scene) {
          generatedScenes.push(data.scene);
          setScenes((prev) => [...prev, data.scene]);
          setCurrentSceneIndex(i);
        }
      } catch (sceneErr: any) {
        console.error(`[CreatorStudioEditor] Failed to generate scene ${i + 1}:`, sceneErr);
        // Continue with remaining scenes even if one fails
      }
      // Small delay between scene creations to avoid overwhelming the server
      if (i < totalScenes - 1) {
        await new Promise(resolve => setTimeout(resolve, 200));
      }
    }
    console.log(`[CreatorStudioEditor] Created ${generatedScenes.length} scenes, now generating images...`);
    // Phase 2: Generate images for all scenes
    const scenesWithImages: Array<{ scene: Scene; imageUrl: string }> = [];
    for (let i = 0; i < generatedScenes.length; i++) {
      const scene = generatedScenes[i];
      setBatchProgress({ current: i + 1, total: generatedScenes.length, phase: 'image' });
      console.log(`[CreatorStudioEditor] Generating image ${i + 1}/${generatedScenes.length}`);
      try {
        // Generate image inline (can't use generateImageForScene due to isGeneratingImage lock)
        // NOTE(review): unlike the single-scene path, this inline call uses a
        // fixed 16:9 aspect ratio and skips custom negative prompts/genParams
        // — confirm whether batch generation should honor those too.
        const llmProvider = imageProvider === 'comfyui' ? 'ollama' : imageProvider;
        const data = await postApi<{ media?: { images?: string[] } }>(
          '/chat',
          {
            message: `imagine ${scene.imagePrompt}`,
            mode: 'imagine',
            provider: llmProvider,
            imgModel: selectedImageModel || imageModel || undefined,
            imgAspectRatio: '16:9',
            imgSteps: imageSteps,
            imgCfg: imageCfg,
            promptRefinement: false,
          }
        );
        const imageUrl = data?.media?.images?.[0];
        if (imageUrl) {
          // If a video phase follows, leave the scene in 'generating' state.
          await patchApi(`/studio/videos/${projectId}/scenes/${scene.id}`, toScenePatch({
            imageUrl,
            status: projectWantsVideo ? 'generating' : 'ready',
          }));
          setScenes((prev) =>
            prev.map((s) =>
              s.id === scene.id ? { ...s, imageUrl, status: (projectWantsVideo ? 'generating' : 'ready') as SceneStatus } : s
            )
          );
          scenesWithImages.push({ scene, imageUrl });
        }
      } catch (imgErr: any) {
        console.error(`[CreatorStudioEditor] Failed to generate image for scene ${i + 1}:`, imgErr);
        // Continue with remaining images
      }
    }
    // Phase 3: Generate videos from images (if enabled)
    if (projectWantsVideo && scenesWithImages.length > 0) {
      console.log(`[CreatorStudioEditor] Phase 3: Generating ${scenesWithImages.length} videos...`);
      for (let i = 0; i < scenesWithImages.length; i++) {
        const { scene, imageUrl } = scenesWithImages[i];
        setBatchProgress({ current: i + 1, total: scenesWithImages.length, phase: 'video' });
        console.log(`[CreatorStudioEditor] Generating video ${i + 1}/${scenesWithImages.length}`);
        try {
          // Use the animate endpoint to generate video from image
          // IMPORTANT: Include imageUrl in message and use vidModel (not videoModel)
          const data = await postApi<{ media?: any }>(
            '/chat',
            {
              message: `${scene.imagePrompt} ${imageUrl}`,
              mode: 'animate',
              provider: 'ollama',
              vidModel: selectedVideoModel || videoModel || undefined,
            }
          );
          // Backend returns media.video_url (NOT always media.videos[])
          const rawVideoUrl =
            data?.media?.video_url ||
            data?.media?.videos?.[0] ||
            null;
          if (rawVideoUrl) {
            // Proxy the URL for correct Content-Type headers (WebM playback)
            const proxiedVideoUrl = proxyVideoUrl(rawVideoUrl) || rawVideoUrl;
            await patchApi(`/studio/videos/${projectId}/scenes/${scene.id}`, toScenePatch({
              videoUrl: proxiedVideoUrl,
              status: 'ready',
            }));
            setScenes((prev) =>
              prev.map((s) =>
                s.id === scene.id ? { ...s, videoUrl: proxiedVideoUrl, status: 'ready' as SceneStatus } : s
              )
            );
          } else {
            // No video generated, mark scene as ready anyway (image only)
            await patchApi(`/studio/videos/${projectId}/scenes/${scene.id}`, toScenePatch({
              status: 'ready',
            }));
            setScenes((prev) =>
              prev.map((s) =>
                s.id === scene.id ? { ...s, status: 'ready' as SceneStatus } : s
              )
            );
          }
        } catch (vidErr: any) {
          console.error(`[CreatorStudioEditor] Failed to generate video for scene ${i + 1}:`, vidErr);
          // Mark scene as ready anyway (fallback to image only)
          try {
            await patchApi(`/studio/videos/${projectId}/scenes/${scene.id}`, toScenePatch({ status: 'ready' }));
            setScenes((prev) =>
              prev.map((s) =>
                s.id === scene.id ? { ...s, status: 'ready' as SceneStatus } : s
              )
            );
          } catch {}
        }
      }
    }
    setLastSaved(new Date());
    setCurrentSceneIndex(0); // Go back to first scene
    console.log('[CreatorStudioEditor] Batch generation complete!');
    // Sync outline with actual scene data to keep them in sync
    await syncOutlineWithScenes();
    // Final refresh so UI shows all persisted videoUrl values
    await refreshScenes();
  } catch (e: any) {
    console.error('[CreatorStudioEditor] Batch generation failed:', e);
  } finally {
    setIsBatchGenerating(false);
    setBatchProgress({ current: 0, total: 0, phase: 'scene' });
  }
}, [storyOutline, projectId, postApi, patchApi, imageProvider, imageModel, selectedImageModel, imageSteps, imageCfg, projectWantsVideo, videoModel, selectedVideoModel, syncOutlineWithScenes, toScenePatch, refreshScenes, proxyVideoUrl]);
// Load project and scenes
// Initial data load: fetch the project record and its scenes in parallel,
// normalize the scene payload into the local Scene shape, and seed the LLM
// model selection from a project tag of the form "llm:<model>" when no
// default model is set yet.
useEffect(() => {
  async function loadData() {
    setLoading(true);
    setError(null);
    try {
      const [projectRes, scenesRes] = await Promise.all([
        fetchApi<{ video: Project }>(`/studio/videos/${projectId}`),
        fetchApi<{ scenes: any[] }>(`/studio/videos/${projectId}/scenes`),
      ]);
      setProject(projectRes.video);
      setScenes(normalizeScenes((scenesRes as any).scenes));
      // Extract LLM model from project tags if not already set
      const tags = (projectRes.video as any).tags || [];
      const llmTag = tags.find((t: string) => t.startsWith("llm:"));
      if (llmTag && !defaultLLMModel) {
        const modelFromTag = llmTag.replace("llm:", "");
        setSelectedLLMModel(modelFromTag);
        setSettingsLLMModel(modelFromTag);
      }
    } catch (e: any) {
      setError(e.message || String(e));
    } finally {
      setLoading(false);
    }
  }
  loadData();
}, [projectId, fetchApi, defaultLLMModel, normalizeScenes]);
// Load available models and existing outline on mount
// (re-runs only if the identity of either callback changes).
useEffect(() => {
  fetchAvailableModels();
  loadStoryOutline();
}, [fetchAvailableModels, loadStoryOutline]);
// Detect whether the browser can play WebM videos.
// Probes a detached <video> element once on mount against a few WebM MIME
// strings; any non-empty canPlayType() answer counts as supported.
useEffect(() => {
  try {
    const probe = document.createElement("video");
    const candidates = [
      'video/webm; codecs="vp8, vorbis"',
      'video/webm; codecs="vp9"',
      "video/webm",
    ];
    const supported = candidates.some((mime) => Boolean(probe.canPlayType(mime)));
    setCanPlayWebm(supported);
    console.log('[CreatorStudioEditor] WebM playback support:', supported);
  } catch {
    // e.g. non-browser environment — assume no WebM support.
    setCanPlayWebm(false);
  }
}, []);
// Auto-generate outline when project is newly created
// Fires at most once (hasAutoGeneratedOutline latch) after the project has
// loaded, when no outline exists yet and an LLM model has been selected.
useEffect(() => {
  if (
    autoGenerateFirst &&
    !hasAutoGeneratedOutline &&
    !loading &&
    project &&
    !storyOutline &&
    !isGeneratingOutline &&
    selectedLLMModel // Wait until we have a model selected
  ) {
    console.log('[CreatorStudioEditor] Auto-generating story outline for new project');
    // Set the latch before kicking off generation so re-renders don't re-fire.
    setHasAutoGeneratedOutline(true);
    generateStoryOutline();
  }
}, [autoGenerateFirst, hasAutoGeneratedOutline, loading, project, storyOutline, isGeneratingOutline, selectedLLMModel, generateStoryOutline]);
// Current scene
// The scene currently selected in the chip rail (null when index is out of range).
const currentScene = scenes[currentSceneIndex] || null;
// Read the "visual:*" tag off the project (set at creation time) and turn it
// into a human-readable style name (underscores become spaces).
// Falls back to "cinematic" when the project or tag is absent.
const getVisualStyle = useCallback(() => {
  const tags: string[] = (project as any)?.tags || [];
  const prefix = "visual:";
  const visualTag = tags.find((t) => t.startsWith(prefix));
  if (!visualTag) return "cinematic";
  return visualTag.slice(prefix.length).replace(/_/g, " ");
}, [project]);
// Collect all "tone:*" tags off the project as human-readable tone names
// (underscores become spaces). Falls back to ["documentary"] when the
// project is absent or carries no tone tags.
const getTones = useCallback(() => {
  const tags: string[] = (project as any)?.tags || [];
  const prefix = "tone:";
  const tones = tags
    .filter((t) => t.startsWith(prefix))
    .map((t) => t.slice(prefix.length).replace(/_/g, " "));
  return tones.length > 0 ? tones : ["documentary"];
}, [project]);
// Generate AI-powered scene with better prompts
// Creates the first scene, preferring the story outline's first scene when
// available; otherwise synthesizes narration/prompt text from project
// metadata (visual style + tone tags + logline). Kicks off image generation
// for the new scene in the background.
const generateFirstSceneWithAI = useCallback(async () => {
  if (!project || isGeneratingScene) return;
  setIsGeneratingScene(true);
  try {
    let narration: string;
    let imagePrompt: string;
    // Default negative prompt; overridden by the outline's when present.
    let negativePrompt: string = "blurry, low quality, text, watermark, ugly, deformed, disfigured, bad anatomy, worst quality, low resolution, duplicate, clone, multiple people, two heads, two faces, split image, extra limbs";
    if (storyOutline && storyOutline.scenes && storyOutline.scenes.length > 0) {
      const outlineScene = storyOutline.scenes[0];
      narration = outlineScene.narration;
      imagePrompt = outlineScene.image_prompt;
      negativePrompt = outlineScene.negative_prompt || negativePrompt;
      console.log('[CreatorStudioEditor] Using story outline for first scene');
    } else {
      // No outline: build prompts from project tags and logline.
      const visualStyle = getVisualStyle();
      const tones = getTones();
      const toneDesc = tones.join(", ");
      narration = `The story begins. ${project.logline || `Welcome to "${project.title}".`}`;
      imagePrompt = `${visualStyle} style, ${project.logline || project.title}, opening scene, establishing shot, ${toneDesc} mood, high quality, detailed, 4k, masterpiece`;
    }
    const data = await postApi<{ scene: Scene }>(
      `/studio/videos/${projectId}/scenes`,
      {
        narration,
        imagePrompt,
        negativePrompt,
        durationSec: 5.0,
      }
    );
    setScenes((prev) => [...prev, data.scene]);
    setCurrentSceneIndex(0);
    setLastSaved(new Date());
    console.log('[CreatorStudioEditor] Auto-generating image for first scene');
    // Not awaited: image generation continues in the background.
    generateImageForScene(data.scene.id, data.scene.imagePrompt);
  } catch (e: any) {
    console.error('[CreatorStudioEditor] Failed to create scene:', e);
    alert(`Failed to create scene: ${e.message}`);
  } finally {
    setIsGeneratingScene(false);
  }
}, [project, projectId, isGeneratingScene, postApi, getVisualStyle, getTones, storyOutline, generateImageForScene]);
// Auto-generate ALL scenes after outline is generated (batch generation)
// Fires at most once (hasAutoTriggered latch): once the outline exists and
// the project has no scenes yet, kick off the full batch pipeline.
useEffect(() => {
  if (
    autoGenerateFirst &&
    !hasAutoTriggered &&
    !loading &&
    project &&
    storyOutline &&
    storyOutline.scenes &&
    storyOutline.scenes.length > 0 &&
    scenes.length === 0 &&
    !isGeneratingScene &&
    !isBatchGenerating
  ) {
    console.log('[CreatorStudioEditor] Auto-generating ALL scenes from outline');
    // Set the latch before starting so re-renders don't re-trigger the batch.
    setHasAutoTriggered(true);
    generateAllScenesFromOutline();
  }
}, [autoGenerateFirst, hasAutoTriggered, loading, project, storyOutline, scenes.length, isGeneratingScene, isBatchGenerating, generateAllScenesFromOutline]);
// Create an initial scene from basic project metadata (no outline, no LLM).
// Plain fallback used when the AI-powered first-scene path is not wanted:
// posts a template scene to the backend, appends it locally, and selects it.
const generateFirstScene = useCallback(async () => {
  if (!project || isGeneratingScene) return;
  setIsGeneratingScene(true);
  try {
    const payload = {
      narration: `Opening scene for "${project.title}"`,
      imagePrompt: `${project.logline || project.title}, cinematic, high quality, detailed`,
      negativePrompt: "blurry, low quality, text, watermark",
      durationSec: 5.0,
    };
    const data = await postApi<{ scene: Scene }>(
      `/studio/videos/${projectId}/scenes`,
      payload
    );
    setScenes((prev) => [...prev, data.scene]);
    setCurrentSceneIndex(0);
    setLastSaved(new Date());
  } catch (e: any) {
    alert(`Failed to create scene: ${e.message}`);
  } finally {
    setIsGeneratingScene(false);
  }
}, [project, projectId, isGeneratingScene, postApi]);
// Generate next scene with AI-powered prompts (uses backend outline for reliability)
// Three-tier strategy for appending a scene:
//   1) backend outline (reads the persisted outline by scene index),
//   2) AI continuation derived from previous scenes,
//   3) template fallback built from project tags/logline.
// Whichever path succeeds also triggers image generation (fire-and-forget)
// and a sync of the outline with the actual scene data.
const generateNextScene = useCallback(async () => {
  if (!project || isGeneratingScene) return;
  setIsGeneratingScene(true);
  try {
    const nextSceneIndex = scenes.length;
    // First, try to generate from outline via backend (reads outline from database - most reliable)
    try {
      const data = await postApi<{ ok: boolean; scene: Scene; from_outline?: boolean }>(
        `/studio/videos/${projectId}/scenes/generate-from-outline?scene_index=${nextSceneIndex}`,
        {}
      );
      if (data.ok && data.scene) {
        setScenes((prev) => [...prev, data.scene]);
        setCurrentSceneIndex(nextSceneIndex);
        setLastSaved(new Date());
        console.log(`[CreatorStudioEditor] Generated scene ${nextSceneIndex + 1} from outline`);
        generateImageForScene(data.scene.id, data.scene.imagePrompt);
        // Sync outline with new scene
        syncOutlineWithScenes();
        return;
      }
    } catch (outlineErr: any) {
      // Outline not available or scene index out of range - try AI continuation
      console.log('[CreatorStudioEditor] No outline scene available, trying AI continuation:', outlineErr.message);
    }
    // Second: Try AI-powered continuation based on previous scenes
    // (only makes sense when there is at least one scene to continue from).
    if (scenes.length > 0) {
      try {
        console.log('[CreatorStudioEditor] Generating AI continuation from previous context...');
        const contData = await postApi<{ ok: boolean; scene: Scene; from_continuation?: boolean }>(
          `/studio/videos/${projectId}/scenes/generate-continuation`,
          {}
        );
        if (contData.ok && contData.scene) {
          setScenes((prev) => [...prev, contData.scene]);
          setCurrentSceneIndex(nextSceneIndex);
          setLastSaved(new Date());
          console.log(`[CreatorStudioEditor] Generated scene ${nextSceneIndex + 1} via AI continuation`);
          generateImageForScene(contData.scene.id, contData.scene.imagePrompt);
          // Sync outline with new scene
          syncOutlineWithScenes();
          return;
        }
      } catch (contErr: any) {
        console.log('[CreatorStudioEditor] AI continuation failed, using fallback:', contErr.message);
      }
    }
    // Final fallback: Generate a scene based on project settings (no outline, no AI)
    const sceneNum = scenes.length + 1;
    const visualStyle = getVisualStyle();
    const tones = getTones();
    const toneDesc = tones.join(", ");
    const narration = `Scene ${sceneNum}. ${project.logline || `The story of "${project.title}" continues...`}`;
    const imagePrompt = `${visualStyle} style, ${project.logline || project.title}, scene ${sceneNum}, ${toneDesc} mood, high quality, detailed, 4k, masterpiece`;
    const negativePrompt = "blurry, low quality, text, watermark, ugly, deformed, disfigured, bad anatomy, worst quality, low resolution, duplicate, clone, multiple people, two heads, two faces, split image, extra limbs";
    const data = await postApi<{ scene: Scene }>(
      `/studio/videos/${projectId}/scenes`,
      {
        narration,
        imagePrompt,
        negativePrompt,
        durationSec: 5.0,
      }
    );
    setScenes((prev) => [...prev, data.scene]);
    setCurrentSceneIndex(scenes.length);
    setLastSaved(new Date());
    console.log('[CreatorStudioEditor] Generated scene with fallback content:', sceneNum);
    generateImageForScene(data.scene.id, data.scene.imagePrompt);
    // Sync outline with new scene
    syncOutlineWithScenes();
  } catch (e: any) {
    alert(`Failed to create scene: ${e.message}`);
  } finally {
    setIsGeneratingScene(false);
  }
}, [project, projectId, scenes.length, isGeneratingScene, postApi, getVisualStyle, getTones, generateImageForScene, syncOutlineWithScenes]);
// Generate next scene for TV Mode (uses backend outline for reliability)
// Same three-tier strategy as generateNextScene (outline → AI continuation →
// template fallback), but returns the new scene converted to a TVScene for
// the TV-mode store instead of updating the selection, and returns null on
// failure rather than alerting.
const generateNextForTVMode = useCallback(async () => {
  if (!project || isGeneratingScene) return null;
  try {
    const nextSceneIndex = scenes.length;
    // First, try to generate from outline via backend
    try {
      const data = await postApi<{ ok: boolean; scene: Scene; from_outline?: boolean }>(
        `/studio/videos/${projectId}/scenes/generate-from-outline?scene_index=${nextSceneIndex}`,
        {}
      );
      if (data.ok && data.scene) {
        setScenes((prev) => [...prev, data.scene]);
        console.log(`[CreatorStudioEditor] TV Mode: Generated scene ${nextSceneIndex + 1} from outline`);
        return sceneToTVScene(data.scene);
      }
    } catch (outlineErr: any) {
      console.log('[CreatorStudioEditor] TV Mode: No outline available, trying AI continuation');
    }
    // Second: Try AI-powered continuation based on previous scenes
    if (scenes.length > 0) {
      try {
        console.log('[CreatorStudioEditor] TV Mode: Generating AI continuation from previous context...');
        const contData = await postApi<{ ok: boolean; scene: Scene; from_continuation?: boolean }>(
          `/studio/videos/${projectId}/scenes/generate-continuation`,
          {}
        );
        if (contData.ok && contData.scene) {
          setScenes((prev) => [...prev, contData.scene]);
          console.log(`[CreatorStudioEditor] TV Mode: Generated scene ${nextSceneIndex + 1} via AI continuation`);
          return sceneToTVScene(contData.scene);
        }
      } catch (contErr: any) {
        console.log('[CreatorStudioEditor] TV Mode: AI continuation failed, using fallback:', contErr.message);
      }
    }
    // Final fallback: Generate scene based on project settings
    const sceneNum = scenes.length + 1;
    const visualStyle = getVisualStyle();
    const tones = getTones();
    const toneDesc = tones.join(", ");
    const narration = `Scene ${sceneNum}. ${project.logline || `The story of "${project.title}" continues...`}`;
    const imagePrompt = `${visualStyle} style, ${project.logline || project.title}, scene ${sceneNum}, ${toneDesc} mood, high quality, detailed, 4k, masterpiece`;
    const negativePrompt = "blurry, low quality, text, watermark, ugly, deformed, disfigured, bad anatomy, worst quality, low resolution, duplicate, clone, multiple people, two heads, two faces, split image, extra limbs";
    const data = await postApi<{ scene: Scene }>(
      `/studio/videos/${projectId}/scenes`,
      {
        narration,
        imagePrompt,
        negativePrompt,
        durationSec: 5.0,
      }
    );
    setScenes((prev) => [...prev, data.scene]);
    return sceneToTVScene(data.scene);
  } catch (e: any) {
    console.error('[CreatorStudioEditor] Failed to generate scene for TV mode:', e);
    return null;
  }
}, [project, projectId, scenes.length, isGeneratingScene, postApi, getVisualStyle, getTones, sceneToTVScene]);
// Ensure image for TV Mode scene
const ensureImageForTVMode = useCallback((tvScene: TVScene) => {
const scene = scenes.find(s => s.idx === tvScene.idx);
if (!scene) return;
if (!tvScene.image_url && !tvScene.image) {
generateImageForScene(scene.id, scene.imagePrompt).then(() => {
const updatedScene = scenes.find(s => s.idx === tvScene.idx);
if (updatedScene?.imageUrl) {
updateSceneImageByIdx(tvScene.idx, updatedScene.imageUrl);
}
});
}
}, [scenes, generateImageForScene, updateSceneImageByIdx]);
// Status badge color
// Map a project status string to its badge styling (background + text colour
// classes and display label). Unknown statuses get neutral slate styling with
// the raw status string as the label.
const getStatusBadge = (status: string) => {
  const badges: Record<string, { bg: string; text: string; label: string }> = {
    draft: { bg: "bg-amber-500/20", text: "text-amber-300", label: "Draft" },
    approved: { bg: "bg-emerald-500/20", text: "text-emerald-300", label: "Finished" },
    in_review: { bg: "bg-cyan-500/20", text: "text-cyan-300", label: "In Review" },
    archived: { bg: "bg-slate-500/20", text: "text-slate-300", label: "Archived" },
  };
  return badges[status] ?? { bg: "bg-slate-500/20", text: "text-slate-300", label: status };
};
// Scene status indicator
// Tiny overlay badge reflecting a scene's generation status: 'ready' renders
// nothing, 'generating' shows a spinner, 'error' an alert icon, and 'pending'
// (or any unknown status) a neutral dot.
const SceneStatusIndicator = ({ status }: { status: SceneStatus }) => {
  if (status === 'ready') return null;
  if (status === 'generating') {
    return (
      <div className="w-4 h-4 rounded-full bg-black/60 flex items-center justify-center">
        <Loader2 size={10} className="text-cyan-400 animate-spin" />
      </div>
    );
  }
  if (status === 'error') {
    return (
      <div className="w-4 h-4 rounded-full bg-red-500/80 flex items-center justify-center">
        <AlertCircle size={10} className="text-white" />
      </div>
    );
  }
  // 'pending' and anything unrecognized fall through to the neutral dot.
  return (
    <div className="w-4 h-4 rounded-full bg-black/60 flex items-center justify-center">
      <div className="w-2 h-2 rounded-full bg-white/40" />
    </div>
  );
};
// Loading state
// Full-screen spinner while the initial project/scenes fetch is in flight.
if (loading) {
  return (
    <div className="min-h-screen w-full bg-gradient-to-b from-black via-[#0a0a0f] to-[#0f0f18] text-white flex items-center justify-center">
      <div className="flex flex-col items-center gap-4">
        <Loader2 className="w-10 h-10 animate-spin text-cyan-400" />
        <div className="text-white/60 text-sm">Loading project...</div>
      </div>
    </div>
  );
}
// Error state
// Shown when the fetch failed or returned no project; offers a way back out.
if (error || !project) {
  return (
    <div className="min-h-screen w-full bg-gradient-to-b from-black via-[#0a0a0f] to-[#0f0f18] text-white flex items-center justify-center">
      <div className="flex flex-col items-center gap-4 max-w-md text-center">
        <div className="w-16 h-16 rounded-2xl bg-red-500/10 border border-red-500/20 flex items-center justify-center mb-2">
          <AlertCircle size={28} className="text-red-400" />
        </div>
        <div className="text-red-400 text-lg font-medium">Failed to load project</div>
        <div className="text-white/50 text-sm">{error || "Project not found"}</div>
        <button
          onClick={onExit}
          className="mt-4 px-6 py-2.5 bg-white/5 hover:bg-white/10 border border-white/10 rounded-xl transition-colors text-sm"
        >
          ← Back to Studio
        </button>
      </div>
    </div>
  );
}
// Badge styling for the current project status (rendered in the header).
const statusBadge = getStatusBadge(project.status);
return (
<div className="min-h-screen w-full bg-gradient-to-b from-black via-[#0a0a0f] to-[#0f0f18] text-white flex flex-col">
{/* Batch Generation Progress Overlay */}
{isBatchGenerating && (
<div className="fixed inset-0 z-50 flex items-center justify-center bg-black/90 backdrop-blur-sm">
<div className="bg-[#1a1a2e] border border-white/10 rounded-2xl p-8 max-w-md w-full mx-4 shadow-2xl">
<div className="text-center">
{/* Animated Icon */}
<div className="mb-6 flex justify-center">
<div className="relative">
<div className="w-20 h-20 rounded-full border-4 border-[#3ea6ff]/20" />
<div
className="absolute inset-0 w-20 h-20 rounded-full border-4 border-transparent border-t-[#3ea6ff] animate-spin"
style={{ animationDuration: '1s' }}
/>
<div className="absolute inset-0 flex items-center justify-center">
{batchProgress.phase === 'scene' ? (
<Sparkles size={28} className="text-[#3ea6ff]" />
) : batchProgress.phase === 'image' ? (
<ImageIcon size={28} className="text-[#3ea6ff]" />
) : (
<Film size={28} className="text-[#3ea6ff]" />
)}
</div>
</div>
</div>
{/* Title */}
<h2 className="text-xl font-semibold text-white mb-2">
{batchProgress.phase === 'scene' ? 'Creating Scenes' :
batchProgress.phase === 'image' ? 'Generating Images' : 'Generating Videos'}
</h2>
{/* Progress Text */}
<p className="text-white/60 mb-6">
{batchProgress.phase === 'scene'
? `Building scene ${batchProgress.current} of ${batchProgress.total}...`
: batchProgress.phase === 'image'
? `Generating image ${batchProgress.current} of ${batchProgress.total}...`
: `Generating video ${batchProgress.current} of ${batchProgress.total}...`
}
</p>
{/* Progress Bar */}
<div className="mb-4">
<div className="h-2 bg-white/10 rounded-full overflow-hidden">
<div
className="h-full bg-gradient-to-r from-[#3ea6ff] to-[#6ec7ff] transition-all duration-300 ease-out"
style={{
width: batchProgress.total > 0
? `${(batchProgress.current / batchProgress.total) * 100}%`
: '0%'
}}
/>
</div>
<div className="mt-2 text-xs text-white/40">
{batchProgress.phase === 'scene'
? (projectWantsVideo ? 'Phase 1/3: Creating scenes' : 'Phase 1/2: Creating scenes')
: batchProgress.phase === 'image'
? (projectWantsVideo ? 'Phase 2/3: Generating images' : 'Phase 2/2: Generating images')
: 'Phase 3/3: Generating videos'
}
</div>
</div>
{/* Tip */}
<p className="text-xs text-white/30 mt-4">
{batchProgress.phase === 'video'
? 'Video generation may take several minutes per scene'
: 'This may take a few minutes depending on your hardware'
}
</p>
</div>
</div>
</div>
)}
{/* Header - Compact & Cinematic */}
<header className="flex items-center justify-between px-4 py-3 border-b border-white/5 bg-black/40 backdrop-blur-md">
<div className="flex items-center gap-4">
<button
onClick={onExit}
className="flex items-center gap-2 px-3 py-2 text-sm text-white/50 hover:text-white hover:bg-white/5 rounded-lg transition-all"
>
<ArrowLeft size={16} />
<span className="hidden sm:inline">Back</span>
</button>
<div className="h-6 w-px bg-white/10" />
<div>
<h1 className="text-base font-semibold text-white">{project.title}</h1>
<div className="text-xs text-white/40">
{scenes.length} scene{scenes.length !== 1 ? "s" : ""} • Creator Studio
</div>
</div>
</div>
<div className="flex items-center gap-2">
{/* Status Badge */}
<span className={`text-xs px-2.5 py-1 rounded-full font-medium ${statusBadge.bg} ${statusBadge.text}`}>
{statusBadge.label}
</span>
{/* Save Indicator */}
<div className="hidden sm:flex items-center gap-1.5 text-xs text-white/40 px-2">
{isSaving ? (
<>
<Loader2 size={12} className="animate-spin" />
<span>Saving...</span>
</>
) : lastSaved ? (
<>
<Check size={12} className="text-emerald-400" />
<span>Saved</span>
</>
) : null}
</div>
{/* Project Settings Button */}
<button
onClick={openSettingsModal}
className="p-2 bg-white/5 hover:bg-white/10 border border-white/10 rounded-lg transition-all"
title="Project Settings"
>
<Settings size={16} className="text-white/60" />
</button>
{/* Story Outline Button */}
<button
onClick={() => setShowOutlinePanel(true)}
className="flex items-center gap-2 px-3 py-2 bg-white/5 hover:bg-white/10 border border-white/10 rounded-lg text-sm transition-all"
title="Story Outline"
>
<Wand2 size={14} className="text-cyan-400" />
<span className="hidden sm:inline">Outline</span>
</button>
{/* Export Button */}
<button
className="flex items-center gap-2 px-3 py-2 bg-white/5 hover:bg-white/10 border border-white/10 rounded-lg text-sm transition-all"
title="Export project"
>
<Download size={14} />
<span className="hidden sm:inline">Export</span>
</button>
</div>
</header>
{/* Scene Chips Rail - Like Play Story */}
{scenes.length > 0 && (
<div className="w-full overflow-x-auto scrollbar-hide border-b border-white/5 bg-black/20">
<div className="flex gap-2 px-4 py-3 min-w-max">
{scenes.map((scene, idx) => {
const isActive = idx === currentSceneIndex;
const hasImage = Boolean(scene.imageUrl);
const hasVideo = Boolean(scene.videoUrl);
const isHovered = hoveredSceneIdx === idx;
const showDelete = isHovered && scenes.length > 1;
return (
<div
key={scene.id}
className="relative"
onMouseEnter={() => setHoveredSceneIdx(idx)}
onMouseLeave={() => setHoveredSceneIdx(null)}
>
<button
onClick={() => setCurrentSceneIndex(idx)}
className={`
relative rounded-lg overflow-hidden transition-all duration-200
${isActive
? "ring-2 ring-cyan-400 ring-offset-2 ring-offset-black scale-105"
: hasVideo
? "ring-1 ring-cyan-500/50 opacity-80 hover:opacity-100 hover:scale-102"
: "opacity-60 hover:opacity-100 hover:scale-102"
}
`}
type="button"
title={`Scene ${idx + 1}${hasVideo ? ' (Video)' : ''}`}
>
<div className="w-20 h-12 flex items-center justify-center bg-white/5">
{hasImage ? (
<img
src={scene.imageUrl!}
alt={`Scene ${idx + 1}`}
className="w-full h-full object-cover"
/>
) : (
<ImageIcon size={16} className="text-white/20" />
)}
</div>
{/* Subtle video indicator - small dot in corner */}
{hasVideo && (
<div className="absolute top-1 right-1 w-2 h-2 bg-cyan-400 rounded-full shadow-sm shadow-cyan-400/50" title="Has video" />
)}
{/* Status indicator */}
<div className="absolute bottom-1 right-1">
<SceneStatusIndicator status={scene.status} />
</div>
{/* Scene number */}
{!scene.status || scene.status === 'ready' ? (
<div className="absolute bottom-1 left-1 text-[10px] bg-black/70 px-1.5 rounded font-medium">
{idx + 1}
</div>
) : null}
</button>
{/* Delete button */}
{showDelete && (
<button
onClick={(e) => {
e.stopPropagation();
deleteScene(scene.id);
}}
className="absolute -top-2 -right-2 w-5 h-5 rounded-full flex items-center justify-center transition-all transform hover:scale-110 bg-black/90 text-white/60 hover:text-white hover:bg-red-500 border border-white/10"
type="button"
title="Delete scene"
>
<X size={10} />
</button>
)}
</div>
);
})}
{/* Add Scene Chip */}
<button
onClick={generateNextScene}
disabled={isGeneratingScene}
className="w-20 h-12 rounded-lg border border-dashed border-white/20 hover:border-cyan-400/50 hover:bg-cyan-400/5 flex items-center justify-center transition-all disabled:opacity-40"
title="Add scene"
>
{isGeneratingScene ? (
<Loader2 size={16} className="text-cyan-400 animate-spin" />
) : (
<Plus size={16} className="text-white/40" />
)}
</button>
</div>
</div>
)}
{/* Main Content */}
{scenes.length === 0 ? (
// Empty State - Cinematic
<div className="flex-1 flex items-center justify-center p-8">
<div className="max-w-md text-center">
<div className="w-28 h-28 mx-auto mb-8 rounded-3xl bg-gradient-to-br from-cyan-500/20 via-blue-500/10 to-transparent border border-cyan-500/20 flex items-center justify-center">
<ImageIcon size={48} className="text-cyan-400/60" />
</div>
<h2 className="text-2xl font-semibold text-white mb-3">Create Your First Scene</h2>
<p className="text-white/50 mb-8 leading-relaxed">
Your project is ready. Generate a scene to start bringing your story to life with AI-powered visuals.
</p>
<button
onClick={generateFirstScene}
disabled={isGeneratingScene}
className="inline-flex items-center gap-3 px-8 py-4 bg-gradient-to-r from-cyan-500 to-blue-500 hover:from-cyan-400 hover:to-blue-400 disabled:opacity-50 disabled:cursor-not-allowed rounded-2xl text-base font-semibold shadow-lg shadow-cyan-500/25 transition-all"
>
{isGeneratingScene ? (
<>
<Loader2 size={20} className="animate-spin" />
Generating...
</>
) : (
<>
<Play size={20} fill="currentColor" />
Generate First Scene
</>
)}
</button>
<p className="text-xs text-white/30 mt-6">
Powered by AI • Based on your project settings
</p>
</div>
</div>
) : (
// Preview + Actions - Cinematic Layout
<div className="flex-1 flex flex-col">
{/* Preview Panel - Dominant */}
<div className="flex-1 relative overflow-hidden">
{/* Background gradient */}
<div className="absolute inset-0 bg-gradient-to-b from-transparent via-[#0a0a0f] to-[#0f0f18]" />
{/* Main preview area */}
<div className="absolute inset-0 flex items-center justify-center p-6">
{currentScene?.videoUrl ? (
/* Video/Animation Preview - when scene has generated media */
<div className="relative max-w-full max-h-full group">
{isAnimatedImageUrl(currentScene.videoUrl) ? (
/* Animated WebP/GIF - render as <img> (some Comfy workflows output this) */
<img
src={proxyVideoUrl(currentScene.videoUrl) || currentScene.videoUrl}
alt={`Scene ${currentSceneIndex + 1} animation`}
className="max-h-[calc(100vh-320px)] max-w-full object-contain rounded-xl shadow-2xl shadow-black/50"
/>
) : isWebmUrl(currentScene.videoUrl) ? (
/* WebM video - render as <video> */
canPlayWebm ? (
<video
className="max-h-[calc(100vh-320px)] max-w-full object-contain rounded-xl shadow-2xl shadow-black/50"
controls
loop
muted
autoPlay
playsInline
preload="metadata"
crossOrigin="anonymous"
src={proxyVideoUrl(currentScene.videoUrl) || currentScene.videoUrl}
/>
) : (
<div className="max-w-xl w-full bg-black/40 border border-white/10 rounded-xl p-6 text-center">
<div className="flex items-center justify-center gap-2 text-cyan-300 font-medium mb-2">
<Film size={16} />
Video generated (WebM)
</div>
<div className="text-white/60 text-sm mb-4">
This browser cannot play WebM inline. Use Chrome/Edge/Firefox, or download the clip.
</div>
<a
href={proxyVideoUrl(currentScene.videoUrl) || currentScene.videoUrl}
target="_blank"
rel="noreferrer"
className="inline-flex items-center gap-2 px-4 py-2 rounded-lg bg-white/10 hover:bg-white/15 border border-white/10 text-sm"
>
<Download size={14} />
Open / Download WebM
</a>
</div>
)
) : (
/* Unknown format fallback - try to display with download option */
<div className="max-w-xl w-full bg-black/40 border border-white/10 rounded-xl p-6 text-center">
<div className="flex items-center justify-center gap-2 text-cyan-300 font-medium mb-2">
<Film size={16} />
Generated media
</div>
<div className="text-white/60 text-sm mb-4">
This output format may not preview inline. Try opening in a new tab.
</div>
<a
href={proxyVideoUrl(currentScene.videoUrl) || currentScene.videoUrl}
target="_blank"
rel="noreferrer"
className="inline-flex items-center gap-2 px-4 py-2 rounded-lg bg-white/10 hover:bg-white/15 border border-white/10 text-sm"
>
<Download size={14} />
Open
</a>
</div>
)}
{/* Top-right overlay controls - glass style */}
<div className="absolute top-3 right-3 flex items-center gap-2 opacity-0 group-hover:opacity-100 transition-opacity duration-200">
{/* More options */}
<button
type="button"
className="w-9 h-9 rounded-full bg-black/40 backdrop-blur-md border border-white/10 flex items-center justify-center text-white/70 hover:text-white hover:bg-black/60 transition-all disabled:opacity-50 disabled:cursor-not-allowed"
title="More options"
onClick={() => {
setEditingScene(currentScene);
setEditNarration(currentScene.narration);
setEditImagePrompt(currentScene.imagePrompt);
setEditNegativePrompt(currentScene.negativePrompt || '');
setShowSceneEditor(true);
}}
>
<MoreHorizontal size={16} />
</button>
{/* Regenerate video */}
<button
type="button"
onClick={() => generateVideoForScene(currentScene.id, currentScene.imageUrl!, currentScene.imagePrompt)}
disabled={isGeneratingVideo}
className="w-9 h-9 rounded-full bg-black/40 backdrop-blur-md border border-white/10 flex items-center justify-center text-white/70 hover:text-white hover:bg-black/60 transition-all disabled:opacity-50 disabled:cursor-not-allowed"
title="Regenerate video"
>
<RefreshCw size={16} className={isGeneratingVideo ? 'animate-spin' : ''} />
</button>
{/* Remove video */}
<button
type="button"
onClick={() => removeVideoForScene(currentScene.id)}
disabled={isGeneratingVideo}
className="w-9 h-9 rounded-full bg-red-500/30 backdrop-blur-md border border-red-500/30 flex items-center justify-center text-red-200 hover:text-white hover:bg-red-500/50 transition-all disabled:opacity-50 disabled:cursor-not-allowed"
title="Remove video (keep image)"
>
<X size={16} />
</button>
</div>
{/* Generating video overlay */}
{isGeneratingVideo && (
<div className="absolute inset-0 flex items-center justify-center bg-black/70 backdrop-blur-sm rounded-xl">
<div className="flex flex-col items-center gap-3">
<Loader2 size={36} className="text-cyan-400 animate-spin" />
<span className="text-white/70 text-sm">Generating video...</span>
</div>
</div>
)}
</div>
) : currentScene?.imageUrl ? (
/* Image Preview - when scene has image but no video */
<div className="relative max-w-full max-h-full group">
<img
src={currentScene.imageUrl}
alt={`Scene ${currentSceneIndex + 1}`}
className="max-h-[calc(100vh-320px)] max-w-full object-contain rounded-xl shadow-2xl shadow-black/50 transition-all duration-500"
/>
{/* Top-right overlay controls - glass style */}
<div className="absolute top-3 right-3 flex items-center gap-2 opacity-0 group-hover:opacity-100 transition-opacity duration-200">
{/* More options */}
<button
type="button"
className="w-9 h-9 rounded-full bg-black/40 backdrop-blur-md border border-white/10 flex items-center justify-center text-white/70 hover:text-white hover:bg-black/60 transition-all disabled:opacity-50 disabled:cursor-not-allowed"
title="Edit scene"
onClick={() => {
setEditingScene(currentScene);
setEditNarration(currentScene.narration);
setEditImagePrompt(currentScene.imagePrompt);
setEditNegativePrompt(currentScene.negativePrompt || '');
setShowSceneEditor(true);
}}
>
<MoreHorizontal size={16} />
</button>
{/* Regenerate image */}
<button
type="button"
onClick={() => generateImageForScene(currentScene.id, currentScene.imagePrompt, true)}
disabled={isGeneratingImage || isGeneratingVideo}
className="w-9 h-9 rounded-full bg-black/40 backdrop-blur-md border border-white/10 flex items-center justify-center text-white/70 hover:text-white hover:bg-black/60 transition-all disabled:opacity-50 disabled:cursor-not-allowed"
title="Regenerate image"
>
<RefreshCw size={16} className={isGeneratingImage ? 'animate-spin' : ''} />
</button>
</div>
{/* Bottom-right Make Video CTA - only show if project wants video and scene has no video yet */}
{projectWantsVideo && !currentScene.videoUrl && !isGeneratingImage && !isGeneratingVideo && (
<div className="absolute bottom-4 right-4 opacity-0 group-hover:opacity-100 transition-opacity duration-200">
<button
type="button"
onClick={() => generateVideoForScene(currentScene.id, currentScene.imageUrl!, currentScene.imagePrompt)}
className="flex items-center gap-2 px-4 py-2 bg-cyan-500/90 backdrop-blur-md border border-cyan-400/30 rounded-full text-white text-sm font-medium hover:bg-cyan-500 transition-all shadow-lg shadow-cyan-500/20"
title="Convert to video"
>
<Film size={14} />
Make Video
</button>
</div>
)}
{/* Generating image overlay */}
{isGeneratingImage && (
<div className="absolute inset-0 flex items-center justify-center bg-black/70 backdrop-blur-sm rounded-xl">
<div className="flex flex-col items-center gap-3">
<Loader2 size={36} className="text-cyan-400 animate-spin" />
<span className="text-white/70 text-sm">Generating image...</span>
</div>
</div>
)}
{/* Generating video overlay */}
{isGeneratingVideo && (
<div className="absolute inset-0 flex items-center justify-center bg-black/70 backdrop-blur-sm rounded-xl">
<div className="flex flex-col items-center gap-3">
<Loader2 size={36} className="text-cyan-400 animate-spin" />
<span className="text-white/70 text-sm">Converting to video...</span>
<span className="text-white/40 text-xs">This may take a few minutes</span>
</div>
</div>
)}
</div>
) : (
/* Empty state when no image */
<div className="flex flex-col items-center justify-center text-center p-8">
{isGeneratingImage ? (
<>
<Loader2 size={48} className="text-cyan-400 animate-spin mb-4" />
<p className="text-white/60 text-sm">Generating image...</p>
{currentScene?.imagePrompt && (
<p className="text-white/30 text-xs mt-2 max-w-md line-clamp-2">{currentScene.imagePrompt}</p>
)}
</>
) : (
<>
<div className="w-20 h-20 rounded-2xl bg-white/5 border border-white/10 flex items-center justify-center mb-4">
<ImageIcon size={32} className="text-white/20" />
</div>
<p className="text-white/40 text-sm mb-4">No image for this scene</p>
<button
onClick={() => currentScene && generateImageForScene(currentScene.id, currentScene.imagePrompt)}
className="flex items-center gap-2 px-5 py-2.5 bg-cyan-500 hover:bg-cyan-400 rounded-full text-white text-sm font-medium transition-colors"
type="button"
>
Generate Image
</button>
</>
)}
</div>
)}
</div>
{/* Narration subtitle overlay */}
{currentScene?.narration && (
<div className="absolute bottom-8 left-0 right-0 flex justify-center px-8 pointer-events-none">
<div className="bg-black/80 backdrop-blur-md px-6 py-4 rounded-xl max-w-3xl shadow-xl border border-white/5">
<p className="text-base md:text-lg text-white leading-relaxed text-center">
{currentScene.narration}
</p>
</div>
</div>
)}
</div>
{/* Action Bar - Like Play Story */}
<div className="border-t border-white/5 bg-black/60 backdrop-blur-md">
<div className="max-w-4xl mx-auto px-4 py-4">
<div className="flex items-center justify-between gap-4">
{/* Left: Playback controls */}
<div className="flex items-center gap-2">
<button
onClick={() => setCurrentSceneIndex((i) => Math.max(0, i - 1))}
disabled={currentSceneIndex === 0}
className="p-3 text-white/40 hover:text-white hover:bg-white/5 rounded-full transition-all disabled:opacity-30 disabled:cursor-not-allowed"
type="button"
title="Previous scene"
>
<SkipBack size={20} />
</button>
<button
onClick={() => setIsPlaying(!isPlaying)}
className="p-4 bg-cyan-500 hover:bg-cyan-400 rounded-full transition-all shadow-lg shadow-cyan-500/25"
type="button"
title={isPlaying ? "Pause" : "Play"}
>
{isPlaying ? <Pause size={24} /> : <Play size={24} fill="currentColor" />}
</button>
<button
onClick={() => setCurrentSceneIndex((i) => Math.min(scenes.length - 1, i + 1))}
disabled={currentSceneIndex >= scenes.length - 1}
className="p-3 text-white/40 hover:text-white hover:bg-white/5 rounded-full transition-all disabled:opacity-30 disabled:cursor-not-allowed"
type="button"
title="Next scene"
>
<SkipForward size={20} />
</button>
</div>
{/* Center: Scene progress bar */}
<div className="flex-1 mx-4 hidden sm:block">
<div className="flex gap-1">
{scenes.map((_, i) => (
<button
key={i}
onClick={() => setCurrentSceneIndex(i)}
className={`flex-1 h-1.5 rounded-full transition-all ${
i === currentSceneIndex
? 'bg-cyan-400'
: i < currentSceneIndex
? 'bg-white/30'
: 'bg-white/10'
}`}
type="button"
title={`Scene ${i + 1}`}
/>
))}
</div>
</div>
{/* Right: Actions */}
<div className="flex items-center gap-2">
{/* Edit Scene Button */}
<button
onClick={() => currentScene && openSceneEditor(currentScene)}
disabled={!currentScene}
className="flex items-center gap-2 px-4 py-2 bg-white/5 hover:bg-white/10 border border-white/10 rounded-full text-sm transition-all disabled:opacity-40"
title="Edit scene"
>
<Edit3 size={14} />
<span className="hidden sm:inline">Edit</span>
</button>
{/* Generate Next Scene */}
<button
onClick={generateNextScene}
disabled={isGeneratingScene}
className="flex items-center gap-2 px-4 py-2 bg-cyan-500/20 hover:bg-cyan-500/30 text-cyan-300 border border-cyan-500/30 rounded-full text-sm transition-all disabled:opacity-50"
title="Generate next scene"
>
{isGeneratingScene ? (
<>
<Loader2 size={14} className="animate-spin" />
<span className="hidden sm:inline">Generating...</span>
</>
) : (
<>
<Plus size={14} />
<span className="hidden sm:inline">Next Scene</span>
</>
)}
</button>
{/* TV Mode */}
<button
onClick={handleEnterTVMode}
disabled={scenes.length === 0}
className="flex items-center gap-2 px-4 py-2 bg-gradient-to-r from-cyan-500/20 to-blue-500/20 hover:from-cyan-500/30 hover:to-blue-500/30 text-cyan-300 border border-cyan-500/30 rounded-full text-sm transition-all disabled:opacity-40"
type="button"
title="Watch in TV Mode"
>
<Monitor size={14} />
<span className="hidden sm:inline">TV Mode</span>
</button>
</div>
</div>
</div>
</div>
</div>
)}
{/* Scene Editor Modal */}
{showSceneEditor && editingScene && (
<div className="fixed inset-0 z-50 flex items-center justify-center p-4">
<div className="absolute inset-0 bg-black/80 backdrop-blur-sm" onClick={() => setShowSceneEditor(false)} />
<div className="relative w-full max-w-2xl rounded-2xl border border-white/10 bg-[#0f0f18] shadow-2xl max-h-[90vh] overflow-y-auto">
{/* Modal Header */}
<div className="flex items-center justify-between p-5 border-b border-white/10">
<div className="flex items-center gap-3">
<div className="w-10 h-10 rounded-xl bg-cyan-500/20 flex items-center justify-center">
<Edit3 size={18} className="text-cyan-400" />
</div>
<div>
<h2 className="text-lg font-semibold">Edit Scene {editingScene.idx + 1}</h2>
<p className="text-xs text-white/40">Update narration and prompts</p>
</div>
</div>
<button
onClick={() => setShowSceneEditor(false)}
className="p-2 rounded-lg hover:bg-white/5 transition-colors"
>
<X size={18} className="text-white/40" />
</button>
</div>
{/* Modal Content */}
<div className="p-5 space-y-5">
{/* Narration */}
<div>
<label className="flex items-center gap-2 text-sm font-medium text-white/70 mb-2">
<FileText size={14} />
Narration
</label>
<textarea
value={editNarration}
onChange={(e) => setEditNarration(e.target.value)}
className="w-full px-4 py-3 bg-black/40 border border-white/10 rounded-xl text-white placeholder-white/30 focus:border-cyan-500/50 focus:ring-1 focus:ring-cyan-500/25 focus:outline-none resize-none transition-all"
rows={3}
placeholder="Enter narration text for this scene..."
/>
</div>
{/* Image Prompt */}
<div>
<label className="flex items-center gap-2 text-sm font-medium text-white/70 mb-2">
<Sparkles size={14} />
Image Prompt
</label>
<textarea
value={editImagePrompt}
onChange={(e) => setEditImagePrompt(e.target.value)}
className="w-full px-4 py-3 bg-black/40 border border-white/10 rounded-xl text-white placeholder-white/30 focus:border-cyan-500/50 focus:ring-1 focus:ring-cyan-500/25 focus:outline-none resize-none transition-all"
rows={4}
placeholder="Describe the visual elements for image generation..."
/>
</div>
{/* Negative Prompt */}
<div>
<label className="text-sm font-medium text-white/70 mb-2 block">
Negative Prompt
</label>
<textarea
value={editNegativePrompt}
onChange={(e) => setEditNegativePrompt(e.target.value)}
className="w-full px-4 py-3 bg-black/40 border border-white/10 rounded-xl text-white placeholder-white/30 focus:border-cyan-500/50 focus:ring-1 focus:ring-cyan-500/25 focus:outline-none resize-none transition-all"
rows={2}
placeholder="Elements to avoid in the image..."
/>
</div>
{/* Model Selection */}
<div className="grid grid-cols-2 gap-4">
<div>
<label className="flex items-center gap-2 text-sm font-medium text-white/70 mb-2">
<Settings size={14} />
LLM Model
</label>
<select
value={selectedLLMModel}
onChange={(e) => setSelectedLLMModel(e.target.value)}
className="w-full px-3 py-2.5 bg-black/40 border border-white/10 rounded-xl text-white focus:border-cyan-500/50 focus:outline-none transition-all"
>
<option value="">Default</option>
{availableLLMModels.map((m) => (
<option key={m.id} value={m.id}>{m.name}</option>
))}
</select>
</div>
<div>
<label className="flex items-center gap-2 text-sm font-medium text-white/70 mb-2">
<ImageIcon size={14} />
Image Model
</label>
<select
value={selectedImageModel}
onChange={(e) => setSelectedImageModel(e.target.value)}
className="w-full px-3 py-2.5 bg-black/40 border border-white/10 rounded-xl text-white focus:border-cyan-500/50 focus:outline-none transition-all"
>
<option value="">Default</option>
{availableImageModels.map((m) => (
<option key={m.id} value={m.id}>{m.name}</option>
))}
</select>
</div>
</div>
</div>
{/* Modal Footer */}
<div className="flex items-center justify-between p-5 border-t border-white/10">
<div className="flex items-center gap-2">
{/* Regenerate Image button */}
<button
onClick={() => {
generateImageForScene(editingScene.id, editImagePrompt, true);
setShowSceneEditor(false);
}}
disabled={isGeneratingImage || isGeneratingVideo}
className="flex items-center gap-2 px-4 py-2 bg-white/5 hover:bg-white/10 border border-white/10 rounded-xl text-sm transition-all disabled:opacity-50"
>
<RefreshCw size={14} />
Regenerate Image
</button>
{/* Make Video / Regenerate Video button */}
{projectWantsVideo && editingScene.imageUrl && (
<div className="flex items-center gap-2">
<button
onClick={() => {
generateVideoForScene(editingScene.id, editingScene.imageUrl!, editImagePrompt);
setShowSceneEditor(false);
}}
disabled={isGeneratingImage || isGeneratingVideo}
className={[
"flex items-center gap-2 px-4 py-2 rounded-xl text-sm transition-all disabled:opacity-50",
editingScene.videoUrl
? "bg-white/5 hover:bg-white/10 border border-white/10"
: "bg-cyan-500/20 hover:bg-cyan-500/30 border border-cyan-500/30 text-cyan-300"
].join(" ")}
>
<Film size={14} />
{editingScene.videoUrl ? 'Regenerate Video' : 'Make Video'}
</button>
{editingScene.videoUrl && (
<button
onClick={() => {
removeVideoForScene(editingScene.id);
setShowSceneEditor(false);
}}
disabled={isGeneratingImage || isGeneratingVideo}
className="flex items-center gap-2 px-4 py-2 bg-red-500/15 hover:bg-red-500/25 border border-red-500/25 rounded-xl text-sm text-red-200 transition-all disabled:opacity-50"
title="Remove video and keep image"
>
<X size={14} />
Remove Video
</button>
)}
</div>
)}
</div>
<div className="flex items-center gap-3">
<button
onClick={() => setShowSceneEditor(false)}
className="px-4 py-2 text-sm text-white/50 hover:text-white transition-colors"
>
Cancel
</button>
<button
onClick={saveSceneEdits}
disabled={isSavingScene}
className="flex items-center gap-2 px-5 py-2.5 bg-cyan-500 hover:bg-cyan-400 disabled:opacity-50 rounded-xl text-sm font-medium transition-all"
>
{isSavingScene ? (
<>
<Loader2 size={14} className="animate-spin" />
Saving...
</>
) : (
<>
<Save size={14} />
Save Changes
</>
)}
</button>
</div>
</div>
</div>
</div>
)}
{/* Story Outline Panel */}
{showOutlinePanel && (
<div className="fixed inset-0 z-50 flex items-center justify-center p-4">
<div className="absolute inset-0 bg-black/80 backdrop-blur-sm" onClick={() => setShowOutlinePanel(false)} />
<div className="relative w-full max-w-3xl rounded-2xl border border-white/10 bg-[#0f0f18] shadow-2xl max-h-[90vh] overflow-y-auto">
{/* Panel Header */}
<div className="flex items-center justify-between p-5 border-b border-white/10">
<div className="flex items-center gap-3">
<div className="w-10 h-10 rounded-xl bg-cyan-500/20 flex items-center justify-center">
<Wand2 size={18} className="text-cyan-400" />
</div>
<div>
<h2 className="text-lg font-semibold">Story Outline</h2>
<p className="text-xs text-white/40">AI-powered story structure</p>
</div>
</div>
<div className="flex items-center gap-3">
<button
onClick={generateStoryOutline}
disabled={isGeneratingOutline}
className="flex items-center gap-2 px-4 py-2 bg-cyan-500 hover:bg-cyan-400 disabled:opacity-50 rounded-xl text-sm font-medium transition-all"
>
{isGeneratingOutline ? (
<>
<Loader2 size={14} className="animate-spin" />
Generating...
</>
) : (
<>
<Sparkles size={14} />
{storyOutline ? "Regenerate" : "Generate"}
</>
)}
</button>
<button
onClick={() => setShowOutlinePanel(false)}
className="p-2 rounded-lg hover:bg-white/5 transition-colors"
>
<X size={18} className="text-white/40" />
</button>
</div>
</div>
{/* Panel Content */}
<div className="p-5">
{storyOutline ? (
<div className="space-y-6">
{/* Story Arc */}
<div className="p-4 bg-black/30 rounded-xl border border-white/5">
<h3 className="text-sm font-semibold text-cyan-400 mb-3">Story Arc</h3>
<div className="grid grid-cols-5 gap-2 text-xs">
{['beginning', 'rising_action', 'climax', 'falling_action', 'resolution'].map((key, i) => (
<div key={key} className="p-2.5 bg-white/5 rounded-lg">
<div className="text-white/40 mb-1 capitalize">{['Beginning', 'Rising', 'Climax', 'Falling', 'Resolution'][i]}</div>
<div className="text-white/80">{(storyOutline.story_arc as any)?.[key] || "—"}</div>
</div>
))}
</div>
</div>
{/* Scene Outlines */}
<div>
<h3 className="text-sm font-semibold text-white/70 mb-3">
Scene Outlines ({storyOutline.scenes?.length || 0} scenes)
</h3>
<div className="space-y-3">
{storyOutline.scenes?.map((scene, idx) => {
const alreadyGenerated = scenes.length > idx;
return (
<div
key={idx}
className={`p-4 rounded-xl border transition-all ${
alreadyGenerated
? "bg-emerald-500/10 border-emerald-500/30"
: "bg-black/30 border-white/10 hover:border-white/20"
}`}
>
<div className="flex items-start justify-between gap-4">
<div className="flex-1">
<div className="flex items-center gap-2 mb-1">
<span className="text-sm font-medium text-white">
Scene {scene.scene_number}: {scene.title}
</span>
{alreadyGenerated && (
<span className="text-xs px-2 py-0.5 bg-emerald-500/20 text-emerald-400 rounded-full">
Generated
</span>
)}
</div>
<p className="text-sm text-white/50 mb-2">{scene.description}</p>
<p className="text-xs text-white/30 italic">"{scene.narration}"</p>
</div>
{!alreadyGenerated && scenes.length === idx && (
<button
onClick={() => {
generateSceneFromOutline(idx);
setShowOutlinePanel(false);
}}
disabled={isGeneratingScene}
className="flex items-center gap-1.5 px-3 py-1.5 bg-cyan-500 hover:bg-cyan-400 disabled:opacity-50 rounded-lg text-xs font-medium transition-all"
>
<Plus size={12} />
Generate
</button>
)}
</div>
</div>
);
})}
</div>
</div>
</div>
) : (
<div className="text-center py-16">
<div className="w-20 h-20 mx-auto rounded-2xl bg-white/5 border border-white/10 flex items-center justify-center mb-6">
<Wand2 size={32} className="text-white/20" />
</div>
<h3 className="text-lg font-medium text-white mb-2">No Outline Yet</h3>
<p className="text-white/40 mb-8 max-w-md mx-auto">
Generate an AI-powered story outline based on your project settings.
This creates a complete story arc with scene-by-scene planning.
</p>
<button
onClick={generateStoryOutline}
disabled={isGeneratingOutline}
className="inline-flex items-center gap-3 px-8 py-4 bg-gradient-to-r from-cyan-500 to-blue-500 hover:from-cyan-400 hover:to-blue-400 disabled:opacity-50 rounded-2xl font-semibold shadow-lg shadow-cyan-500/25 transition-all"
>
{isGeneratingOutline ? (
<>
<Loader2 size={20} className="animate-spin" />
Generating Outline...
</>
) : (
<>
<Sparkles size={20} />
Generate Story Outline
</>
)}
</button>
</div>
)}
</div>
</div>
</div>
)}
{/* Project Settings Modal */}
{showSettingsModal && (
<div className="fixed inset-0 z-50 flex items-center justify-center p-4">
<div className="absolute inset-0 bg-black/80 backdrop-blur-sm" onClick={() => setShowSettingsModal(false)} />
<div className="relative w-full max-w-2xl rounded-2xl border border-white/10 bg-[#0f0f18] shadow-2xl max-h-[90vh] overflow-y-auto">
{/* Modal Header */}
<div className="flex items-center justify-between p-5 border-b border-white/10 sticky top-0 bg-[#0f0f18] z-10">
<div className="flex items-center gap-3">
<div className="w-10 h-10 rounded-xl bg-cyan-500/20 flex items-center justify-center">
<Settings size={18} className="text-cyan-400" />
</div>
<div>
<h2 className="text-lg font-semibold">Project Settings</h2>
<p className="text-xs text-white/40">Customize your project configuration</p>
</div>
</div>
<button
onClick={() => setShowSettingsModal(false)}
className="p-2 rounded-lg hover:bg-white/5 transition-colors"
>
<X size={18} className="text-white/40" />
</button>
</div>
{/* Modal Content */}
<div className="p-5 space-y-6">
{/* Title & Description */}
<div className="space-y-4">
<h3 className="text-sm font-semibold text-cyan-400 flex items-center gap-2">
<FileText size={14} />
Details
</h3>
<div>
<label className="block text-xs font-medium text-white/50 mb-2">Title</label>
<input
type="text"
value={settingsTitle}
onChange={(e) => setSettingsTitle(e.target.value)}
className="w-full px-4 py-3 bg-black/40 border border-white/10 rounded-xl text-white placeholder-white/30 focus:border-cyan-500/50 focus:ring-1 focus:ring-cyan-500/25 focus:outline-none transition-all"
placeholder="Project title"
/>
</div>
<div>
<label className="block text-xs font-medium text-white/50 mb-2">Description</label>
<textarea
value={settingsLogline}
onChange={(e) => setSettingsLogline(e.target.value)}
className="w-full px-4 py-3 bg-black/40 border border-white/10 rounded-xl text-white placeholder-white/30 focus:border-cyan-500/50 focus:ring-1 focus:ring-cyan-500/25 focus:outline-none resize-none transition-all"
rows={2}
placeholder="A short description of your project..."
/>
</div>
</div>
{/* Format */}
<div className="space-y-3">
<h3 className="text-sm font-semibold text-cyan-400 flex items-center gap-2">
<Tv size={14} />
Format
</h3>
<div className="grid grid-cols-3 gap-3">
<button
onClick={() => setSettingsPlatform("youtube_16_9")}
className={`p-4 rounded-xl border text-center transition-all ${
settingsPlatform === "youtube_16_9"
? "border-cyan-500 bg-cyan-500/10"
: "border-white/10 bg-white/5 hover:bg-white/10"
}`}
>
<Tv size={24} className={`mx-auto mb-2 ${settingsPlatform === "youtube_16_9" ? "text-cyan-400" : "text-white/40"}`} />
<div className="text-sm font-medium">YouTube Video</div>
<div className="text-xs text-white/40">16:9 Landscape</div>
</button>
<button
onClick={() => setSettingsPlatform("shorts_9_16")}
className={`p-4 rounded-xl border text-center transition-all ${
settingsPlatform === "shorts_9_16"
? "border-cyan-500 bg-cyan-500/10"
: "border-white/10 bg-white/5 hover:bg-white/10"
}`}
>
<Smartphone size={24} className={`mx-auto mb-2 ${settingsPlatform === "shorts_9_16" ? "text-cyan-400" : "text-white/40"}`} />
<div className="text-sm font-medium">YouTube Short</div>
<div className="text-xs text-white/40">9:16 Vertical</div>
</button>
<button
onClick={() => setSettingsPlatform("slides_16_9")}
className={`p-4 rounded-xl border text-center transition-all ${
settingsPlatform === "slides_16_9"
? "border-cyan-500 bg-cyan-500/10"
: "border-white/10 bg-white/5 hover:bg-white/10"
}`}
>
<Presentation size={24} className={`mx-auto mb-2 ${settingsPlatform === "slides_16_9" ? "text-cyan-400" : "text-white/40"}`} />
<div className="text-sm font-medium">Slides</div>
<div className="text-xs text-white/40">16:9 Presentation</div>
</button>
</div>
</div>
{/* Image Generation Model */}
<div className="space-y-3">
<h3 className="text-sm font-semibold text-cyan-400 flex items-center gap-2">
<ImageIcon size={14} />
Image Generation
</h3>
<div className="p-4 rounded-xl border border-white/10 bg-white/5">
<label className="block text-xs font-medium text-white/50 mb-2">Image Model</label>
<div className="relative">
<select
value={settingsImageModel}
onChange={(e) => setSettingsImageModel(e.target.value)}
className="w-full px-4 py-3 bg-black/40 border border-white/10 rounded-xl text-white focus:border-cyan-500/50 focus:ring-1 focus:ring-cyan-500/25 focus:outline-none transition-all appearance-none cursor-pointer"
disabled={loadingModels}
>
{loadingModels ? (
<option value="">Loading models...</option>
) : availableImageModels.length === 0 ? (
<option value="">No models available</option>
) : (
availableImageModels.map((m) => (
<option key={m.id} value={m.id}>{m.name}</option>
))
)}
</select>
<ChevronDown className="absolute right-3 top-1/2 -translate-y-1/2 w-4 h-4 text-white/40 pointer-events-none" />
</div>
<p className="text-xs text-white/30 mt-2">Override the global image model for this project</p>
</div>
</div>
{/* Video Generation Toggle — a clickable "card" that can only ENABLE video.
    Disabling is done exclusively through the explicit "Disable Video Generation"
    button at the bottom of the card, so interacting with the nested model select
    (or mis-clicking inside the expanded card) can never accidentally turn video off. */}
<div className="space-y-3">
<h3 className="text-sm font-semibold text-cyan-400 flex items-center gap-2">
<Film size={14} />
Video Generation
</h3>
<div
className={`p-4 rounded-xl border transition-all ${
settingsEnableVideo
? "border-cyan-500 bg-cyan-500/10"
: "border-white/10 bg-white/5 hover:bg-white/10 cursor-pointer"
}`}
onClick={(e) => {
// One-way enable: ignore clicks that land on the nested <select> (tagName check),
// and only ever switch video ON here. Turning it OFF happens via the dedicated
// button below, whose handler calls stopPropagation so it never reaches this div.
if ((e.target as HTMLElement).tagName !== 'SELECT') {
if (!settingsEnableVideo) setSettingsEnableVideo(true);
}
}}
>
<div className="flex items-center justify-between">
<div className="flex items-center gap-3">
<div className={`w-10 h-10 rounded-lg flex items-center justify-center ${
settingsEnableVideo ? "bg-cyan-500/30" : "bg-white/10"
}`}>
<Film size={18} className={settingsEnableVideo ? "text-cyan-400" : "text-white/40"} />
</div>
<div>
<div className="text-sm font-medium">Enable Video Generation</div>
<div className="text-xs text-white/40">
{settingsEnableVideo
? "Make Video button visible for each scene"
: "Scenes will be static images only"
}
</div>
</div>
</div>
{/* Decorative toggle pill — purely visual (no handler of its own); clicks on it
    bubble to the card's one-way enable handler above. */}
<div className={`relative w-12 h-6 rounded-full transition-colors ${
settingsEnableVideo ? "bg-cyan-500" : "bg-white/20"
}`}>
<div className={`absolute top-1 w-4 h-4 rounded-full bg-white transition-transform ${
settingsEnableVideo ? "translate-x-7" : "translate-x-1"
}`} />
</div>
</div>
{/* Contextual tip for the slides + video combination. */}
{settingsEnableVideo && settingsPlatform === "slides_16_9" && (
<div className="mt-3 p-2 bg-amber-500/10 border border-amber-500/30 rounded-lg text-xs text-amber-400">
Tip: Slides with video will create animated clips instead of static images with Ken Burns effect.
</div>
)}
{/* Video Model Selector - shown when video is enabled. The wrapper's
    stopPropagation keeps label/paragraph clicks from re-triggering the card handler. */}
{settingsEnableVideo && (
<div className="mt-4 pt-4 border-t border-white/10" onClick={(e) => e.stopPropagation()}>
<label className="block text-xs font-medium text-white/50 mb-2">Video Model</label>
<div className="relative">
<select
value={settingsVideoModel}
onChange={(e) => setSettingsVideoModel(e.target.value)}
className="w-full px-4 py-3 bg-black/40 border border-white/10 rounded-xl text-white focus:border-cyan-500/50 focus:ring-1 focus:ring-cyan-500/25 focus:outline-none transition-all appearance-none cursor-pointer"
disabled={loadingModels}
>
{loadingModels ? (
<option value="">Loading models...</option>
) : availableVideoModels.length === 0 ? (
<option value="">No video models available</option>
) : (
availableVideoModels.map((m) => (
<option key={m.id} value={m.id}>{m.name}</option>
))
)}
</select>
<ChevronDown className="absolute right-3 top-1/2 -translate-y-1/2 w-4 h-4 text-white/40 pointer-events-none" />
</div>
<p className="text-xs text-white/30 mt-2">Override the global video model for this project</p>
</div>
)}
{/* Toggle OFF button when video is enabled — the only path back to disabled. */}
{settingsEnableVideo && (
<button
type="button"
onClick={(e) => { e.stopPropagation(); setSettingsEnableVideo(false); }}
className="mt-4 w-full px-4 py-2 text-xs text-white/50 hover:text-white/70 border border-white/10 rounded-lg hover:bg-white/5 transition-all"
>
Disable Video Generation
</button>
)}
</div>
</div>
{/* Intent — single-select pill group; setSettingsGoal replaces the current goal. */}
<div className="space-y-3">
<h3 className="text-sm font-semibold text-cyan-400">Intent</h3>
<div className="flex gap-2 flex-wrap">
{(["Entertain", "Educate", "Inspire"] as const).map((g) => (
<button
key={g}
onClick={() => setSettingsGoal(g)}
className={`px-4 py-2 rounded-full text-sm border transition-all ${
settingsGoal === g
? "bg-white text-black font-medium border-transparent"
: "bg-white/5 text-white/60 border-white/10 hover:bg-white/10 hover:text-white"
}`}
>
{g}
</button>
))}
</div>
</div>
{/* Episode Configuration — fixed choices for scene count and per-scene duration;
    the total-runtime estimate below is simply count × duration in seconds. */}
<div className="grid grid-cols-2 gap-4">
<div>
<label className="block text-xs font-medium text-white/50 mb-2">Scenes per Episode</label>
<div className="flex gap-2 flex-wrap">
{[4, 6, 8, 10, 12].map((count) => (
<button
key={count}
onClick={() => setSettingsSceneCount(count)}
className={`px-4 py-2 rounded-lg text-sm transition-all ${
settingsSceneCount === count
? "bg-cyan-500 text-white font-medium"
: "bg-white/5 text-white/60 hover:bg-white/10"
}`}
>
{count}
</button>
))}
</div>
<p className="text-xs text-white/30 mt-1">
~{settingsSceneCount * settingsSceneDuration}s total
</p>
</div>
<div>
<label className="block text-xs font-medium text-white/50 mb-2">Scene Duration</label>
<div className="flex gap-2 flex-wrap">
{[3, 5, 7, 10].map((dur) => (
<button
key={dur}
onClick={() => setSettingsSceneDuration(dur)}
className={`px-4 py-2 rounded-lg text-sm transition-all ${
settingsSceneDuration === dur
? "bg-cyan-500 text-white font-medium"
: "bg-white/5 text-white/60 hover:bg-white/10"
}`}
>
{dur}s
</button>
))}
</div>
</div>
</div>
{/* Visual Style — single-select card group (Cinematic / Digital Art / Anime);
    setSettingsVisualStyle stores the chosen style as a string label. */}
<div className="space-y-3">
<h3 className="text-sm font-semibold text-cyan-400 flex items-center gap-2">
<Palette size={14} />
Visual Style
</h3>
<div className="grid grid-cols-3 gap-3">
<button
onClick={() => setSettingsVisualStyle("Cinematic")}
className={`p-3 rounded-xl border text-center transition-all ${
settingsVisualStyle === "Cinematic"
? "border-cyan-500 bg-cyan-500/10"
: "border-white/10 bg-white/5 hover:bg-white/10"
}`}
>
<Camera size={20} className={`mx-auto mb-1.5 ${settingsVisualStyle === "Cinematic" ? "text-cyan-400" : "text-white/40"}`} />
<div className="text-sm font-medium">Cinematic</div>
<div className="text-xs text-white/40">High fidelity</div>
</button>
<button
onClick={() => setSettingsVisualStyle("Digital Art")}
className={`p-3 rounded-xl border text-center transition-all ${
settingsVisualStyle === "Digital Art"
? "border-cyan-500 bg-cyan-500/10"
: "border-white/10 bg-white/5 hover:bg-white/10"
}`}
>
<Palette size={20} className={`mx-auto mb-1.5 ${settingsVisualStyle === "Digital Art" ? "text-cyan-400" : "text-white/40"}`} />
<div className="text-sm font-medium">Digital Art</div>
<div className="text-xs text-white/40">Stylized</div>
</button>
<button
onClick={() => setSettingsVisualStyle("Anime")}
className={`p-3 rounded-xl border text-center transition-all ${
settingsVisualStyle === "Anime"
? "border-cyan-500 bg-cyan-500/10"
: "border-white/10 bg-white/5 hover:bg-white/10"
}`}
>
<Star size={20} className={`mx-auto mb-1.5 ${settingsVisualStyle === "Anime" ? "text-cyan-400" : "text-white/40"}`} />
<div className="text-sm font-medium">Anime</div>
<div className="text-xs text-white/40">Japanese style</div>
</button>
</div>
</div>
{/* Mood & Tone — multi-select, unlike Intent above: toggleSettingsTone adds or
    removes the tag and settingsTones.includes(t) drives the highlight. */}
<div className="space-y-3">
<h3 className="text-sm font-semibold text-cyan-400">Mood & Tone</h3>
<div className="flex gap-2 flex-wrap">
{["Documentary", "Dramatic", "Calm", "Upbeat", "Dark"].map((t) => (
<button
key={t}
onClick={() => toggleSettingsTone(t)}
className={`px-4 py-2 rounded-full text-sm border transition-all ${
settingsTones.includes(t)
? "bg-white text-black font-medium border-transparent"
: "bg-white/5 text-white/60 border-white/10 hover:bg-white/10 hover:text-white"
}`}
>
{t}
</button>
))}
</div>
</div>
{/* AI Story Model — LLM used for outline/narration generation.
    NOTE(review): unlike the image/video selects this one is not disabled while
    loadingModels is true — confirm whether that asymmetry is intentional. */}
<div className="space-y-3">
<h3 className="text-sm font-semibold text-cyan-400 flex items-center gap-2">
<Sparkles size={14} />
AI Story Model
</h3>
<p className="text-xs text-white/40">Select the AI model for generating outlines and narration</p>
<select
value={settingsLLMModel}
onChange={(e) => setSettingsLLMModel(e.target.value)}
className="w-full px-4 py-3 bg-black/40 border border-white/10 rounded-xl text-white focus:border-cyan-500/50 focus:outline-none transition-all appearance-none cursor-pointer"
>
{availableLLMModels.length === 0 ? (
<option value="">No models available</option>
) : (
availableLLMModels.map((m) => (
<option key={m.id} value={m.id}>
{m.name}
</option>
))
)}
</select>
{availableLLMModels.length === 0 && (
<p className="text-xs text-red-400">
No Ollama models found. Make sure Ollama is running with at least one model.
</p>
)}
</div>
{/* Checks — consistency lock (boolean) and the SFW/mature content rating.
    The SFW checkbox maps checked -> "sfw" and unchecked -> "mature"; a warning
    banner is rendered whenever the project rating is "mature". */}
<div className="space-y-3">
<h3 className="text-sm font-semibold text-cyan-400 flex items-center gap-2">
<Shield size={14} />
Checks
</h3>
<div className="bg-black/30 rounded-xl border border-white/10 divide-y divide-white/10">
<label className="flex items-center justify-between p-4 cursor-pointer hover:bg-white/5 transition-colors">
<div className="flex items-center gap-3">
<Lock size={16} className="text-white/40" />
<div>
<div className="text-sm font-medium">Consistency Lock</div>
<div className="text-xs text-white/40">Keep characters stable across scenes</div>
</div>
</div>
<input
type="checkbox"
checked={settingsLockIdentity}
onChange={(e) => setSettingsLockIdentity(e.target.checked)}
className="w-5 h-5 accent-cyan-500"
/>
</label>
<label className="flex items-center justify-between p-4 cursor-pointer hover:bg-white/5 transition-colors">
<div className="flex items-center gap-3">
<Shield size={16} className="text-white/40" />
<div>
<div className="text-sm font-medium">Safe for Work (SFW)</div>
<div className="text-xs text-white/40">Filter explicit content</div>
</div>
</div>
<input
type="checkbox"
checked={settingsContentRating === "sfw"}
onChange={(e) => setSettingsContentRating(e.target.checked ? "sfw" : "mature")}
className="w-5 h-5 accent-cyan-500"
/>
</label>
</div>
{settingsContentRating === "mature" && (
<div className="p-3 bg-purple-500/10 border border-purple-500/30 rounded-xl text-sm">
<span className="font-medium text-purple-400">Mature Mode Enabled</span>
<span className="text-white/50 ml-2">- This project may generate explicit content.</span>
</div>
)}
</div>
{/* Generation Parameters — delegates the advanced image/video parameter form to
    the imported CreatorStudioSettings component; genParams/setGenParams is a
    controlled value/onChange pair owned by this editor. */}
<div className="space-y-3">
<h3 className="text-sm font-semibold text-cyan-400 flex items-center gap-2">
<Sliders size={14} />
Generation Parameters
</h3>
<p className="text-xs text-white/40">
Customize generation defaults for images & videos. These override model presets.
</p>
<CreatorStudioSettings value={genParams} onChange={setGenParams} />
</div>
</div>
{/* Modal Footer — sticky Cancel/Save bar. Cancel only closes the modal
    (NOTE(review): it does not appear to reset the settings* draft state here —
    presumably re-synced when the modal reopens; confirm). Save is disabled while
    a save is in flight or the title is blank/whitespace. */}
<div className="flex items-center justify-end gap-3 p-5 border-t border-white/10 sticky bottom-0 bg-[#0f0f18]">
<button
onClick={() => setShowSettingsModal(false)}
className="px-4 py-2 text-sm text-white/50 hover:text-white transition-colors"
>
Cancel
</button>
<button
onClick={saveProjectSettings}
disabled={isSavingSettings || !settingsTitle.trim()}
className="flex items-center gap-2 px-5 py-2.5 bg-cyan-500 hover:bg-cyan-400 disabled:opacity-50 rounded-xl text-sm font-medium transition-all"
>
{isSavingSettings ? (
<>
<Loader2 size={14} className="animate-spin" />
Saving...
</>
) : (
<>
<Save size={14} />
Save Settings
</>
)}
</button>
</div>
</div>
</div>
)}
{/* TV Mode Overlay — mounted only while tvModeActive (from the TV-mode store);
    the three callbacks wire scene generation/image-ensuring/outline-sync back
    into this editor. */}
{tvModeActive && (
<TVModeContainer
onGenerateNext={generateNextForTVMode}
onEnsureImage={ensureImageForTVMode}
onSyncOutline={syncOutlineWithScenes}
/>
)}
{/* Custom scrollbar hide + line-clamp utilities, injected as an inline <style>
    element. NOTE(review): this re-injects global CSS on every render of this
    component; consider moving it to a stylesheet if it ever appears more than once. */}
<style>{`
.scrollbar-hide::-webkit-scrollbar { display: none; }
.scrollbar-hide { -ms-overflow-style: none; scrollbar-width: none; }
.line-clamp-2 {
display: -webkit-box;
-webkit-line-clamp: 2;
-webkit-box-orient: vertical;
overflow: hidden;
}
`}</style>
</div>
);
}
export default CreatorStudioEditor;