{ "id": "2d995555-9a88-4480-bc48-960928d44208", "revision": 0, "last_node_id": 5248, "last_link_id": 13981, "nodes": [ { "id": 5217, "type": "EmptyImage", "pos": [ -2839.154683779377, 7604.952670762663 ], "size": [ 210, 130 ], "flags": {}, "order": 0, "mode": 0, "inputs": [], "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 13970 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.5.1", "Node name for S&R": "EmptyImage", "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ 1920, 1088, 1, 0 ] }, { "id": 5218, "type": "PrimitiveInt", "pos": [ -2836.07911374273, 7773.402260706907 ], "size": [ 210, 82 ], "flags": {}, "order": 1, "mode": 0, "inputs": [], "outputs": [ { "name": "INT", "type": "INT", "links": [ 13969 ] } ], "title": "length", "properties": { "cnr_id": "comfy-core", "ver": "0.5.1", "Node name for S&R": "PrimitiveInt", "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ 121, "fixed" ] }, { "id": 5220, "type": "CM_FloatToInt", "pos": [ -2838.431157531117, 8029.873051757293 ], "size": [ 214.03773034527285, 58 ], "flags": {}, "order": 12, "mode": 0, "inputs": [ { "name": "a", "type": "FLOAT", "widget": { "name": "a" }, "link": 13977 } ], "outputs": [ { "name": "INT", "type": "INT", "links": [ 13976 ] } ], "title": "Frame Rate", "properties": { "Node name for S&R": "CM_FloatToInt" }, "widgets_values": [ 0 ] }, { "id": 5221, "type": "PrimitiveFloat", "pos": [ -2834.344933844252, 7902.7575696939475 ], "size": [ 210, 58 ], "flags": {}, "order": 2, "mode": 0, "inputs": [], "outputs": [ { "name": "FLOAT", "type": "FLOAT", "links": [ 13960, 13965, 13977 ] } ], "title": "Frame Rate", "properties": { "cnr_id": "comfy-core", "ver": "0.5.1", "Node name for S&R": "PrimitiveFloat", "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ 24 ] }, { "id": 
5229, "type": "CreateVideo", "pos": [ -1710.8270174930772, 7554.518799576144 ], "size": [ 210, 78 ], "flags": {}, "order": 19, "mode": 0, "inputs": [ { "name": "images", "type": "IMAGE", "link": 13958 }, { "name": "audio", "shape": 7, "type": "AUDIO", "link": 13959 }, { "name": "fps", "type": "FLOAT", "widget": { "name": "fps" }, "link": 13960 } ], "outputs": [ { "name": "VIDEO", "type": "VIDEO", "links": [ 13974 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.5.1", "Node name for S&R": "CreateVideo", "ue_properties": { "widget_ue_connectable": { "fps": true }, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ 30 ] }, { "id": 5228, "type": "CheckpointLoaderSimple", "pos": [ -3294.181093055003, 7602.015265495293 ], "size": [ 407.1770500664943, 99.93503409866116 ], "flags": {}, "order": 3, "mode": 0, "inputs": [], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 13957, 13975 ] }, { "name": "CLIP", "type": "CLIP", "links": [] }, { "name": "VAE", "type": "VAE", "links": [ 13971 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.56", "Node name for S&R": "CheckpointLoaderSimple", "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "ltx-2-19b-distilled.safetensors" ] }, { "id": 5227, "type": "LTXVGemmaCLIPModelLoader", "pos": [ -3298.5249619455894, 7907.801436963024 ], "size": [ 421.93656760943577, 106 ], "flags": {}, "order": 4, "mode": 0, "inputs": [], "outputs": [ { "name": "clip", "type": "CLIP", "links": [ 13961, 13962 ] } ], "properties": { "cnr_id": "ComfyUI-LTXVideo", "ver": "aa0e2f1709a2a5f35a70de667cd5e4f4ebfad4fe", "Node name for S&R": "LTXVGemmaCLIPModelLoader", "ue_properties": { "version": "7.5.2", "widget_ue_connectable": { "model_path": true, "max_length": true }, "input_ue_unconnectable": {} } }, "widgets_values": [ "gemma-3-12b-it-qat-q4_0-unquantized/model-00001-of-00005.safetensors", "ltx-2-19b-distilled.safetensors", 1024 ] }, { 
"id": 5219, "type": "LTXVAudioVAELoader", "pos": [ -3299.3425621307183, 8059.640328358273 ], "size": [ 424.18031362607053, 58 ], "flags": {}, "order": 5, "mode": 0, "inputs": [], "outputs": [ { "name": "Audio VAE", "type": "VAE", "links": [ 13973 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.64", "Node name for S&R": "LTXVAudioVAELoader", "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "ltx-2-19b-distilled.safetensors" ] }, { "id": 5224, "type": "LTXVConditioning", "pos": [ -2194.923299486236, 8014.064491594868 ], "size": [ 210, 94 ], "flags": { "collapsed": false }, "order": 17, "mode": 0, "inputs": [ { "name": "positive", "type": "CONDITIONING", "link": 13964 }, { "name": "negative", "type": "CONDITIONING", "link": 13978 }, { "name": "frame_rate", "type": "FLOAT", "widget": { "name": "frame_rate" }, "link": 13965 } ], "outputs": [ { "name": "positive", "type": "CONDITIONING", "slot_index": 0, "links": [ 13967 ] }, { "name": "negative", "type": "CONDITIONING", "slot_index": 1, "links": [ 13968 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.28", "Node name for S&R": "LTXVConditioning", "ue_properties": { "widget_ue_connectable": { "frame_rate": true }, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ 25 ] }, { "id": 5230, "type": "LoraLoaderModelOnly", "pos": [ -2574.377424115814, 7408.611507002384 ], "size": [ 220.14623500416474, 90.32621827319144 ], "flags": {}, "order": 13, "mode": 4, "inputs": [ { "name": "model", "type": "MODEL", "link": 13957 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 13966 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.68", "Node name for S&R": "LoraLoaderModelOnly", "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "your_camera_lora.safetensors", 1 ] }, { "id": 5231, "type": "LoraLoaderModelOnly", "pos": [ 
-2336.959840890808, 7413.996765047972 ], "size": [ 210, 82 ], "flags": {}, "order": 14, "mode": 4, "inputs": [ { "name": "model", "type": "MODEL", "link": 13975 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 13979 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.68", "Node name for S&R": "LoraLoaderModelOnly", "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "your_camera_lora.safetensors", 1 ] }, { "id": 5223, "type": "CLIPTextEncode", "pos": [ -2196.5080069477067, 7960.5138428973305 ], "size": [ 307.2346496582031, 88 ], "flags": { "collapsed": true }, "order": 16, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 13962 }, { "name": "text", "type": "STRING", "widget": { "name": "text" }, "link": 13963 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "slot_index": 0, "links": [ 13964, 13978 ] } ], "title": "Enhanced Prompt (Positive)", "properties": { "cnr_id": "comfy-core", "ver": "0.3.28", "Node name for S&R": "CLIPTextEncode", "ue_properties": { "widget_ue_connectable": { "text": true }, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "" ], "color": "#232", "bgcolor": "#353" }, { "id": 5075, "type": "SaveVideo", "pos": [ -1481.2200402676488, 7553.47006763561 ], "size": [ 670.0771562911698, 584.6954036515563 ], "flags": {}, "order": 20, "mode": 0, "inputs": [ { "name": "video", "type": "VIDEO", "link": 13974 } ], "outputs": [], "properties": { "cnr_id": "comfy-core", "ver": "0.5.1", "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "video/LTX-2", "auto", "auto" ] }, { "id": 5222, "type": "PrimitiveStringMultiline", "pos": [ -2567.508915639494, 7588.770792256889 ], "size": [ 337.7200214704494, 515.3418506718763 ], "flags": {}, "order": 6, "mode": 0, "inputs": [], "outputs": [ { "name": "STRING", "type": "STRING", "links": [ 13980 ] } ], 
"title": "Positive Prompt", "properties": { "cnr_id": "comfy-core", "ver": "0.5.1", "Node name for S&R": "PrimitiveStringMultiline", "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "A medium shot captures a doctor, clad in a white lab coat over a blue shirt, with a stethoscope draped around his neck, as he gently palpates the right knee of a patient. The patient, wearing a dark blue garment, sits on a light blue examination table, her left hand resting on her left thigh, and her right hand resting on her right thigh, just above the knee. A subtle red inflamed area is visible on the patient's right knee. The background is softly blurred, showing medical supplies on a counter. The camera slowly tilts up, revealing the doctor's face as he carefully flexes the patient's knee, testing its range of movement. The patient's expression shifts, her brow furrowing slightly, and she winces in discomfort. As the doctor continues to assess the knee, a nurse, dressed in scrubs and holding a notepad, enters the frame from the left, walking past the doctor and patient. She briefly pauses, extending the notepad towards the doctor, who glances at the notes and gives a subtle nod of acknowledgment before returning his attention to the patient's knee. 
The nurse continues walking out of frame to the right, and the doctor resumes his examination, his hands continuing to gently manipulate the patient's knee, who continues to wince.\n" ], "color": "#232", "bgcolor": "#353" }, { "id": 5244, "type": "LatentUpscaleModelLoader", "pos": [ -3290.653965607332, 7745.588386327735 ], "size": [ 402.35474680825916, 100.68012732230727 ], "flags": {}, "order": 7, "mode": 0, "inputs": [], "outputs": [ { "name": "LATENT_UPSCALE_MODEL", "type": "LATENT_UPSCALE_MODEL", "links": [ 13981 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.7.0", "Node name for S&R": "LatentUpscaleModelLoader", "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "ltx-2-spatial-upscaler-x2-1.0.safetensors" ] }, { "id": 5246, "type": "MarkdownNote", "pos": [ -3759.587881115107, 7906.926955158114 ], "size": [ 418.0950915480007, 208.54998853542747 ], "flags": { "collapsed": false }, "order": 8, "mode": 0, "inputs": [], "outputs": [], "title": "Prompting LTX-2", "properties": { "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "## LTX-2 Prompting Tips\n1. **Core Actions**: Describe events and actions as they occur over time \n2. **Audio**: Describe sounds and dialogue needed for the scene \n3. 
**Consistency**: Avoid instructions that do not match the reference image, as this will degrade results\n" ], "color": "#432", "bgcolor": "#653" }, { "id": 5245, "type": "MarkdownNote", "pos": [ -3759.592503870824, 7426.935420507221 ], "size": [ 421.1303542521987, 436.3332627074051 ], "flags": { "collapsed": false }, "order": 9, "mode": 0, "inputs": [], "outputs": [], "title": "Model Links", "properties": { "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "## Model Links\n\n**LTX-2 Model Weights**\n\n- [ltx-2-19b-distilled.safetensors](https://huggingface.co/Lightricks/LTX-2/resolve/main/ltx-2-19b-distilled.safetensors)\n- [ltx-2-spatial-upscaler-x2-1.0.safetensors](https://huggingface.co/Lightricks/LTX-2/resolve/main/ltx-2-spatial-upscaler-x2-1.0.safetensors)\n\n**Text Encoder**\n- [Google Gemma 3](https://huggingface.co/google/gemma-3-12b-pt)\n\n**Model Storage Location**\n\n```\n📂 ComfyUI/\n├── 📂 models/\n│ ├── 📂 text_encoders/\n│ │ ├── comfy_gemma_3_12B_it.safetensors\n│ ├── 📂 checkpoints/\n│ │ └── ltx-2-19b-distilled.safetensors\n│ ├── 📂 latent_upscale_models/ \n └── ltx-2-spatial-upscaler-x2-1.0.safetensors\n```\n\n## Report Issues\nTo report any issues when running this workflow, [go to GitHub](https://github.com/Lightricks/ComfyUI-LTXVideo/issues)\n" ], "color": "#432", "bgcolor": "#653" }, { "id": 5247, "type": "MarkdownNote", "pos": [ -2848.6014542681573, 7401.672050450482 ], "size": [ 240.16187366614236, 98.41687371924309 ], "flags": { "collapsed": false }, "order": 10, "mode": 0, "inputs": [], "outputs": [], "title": "Video Size", "properties": { "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "Width & height settings must be divisible by 32. \nFrame count must be a multiple of 8, plus 1 (e.g. 121). \n\n\nRunning with invalid parameters **will not cause errors**. 
Instead, the flow will silently choose the closest valid parameters. " ], "color": "#432", "bgcolor": "#653" }, { "id": 5248, "type": "MarkdownNote", "pos": [ -2581.888874206601, 7104.237854538457 ], "size": [ 469.5872194653941, 209.10385276580928 ], "flags": { "collapsed": false }, "order": 11, "mode": 0, "inputs": [], "outputs": [], "title": "LoRA", "properties": { "ue_properties": { "widget_ue_connectable": {}, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "# Using LoRAs\n**Ctrl + B to enable**\n\nFor the camera LoRA we recommend setting strength = 1\n\nYou can use LTX [Ready to use camera LoRAs](https://huggingface.co/collections/Lightricks/ltx-2)\n\n\n\n\nUse a second LoRA only when its specific details are important. \n\n## Training\nWe have made available a full trainer to help you train your own LoRA. You can [find it here](https://github.com/Lightricks/LTX-2)." ], "color": "#432", "bgcolor": "#653" }, { "id": 5225, "type": "LTXVGemmaEnhancePrompt", "pos": [ -2185.089808115403, 7900.140171901244 ], "size": [ 419.25607680542, 475.02407185169704 ], "flags": { "collapsed": true }, "order": 15, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 13961 }, { "name": "image", "shape": 7, "type": "IMAGE", "link": null }, { "name": "prompt", "type": "STRING", "widget": { "name": "prompt" }, "link": 13980 } ], "outputs": [ { "name": "enhanced_prompt", "type": "STRING", "links": [ 13963 ] } ], "title": "Enhancer", "properties": { "cnr_id": "ComfyUI-LTXVideo", "ver": "aa0e2f1709a2a5f35a70de667cd5e4f4ebfad4fe", "Node name for S&R": "LTXVGemmaEnhancePrompt", "ue_properties": { "widget_ue_connectable": { "prompt": true }, "version": "7.5.2", "input_ue_unconnectable": {} } }, "widgets_values": [ "", "You are a Creative Assistant. 
Given a user's raw input prompt describing a scene or concept, expand it into a detailed video generation prompt with specific visuals and integrated audio to guide a text-to-video model.\n\n#### Guidelines\n- Strictly follow all aspects of the user's raw input: include every element requested (style, visuals, motions, actions, camera movement, audio).\n - If the input is vague, invent concrete details: lighting, textures, materials, scene settings, etc.\n - For characters: describe gender, clothing, hair, expressions. DO NOT invent unrequested characters.\n- Use active language: present-progressive verbs (\"is walking,\" \"speaking\"). If no action specified, describe natural movements.\n- Maintain chronological flow: use temporal connectors (\"as,\" \"then,\" \"while\").\n- Audio layer: Describe complete soundscape (background audio, ambient sounds, SFX, speech/music when requested). Integrate sounds chronologically alongside actions. Be specific (e.g., \"soft footsteps on tile\"), not vague (e.g., \"ambient sound is present\").\n- Speech (only when requested): \n - For ANY speech-related input (talking, conversation, singing, etc.), ALWAYS include exact words in quotes with voice characteristics (e.g., \"The man says in an excited voice: 'You won't believe what I just saw!'\").\n - Specify language if not English and accent if relevant.\n- Style: Include visual style at the beginning: \"Style: