{"repo_name": "dia", "file_name": "/dia/cli.py", "inference_info": {"prefix_code": "import argparse\nimport os\nimport random\n\nimport numpy as np\nimport soundfile as sf\nimport torch\n\nfrom dia.model import Dia\n\n\ndef set_seed(seed: int):\n \"\"\"Sets the random seed for reproducibility.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # Ensure deterministic behavior for cuDNN (if used)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\n", "suffix_code": "\n\n\nif __name__ == \"__main__\":\n main()\n", "middle_code": "def main():\n parser = argparse.ArgumentParser(description=\"Generate audio using the Dia model.\")\n parser.add_argument(\"text\", type=str, help=\"Input text for speech generation.\")\n parser.add_argument(\n \"--output\", type=str, required=True, help=\"Path to save the generated audio file (e.g., output.wav).\"\n )\n parser.add_argument(\n \"--repo-id\",\n type=str,\n default=\"nari-labs/Dia-1.6B-0626\",\n help=\"Hugging Face repository ID (e.g., nari-labs/Dia-1.6B-0626).\",\n )\n parser.add_argument(\n \"--local-paths\", action=\"store_true\", help=\"Load model from local config and checkpoint files.\"\n )\n parser.add_argument(\n \"--config\", type=str, help=\"Path to local config.json file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--checkpoint\", type=str, help=\"Path to local model checkpoint .pth file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--audio-prompt\", type=str, default=None, help=\"Path to an optional audio prompt WAV file for voice cloning.\"\n )\n gen_group = parser.add_argument_group(\"Generation Parameters\")\n gen_group.add_argument(\n \"--max-tokens\",\n type=int,\n default=None,\n help=\"Maximum number of audio tokens to generate (defaults to config value).\",\n )\n gen_group.add_argument(\n 
\"--cfg-scale\", type=float, default=3.0, help=\"Classifier-Free Guidance scale (default: 3.0).\"\n )\n gen_group.add_argument(\n \"--temperature\", type=float, default=1.3, help=\"Sampling temperature (higher is more random, default: 0.7).\"\n )\n gen_group.add_argument(\"--top-p\", type=float, default=0.95, help=\"Nucleus sampling probability (default: 0.95).\")\n infra_group = parser.add_argument_group(\"Infrastructure\")\n infra_group.add_argument(\"--seed\", type=int, default=None, help=\"Random seed for reproducibility.\")\n infra_group.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device to run inference on (e.g., 'cuda', 'cpu', default: auto).\",\n )\n args = parser.parse_args()\n if args.local_paths:\n if not args.config:\n parser.error(\"--config is required when --local-paths is set.\")\n if not args.checkpoint:\n parser.error(\"--checkpoint is required when --local-paths is set.\")\n if not os.path.exists(args.config):\n parser.error(f\"Config file not found: {args.config}\")\n if not os.path.exists(args.checkpoint):\n parser.error(f\"Checkpoint file not found: {args.checkpoint}\")\n if args.seed is not None:\n set_seed(args.seed)\n print(f\"Using user-selected seed: {args.seed}\")\n device = torch.device(args.device)\n print(f\"Using device: {device}\")\n print(\"Loading model...\")\n if args.local_paths:\n print(f\"Loading from local paths: config='{args.config}', checkpoint='{args.checkpoint}'\")\n try:\n model = Dia.from_local(args.config, args.checkpoint, device=device)\n except Exception as e:\n print(f\"Error loading local model: {e}\")\n exit(1)\n else:\n print(f\"Loading from Hugging Face Hub: repo_id='{args.repo_id}'\")\n try:\n model = Dia.from_pretrained(args.repo_id, device=device)\n except Exception as e:\n print(f\"Error loading model from Hub: {e}\")\n exit(1)\n print(\"Model loaded.\")\n print(\"Generating audio...\")\n try:\n sample_rate = 44100 \n output_audio = 
model.generate(\n text=args.text,\n audio_prompt=args.audio_prompt,\n max_tokens=args.max_tokens,\n cfg_scale=args.cfg_scale,\n temperature=args.temperature,\n top_p=args.top_p,\n )\n print(\"Audio generation complete.\")\n print(f\"Saving audio to {args.output}...\")\n os.makedirs(os.path.dirname(args.output) or \".\", exist_ok=True)\n sf.write(args.output, output_audio, sample_rate)\n print(f\"Audio successfully saved to {args.output}\")\n except Exception as e:\n print(f\"Error during audio generation or saving: {e}\")\n exit(1)", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/dia/app.py", "import argparse\nimport contextlib\nimport io\nimport random\nimport tempfile\nimport time\nfrom pathlib import Path\nfrom typing import Optional, Tuple\n\nimport gradio as gr\nimport numpy as np\nimport soundfile as sf\nimport torch\n\nfrom dia.model import Dia\n\n\n# --- Global Setup ---\nparser = argparse.ArgumentParser(description=\"Gradio interface for Nari TTS\")\nparser.add_argument(\"--device\", type=str, default=None, help=\"Force device (e.g., 'cuda', 'mps', 'cpu')\")\nparser.add_argument(\"--share\", action=\"store_true\", help=\"Enable Gradio sharing\")\n\nargs = parser.parse_args()\n\n\n# Determine device\nif args.device:\n device = torch.device(args.device)\nelif torch.cuda.is_available():\n device = torch.device(\"cuda\")\n# Simplified MPS check for broader compatibility\nelif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n # Basic check is usually sufficient, detailed check can be problematic\n device = torch.device(\"mps\")\nelse:\n device = torch.device(\"cpu\")\n\nprint(f\"Using device: {device}\")\n\n# Load Nari model and config\nprint(\"Loading Nari model...\")\ntry:\n dtype_map = {\n \"cpu\": \"float32\",\n \"mps\": \"float32\", # Apple M series – better with float32\n \"cuda\": \"float16\", # NVIDIA – better with float16\n }\n\n dtype = 
dtype_map.get(device.type, \"float16\")\n print(f\"Using device: {device}, attempting to load model with {dtype}\")\n model = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=dtype, device=device)\nexcept Exception as e:\n print(f\"Error loading Nari model: {e}\")\n raise\n\n\ndef set_seed(seed: int):\n \"\"\"Sets the random seed for reproducibility.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef run_inference(\n text_input: str,\n audio_prompt_text_input: str,\n audio_prompt_input: Optional[Tuple[int, np.ndarray]],\n max_new_tokens: int,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n cfg_filter_top_k: int,\n speed_factor: float,\n seed: Optional[int] = None,\n):\n \"\"\"\n Runs Nari inference using the globally loaded model and provided inputs.\n Uses temporary files for text and audio prompt compatibility with inference.generate.\n \"\"\"\n global model, device # Access global model, config, device\n console_output_buffer = io.StringIO()\n\n with contextlib.redirect_stdout(console_output_buffer):\n # Prepend transcript text if audio_prompt provided\n if audio_prompt_input and audio_prompt_text_input and not audio_prompt_text_input.isspace():\n text_input = audio_prompt_text_input + \"\\n\" + text_input\n text_input = text_input.strip()\n\n if audio_prompt_input and (not audio_prompt_text_input or audio_prompt_text_input.isspace()):\n raise gr.Error(\"Audio Prompt Text input cannot be empty.\")\n\n if not text_input or text_input.isspace():\n raise gr.Error(\"Text input cannot be empty.\")\n\n # Preprocess Audio\n temp_txt_file_path = None\n temp_audio_prompt_path = None\n output_audio = (44100, np.zeros(1, dtype=np.float32))\n\n try:\n prompt_path_for_generate = None\n if audio_prompt_input is not None:\n sr, audio_data 
= audio_prompt_input\n # Check if audio_data is valid\n if audio_data is None or audio_data.size == 0 or audio_data.max() == 0: # Check for silence/empty\n gr.Warning(\"Audio prompt seems empty or silent, ignoring prompt.\")\n else:\n # Save prompt audio to a temporary WAV file\n with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".wav\", delete=False) as f_audio:\n temp_audio_prompt_path = f_audio.name # Store path for cleanup\n\n # Basic audio preprocessing for consistency\n # Convert to float32 in [-1, 1] range if integer type\n if np.issubdtype(audio_data.dtype, np.integer):\n max_val = np.iinfo(audio_data.dtype).max\n audio_data = audio_data.astype(np.float32) / max_val\n elif not np.issubdtype(audio_data.dtype, np.floating):\n gr.Warning(f\"Unsupported audio prompt dtype {audio_data.dtype}, attempting conversion.\")\n # Attempt conversion, might fail for complex types\n try:\n audio_data = audio_data.astype(np.float32)\n except Exception as conv_e:\n raise gr.Error(f\"Failed to convert audio prompt to float32: {conv_e}\")\n\n # Ensure mono (average channels if stereo)\n if audio_data.ndim > 1:\n if audio_data.shape[0] == 2: # Assume (2, N)\n audio_data = np.mean(audio_data, axis=0)\n elif audio_data.shape[1] == 2: # Assume (N, 2)\n audio_data = np.mean(audio_data, axis=1)\n else:\n gr.Warning(\n f\"Audio prompt has unexpected shape {audio_data.shape}, taking first channel/axis.\"\n )\n audio_data = (\n audio_data[0] if audio_data.shape[0] < audio_data.shape[1] else audio_data[:, 0]\n )\n audio_data = np.ascontiguousarray(audio_data) # Ensure contiguous after slicing/mean\n\n # Write using soundfile\n try:\n sf.write(\n temp_audio_prompt_path, audio_data, sr, subtype=\"FLOAT\"\n ) # Explicitly use FLOAT subtype\n prompt_path_for_generate = temp_audio_prompt_path\n print(f\"Created temporary audio prompt file: {temp_audio_prompt_path} (orig sr: {sr})\")\n except Exception as write_e:\n print(f\"Error writing temporary audio file: {write_e}\")\n raise 
gr.Error(f\"Failed to save audio prompt: {write_e}\")\n\n # Set and Display Generation Seed\n if seed is None or seed < 0:\n seed = random.randint(0, 2**32 - 1)\n print(f\"\\nNo seed provided, generated random seed: {seed}\\n\")\n else:\n print(f\"\\nUsing user-selected seed: {seed}\\n\")\n set_seed(seed)\n\n # Run Generation\n print(f'Generating speech: \\n\"{text_input}\"\\n')\n\n start_time = time.time()\n\n # Use torch.inference_mode() context manager for the generation call\n with torch.inference_mode():\n output_audio_np = model.generate(\n text_input,\n max_tokens=max_new_tokens,\n cfg_scale=cfg_scale,\n temperature=temperature,\n top_p=top_p,\n cfg_filter_top_k=cfg_filter_top_k, # Pass the value here\n use_torch_compile=False, # Keep False for Gradio stability\n audio_prompt=prompt_path_for_generate,\n verbose=True,\n )\n\n end_time = time.time()\n print(f\"Generation finished in {end_time - start_time:.2f} seconds.\\n\")\n\n # 4. Convert Codes to Audio\n if output_audio_np is not None:\n # Get sample rate from the loaded DAC model\n output_sr = 44100\n\n # --- Slow down audio ---\n original_len = len(output_audio_np)\n # Ensure speed_factor is positive and not excessively small/large to avoid issues\n speed_factor = max(0.1, min(speed_factor, 5.0))\n target_len = int(original_len / speed_factor) # Target length based on speed_factor\n if target_len != original_len and target_len > 0: # Only interpolate if length changes and is valid\n x_original = np.arange(original_len)\n x_resampled = np.linspace(0, original_len - 1, target_len)\n resampled_audio_np = np.interp(x_resampled, x_original, output_audio_np)\n output_audio = (\n output_sr,\n resampled_audio_np.astype(np.float32),\n ) # Use resampled audio\n print(\n f\"Resampled audio from {original_len} to {target_len} samples for {speed_factor:.2f}x speed.\"\n )\n else:\n output_audio = (\n output_sr,\n output_audio_np,\n ) # Keep original if calculation fails or no change\n print(f\"Skipping audio speed 
adjustment (factor: {speed_factor:.2f}).\")\n # --- End slowdown ---\n\n print(f\"Audio conversion successful. Final shape: {output_audio[1].shape}, Sample Rate: {output_sr}\")\n\n # Explicitly convert to int16 to prevent Gradio warning\n if output_audio[1].dtype == np.float32 or output_audio[1].dtype == np.float64:\n audio_for_gradio = np.clip(output_audio[1], -1.0, 1.0)\n audio_for_gradio = (audio_for_gradio * 32767).astype(np.int16)\n output_audio = (output_sr, audio_for_gradio)\n print(\"Converted audio to int16 for Gradio output.\")\n\n else:\n print(\"\\nGeneration finished, but no valid tokens were produced.\")\n # Return default silence\n gr.Warning(\"Generation produced no output.\")\n\n except Exception as e:\n print(f\"Error during inference: {e}\")\n import traceback\n\n traceback.print_exc()\n # Re-raise as Gradio error to display nicely in the UI\n raise gr.Error(f\"Inference failed: {e}\")\n\n finally:\n # Cleanup Temporary Files defensively\n if temp_txt_file_path and Path(temp_txt_file_path).exists():\n try:\n Path(temp_txt_file_path).unlink()\n print(f\"Deleted temporary text file: {temp_txt_file_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary text file {temp_txt_file_path}: {e}\")\n if temp_audio_prompt_path and Path(temp_audio_prompt_path).exists():\n try:\n Path(temp_audio_prompt_path).unlink()\n print(f\"Deleted temporary audio prompt file: {temp_audio_prompt_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary audio prompt file {temp_audio_prompt_path}: {e}\")\n\n # After generation, capture the printed output\n console_output = console_output_buffer.getvalue()\n\n return output_audio, seed, console_output\n\n\n# --- Create Gradio Interface ---\ncss = \"\"\"\n#col-container {max-width: 90%; margin-left: auto; margin-right: auto;}\n\"\"\"\n# Attempt to load default text from example.txt\ndefault_text = \"[S1] Dia is an open weights text to dialogue model. 
\\n[S2] You get full control over scripts and voices. \\n[S1] Wow. Amazing. (laughs) \\n[S2] Try it now on Git hub or Hugging Face.\"\nexample_txt_path = Path(\"./example.txt\")\nif example_txt_path.exists():\n try:\n default_text = example_txt_path.read_text(encoding=\"utf-8\").strip()\n if not default_text: # Handle empty example file\n default_text = \"Example text file was empty.\"\n except Exception as e:\n print(f\"Warning: Could not read example.txt: {e}\")\n\n\n# Build Gradio UI\nwith gr.Blocks(css=css, theme=\"gradio/dark\") as demo:\n gr.Markdown(\"# Nari Text-to-Speech Synthesis\")\n\n with gr.Row(equal_height=False):\n with gr.Column(scale=1):\n with gr.Accordion(\"Audio Reference Prompt (Optional)\", open=False):\n audio_prompt_input = gr.Audio(\n label=\"Audio Prompt (Optional)\",\n show_label=True,\n sources=[\"upload\", \"microphone\"],\n type=\"numpy\",\n )\n audio_prompt_text_input = gr.Textbox(\n label=\"Transcript of Audio Prompt (Required if using Audio Prompt)\",\n placeholder=\"Enter text here...\",\n value=\"\",\n lines=5, # Increased lines\n )\n text_input = gr.Textbox(\n label=\"Text To Generate\",\n placeholder=\"Enter text here...\",\n value=default_text,\n lines=5, # Increased lines\n )\n with gr.Accordion(\"Generation Parameters\", open=False):\n max_new_tokens = gr.Slider(\n label=\"Max New Tokens (Audio Length)\",\n minimum=860,\n maximum=3072,\n value=model.config.decoder_config.max_position_embeddings, # Use config default if available, else fallback\n step=50,\n info=\"Controls the maximum length of the generated audio (more tokens = longer audio).\",\n )\n cfg_scale = gr.Slider(\n label=\"CFG Scale (Guidance Strength)\",\n minimum=1.0,\n maximum=5.0,\n value=3.0, # Default from inference.py\n step=0.1,\n info=\"Higher values increase adherence to the text prompt.\",\n )\n temperature = gr.Slider(\n label=\"Temperature (Randomness)\",\n minimum=1.0,\n maximum=2.5,\n value=1.8, # Default from inference.py\n step=0.05,\n 
info=\"Lower values make the output more deterministic, higher values increase randomness.\",\n )\n top_p = gr.Slider(\n label=\"Top P (Nucleus Sampling)\",\n minimum=0.70,\n maximum=1.0,\n value=0.95, # Default from inference.py\n step=0.01,\n info=\"Filters vocabulary to the most likely tokens cumulatively reaching probability P.\",\n )\n cfg_filter_top_k = gr.Slider(\n label=\"CFG Filter Top K\",\n minimum=15,\n maximum=100,\n value=45,\n step=1,\n info=\"Top k filter for CFG guidance.\",\n )\n speed_factor_slider = gr.Slider(\n label=\"Speed Factor\",\n minimum=0.8,\n maximum=1.0,\n value=1.0,\n step=0.02,\n info=\"Adjusts the speed of the generated audio (1.0 = original speed).\",\n )\n seed_input = gr.Number(\n label=\"Generation Seed (Optional)\",\n value=-1,\n precision=0, # No decimal points\n step=1,\n interactive=True,\n info=\"Set a generation seed for reproducible outputs. Leave empty or -1 for random seed.\",\n )\n\n run_button = gr.Button(\"Generate Audio\", variant=\"primary\")\n\n with gr.Column(scale=1):\n audio_output = gr.Audio(\n label=\"Generated Audio\",\n type=\"numpy\",\n autoplay=False,\n )\n seed_output = gr.Textbox(label=\"Generation Seed\", interactive=False)\n console_output = gr.Textbox(label=\"Console Output Log\", lines=10, interactive=False)\n\n # Link button click to function\n run_button.click(\n fn=run_inference,\n inputs=[\n text_input,\n audio_prompt_text_input,\n audio_prompt_input,\n max_new_tokens,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n speed_factor_slider,\n seed_input,\n ],\n outputs=[\n audio_output,\n seed_output,\n console_output,\n ], # Add status_output here if using it\n api_name=\"generate_audio\",\n )\n\n # Add examples (ensure the prompt path is correct or remove it if example file doesn't exist)\n example_prompt_path = \"./example_prompt.mp3\" # Adjust if needed\n examples_list = [\n [\n \"[S1] Oh fire! Oh my goodness! What's the procedure? What to we do people? 
The smoke could be coming through an air duct! \\n[S2] Oh my god! Okay.. it's happening. Everybody stay calm! \\n[S1] What's the procedure... \\n[S2] Everybody stay fucking calm!!!... Everybody fucking calm down!!!!! \\n[S1] No! No! If you touch the handle, if its hot there might be a fire down the hallway! \",\n None,\n 3072,\n 3.0,\n 1.8,\n 0.95,\n 45,\n 1.0,\n ],\n [\n \"[S1] Open weights text to dialogue model. \\n[S2] You get full control over scripts and voices. \\n[S1] I'm biased, but I think we clearly won. \\n[S2] Hard to disagree. (laughs) \\n[S1] Thanks for listening to this demo. \\n[S2] Try it now on Git hub and Hugging Face. \\n[S1] If you liked our model, please give us a star and share to your friends. \\n[S2] This was Nari Labs.\",\n example_prompt_path if Path(example_prompt_path).exists() else None,\n 3072,\n 3.0,\n 1.8,\n 0.95,\n 45,\n 1.0,\n ],\n ]\n\n if examples_list:\n gr.Examples(\n examples=examples_list,\n inputs=[\n text_input,\n audio_prompt_input,\n max_new_tokens,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n speed_factor_slider,\n seed_input,\n ],\n outputs=[audio_output],\n fn=run_inference,\n cache_examples=False,\n label=\"Examples (Click to Run)\",\n )\n else:\n gr.Markdown(\"_(No examples configured or example prompt file missing)_\")\n\n# --- Launch the App ---\nif __name__ == \"__main__\":\n print(\"Launching Gradio interface...\")\n\n # set `GRADIO_SERVER_NAME`, `GRADIO_SERVER_PORT` env vars to override default values\n # use `GRADIO_SERVER_NAME=0.0.0.0` for Docker\n demo.launch(share=args.share)\n"], ["/dia/dia/model.py", "import time\nfrom enum import Enum\nfrom typing import Callable\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchaudio\n\nfrom .audio import apply_audio_delay, build_delay_indices, build_revert_indices, revert_audio_delay\nfrom .config import DiaConfig\nfrom .layers import DiaModel\nfrom .state import DecoderInferenceState, DecoderOutput, 
EncoderInferenceState\n\n\nDEFAULT_SAMPLE_RATE = 44100\nSAMPLE_RATE_RATIO = 512\n\n\ndef _get_default_device():\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n elif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n return torch.device(\"mps\")\n return torch.device(\"cpu\")\n\n\ndef _sample_next_token(\n logits_BCxV: torch.Tensor,\n temperature: float,\n top_p: float,\n top_k: int | None,\n audio_eos_value: int,\n) -> torch.Tensor:\n if temperature == 0.0:\n return torch.argmax(logits_BCxV, dim=-1)\n\n logits_BCxV = logits_BCxV / temperature\n\n if audio_eos_value is not None and audio_eos_value >= 0:\n top_logit_indices_BC = torch.argmax(logits_BCxV, dim=-1)\n eos_not_highest_mask_BC = top_logit_indices_BC != audio_eos_value\n mask_eos_unless_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)\n mask_eos_unless_highest_BCxV[eos_not_highest_mask_BC, audio_eos_value] = True\n logits_BCxV = logits_BCxV.masked_fill(mask_eos_unless_highest_BCxV, -torch.inf)\n eos_highest_mask_BC = top_logit_indices_BC == audio_eos_value\n mask_eos_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)\n mask_eos_highest_BCxV[eos_highest_mask_BC, :audio_eos_value] = True\n logits_BCxV = logits_BCxV.masked_fill(mask_eos_highest_BCxV, -torch.inf)\n\n if top_k is not None:\n _, top_k_indices_BCxV = torch.topk(logits_BCxV, k=top_k, dim=-1)\n mask = torch.ones_like(logits_BCxV, dtype=torch.bool)\n mask = mask.scatter(dim=-1, index=top_k_indices_BCxV, value=False)\n logits_BCxV = logits_BCxV.masked_fill(mask, -torch.inf)\n\n if top_p < 1.0:\n probs_BCxV = torch.softmax(logits_BCxV, dim=-1)\n sorted_probs_BCxV, sorted_indices_BCxV = torch.sort(probs_BCxV, dim=-1, descending=True)\n cumulative_probs_BCxV = torch.cumsum(sorted_probs_BCxV, dim=-1)\n\n sorted_indices_to_remove_BCxV = cumulative_probs_BCxV > top_p\n sorted_indices_to_remove_BCxV = torch.roll(sorted_indices_to_remove_BCxV, shifts=1, dims=-1)\n 
sorted_indices_to_remove_BCxV[..., 0] = torch.zeros_like(sorted_indices_to_remove_BCxV[..., 0])\n\n indices_to_remove_BCxV = torch.zeros_like(sorted_indices_to_remove_BCxV)\n indices_to_remove_BCxV = indices_to_remove_BCxV.scatter(\n dim=-1, index=sorted_indices_BCxV, src=sorted_indices_to_remove_BCxV\n )\n logits_BCxV = logits_BCxV.masked_fill(indices_to_remove_BCxV, -torch.inf)\n\n final_probs_BCxV = torch.softmax(logits_BCxV, dim=-1)\n\n sampled_indices_BC = torch.multinomial(final_probs_BCxV, num_samples=1)\n sampled_indices_C = sampled_indices_BC.squeeze(-1)\n return sampled_indices_C\n\n\nclass ComputeDtype(str, Enum):\n FLOAT32 = \"float32\"\n FLOAT16 = \"float16\"\n BFLOAT16 = \"bfloat16\"\n\n def to_dtype(self) -> torch.dtype:\n if self == ComputeDtype.FLOAT32:\n return torch.float32\n elif self == ComputeDtype.FLOAT16:\n return torch.float16\n elif self == ComputeDtype.BFLOAT16:\n return torch.bfloat16\n else:\n raise ValueError(f\"Unsupported compute dtype: {self}\")\n\n\nclass Dia:\n def __init__(\n self,\n config: DiaConfig,\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ):\n \"\"\"Initializes the Dia model.\n\n Args:\n config: The configuration object for the model.\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. 
If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Raises:\n RuntimeError: If there is an error loading the DAC model.\n \"\"\"\n super().__init__()\n self.config = config\n self.device = device if device is not None else _get_default_device()\n if isinstance(compute_dtype, str):\n compute_dtype = ComputeDtype(compute_dtype)\n self.compute_dtype = compute_dtype.to_dtype()\n self.model: DiaModel = DiaModel(config, self.compute_dtype)\n self.dac_model = None\n self._compiled_step = None\n self.load_dac = load_dac\n\n if not self.load_dac:\n print(\"Warning: DAC model will not be loaded. This is not recommended.\")\n\n if torch.cuda.is_available():\n torch.backends.cuda.matmul.allow_tf32 = True\n\n @classmethod\n def from_local(\n cls,\n config_path: str,\n checkpoint_path: str,\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ) -> \"Dia\":\n \"\"\"Loads the Dia model from local configuration and checkpoint files.\n\n Args:\n config_path: Path to the configuration JSON file.\n checkpoint_path: Path to the model checkpoint (.pth) file.\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. 
If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Returns:\n An instance of the Dia model loaded with weights and set to eval mode.\n\n Raises:\n FileNotFoundError: If the config or checkpoint file is not found.\n RuntimeError: If there is an error loading the checkpoint.\n \"\"\"\n config = DiaConfig.load(config_path)\n if config is None:\n raise FileNotFoundError(f\"Config file not found at {config_path}\")\n\n dia = cls(config, compute_dtype, device, load_dac)\n\n try:\n state_dict = torch.load(checkpoint_path, map_location=dia.device)\n dia.model.load_state_dict(state_dict)\n except FileNotFoundError:\n raise FileNotFoundError(f\"Checkpoint file not found at {checkpoint_path}\")\n except Exception as e:\n raise RuntimeError(f\"Error loading checkpoint from {checkpoint_path}\") from e\n\n dia.model.to(dia.device)\n dia.model.eval()\n if load_dac:\n dia._load_dac_model()\n return dia\n\n @classmethod\n def from_pretrained(\n cls,\n model_name: str = \"nari-labs/Dia-1.6B-0626\",\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ) -> \"Dia\":\n \"\"\"Loads the Dia model from a Hugging Face Hub repository.\n\n Downloads the configuration and checkpoint files from the specified\n repository ID and then loads the model.\n\n Args:\n model_name: The Hugging Face Hub repository ID (e.g., \"nari-labs/Dia-1.6B-0626\").\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. 
If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Returns:\n An instance of the Dia model loaded with weights and set to eval mode.\n\n Raises:\n FileNotFoundError: If config or checkpoint download/loading fails.\n RuntimeError: If there is an error loading the checkpoint.\n \"\"\"\n if isinstance(compute_dtype, str):\n compute_dtype = ComputeDtype(compute_dtype)\n\n # Load model directly using DiaModel's from_pretrained which handles HF download\n try:\n loaded_model = DiaModel.from_pretrained(model_name, compute_dtype=compute_dtype.to_dtype())\n except Exception as e:\n raise RuntimeError(f\"Error loading model from Hugging Face Hub ({model_name})\") from e\n\n config = loaded_model.config # Get config from the loaded model\n dia = cls(config, compute_dtype, device, load_dac)\n\n dia.model = loaded_model # Assign the already loaded model\n dia.model.to(dia.device)\n dia.model.eval()\n if load_dac:\n dia._load_dac_model()\n return dia\n\n def _load_dac_model(self):\n \"\"\"Loads the Descript Audio Codec (DAC) model.\n\n Downloads the DAC model if necessary and loads it onto the specified device.\n Sets the DAC model to evaluation mode.\n\n Raises:\n RuntimeError: If downloading or loading the DAC model fails.\n \"\"\"\n import dac\n\n try:\n dac_model_path = dac.utils.download()\n dac_model = dac.DAC.load(dac_model_path).to(self.device)\n dac_model.eval() # Ensure DAC is in eval mode\n except Exception as e:\n raise RuntimeError(\"Failed to load DAC model\") from e\n self.dac_model = dac_model\n\n def _encode_text(self, text: str) -> torch.Tensor:\n \"\"\"Encodes the input text string into a tensor of token IDs using byte-level encoding.\n\n Special tokens [S1] and [S2] are replaced by their byte values. 
The resulting\n sequence is truncated to the maximum configured text length.\n\n Args:\n text: The input text string.\n\n Returns:\n A tensor containing the encoded byte token IDs.\n \"\"\"\n max_len = self.config.encoder_config.max_position_embeddings\n\n byte_text = text.encode(\"utf-8\")\n # Replace special tokens with their byte values if needed by the specific tokenizer/config\n # Assuming byte values 1 and 2 are correct placeholders based on original code\n replaced_bytes = byte_text.replace(b\"[S1]\", b\"\\x01\").replace(b\"[S2]\", b\"\\x02\")\n text_tokens = list(replaced_bytes)\n return torch.tensor(\n text_tokens[:max_len],\n dtype=torch.long,\n device=self.device,\n )\n\n def _pad_text_input(self, text_tokens: list[torch.Tensor]) -> torch.Tensor:\n \"\"\"Pads the text input to the maximum length.\"\"\"\n text_pad_value = 0\n max_len = self.config.encoder_config.max_position_embeddings\n batch_size = len(text_tokens)\n\n src_tokens = torch.full(\n (batch_size, 1, max_len),\n fill_value=text_pad_value,\n dtype=torch.long,\n device=self.device,\n )\n for i in range(batch_size):\n current_len = len(text_tokens[i])\n src_tokens[i, 0, :current_len] = text_tokens[i]\n return src_tokens\n\n def _prepare_audio_prompt(self, audio_prompts: list[torch.Tensor | None]) -> tuple[torch.Tensor, list[int]]:\n \"\"\"Prepares the audio prompt tensor for the decoder.\n\n Handles padding, adds the beginning-of-sequence (BOS) token, applies the\n delay pattern, and determines the number of prefill steps for each item\n in the batch.\n\n Args:\n audio_prompts: A list of audio prompt tensors (encoded DAC frames) or None.\n Each tensor should have shape [T, C].\n\n Returns:\n A tuple containing:\n - delayed_batch (torch.Tensor): The prepared audio prompt tensor with\n delays applied, shape [B, T_max_padded, C].\n - prefill_steps (list[int]): A list containing the number of valid\n tokens (including BOS) for each prompt in the batch.\n \"\"\"\n num_channels = 
self.config.decoder_config.num_channels\n audio_bos_value = self.config.bos_token_id\n delay_pattern = self.config.delay_pattern\n max_delay_pattern = max(delay_pattern)\n batch_size = len(audio_prompts)\n\n max_len = max(p.shape[0] if p is not None else 0 for p in audio_prompts) + max_delay_pattern\n prefill_steps = []\n\n prefill = torch.full(\n (batch_size, max_len, num_channels),\n fill_value=-1,\n dtype=torch.int,\n device=self.device,\n )\n\n prefill[:, 0, :] = audio_bos_value\n\n for i in range(batch_size):\n prompt = audio_prompts[i]\n if prompt is not None:\n prompt = prompt.to(device=self.device, dtype=torch.int)\n prefill[i, 1 : prompt.shape[0] + 1, :] = prompt\n prefill_steps.append(prompt.shape[0] + 1)\n else:\n prefill_steps.append(1)\n\n delay_precomp = build_delay_indices(\n B=batch_size,\n T=max_len,\n C=num_channels,\n delay_pattern=delay_pattern,\n )\n\n delayed_batch = apply_audio_delay(\n audio_BxTxC=prefill,\n pad_value=-1,\n bos_value=audio_bos_value,\n precomp=delay_precomp,\n )\n\n return delayed_batch, prefill_steps\n\n def _prepare_generation(\n self,\n text: torch.Tensor,\n audio_prompts: list[torch.Tensor | None],\n max_tokens: int | None = None,\n attn_fn: Callable = F.scaled_dot_product_attention,\n ):\n \"\"\"Initializes the model state for generation.\n\n Encodes the text input (conditional and unconditional), prepares the\n encoder and decoder states (including KV caches and cross-attention),\n prepares the audio prompt, and performs the initial decoder prefill steps\n based on the audio prompts.\n\n Args:\n text: The padded text input tensor, shape [B, 1, T_text].\n audio_prompts: A list of prepared audio prompt tensors or None.\n\n Returns:\n A tuple containing:\n - dec_state (DecoderInferenceState): The initialized decoder state.\n - dec_output (DecoderOutput): The initialized decoder output manager,\n containing the prefilled audio tokens.\n \"\"\"\n batch_size = text.shape[0]\n\n enc_input_uncond = torch.zeros_like(text)\n 
enc_input_cond = text\n stacked_inputs = torch.stack([enc_input_uncond, enc_input_cond], dim=1)\n enc_input = stacked_inputs.view(2 * batch_size, -1)\n\n enc_state = EncoderInferenceState.new(self.config, enc_input_cond)\n encoder_out = self.model.encoder(enc_input, enc_state)\n\n dec_cross_attn_cache = self.model.decoder.precompute_cross_attn_cache(encoder_out)\n dec_state = DecoderInferenceState.new(\n self.config,\n enc_state,\n encoder_out,\n dec_cross_attn_cache,\n self.compute_dtype,\n max_generation_length=max_tokens,\n )\n prefill, prefill_steps = self._prepare_audio_prompt(audio_prompts)\n\n dec_output = DecoderOutput.new(batch_size, self.config, self.device)\n dec_output.prefill(prefill, prefill_steps)\n\n dec_step = min(prefill_steps) - 1\n if dec_step > 0:\n dec_state.prepare_step(0, dec_step)\n tokens_BxTxC = dec_output.get_tokens_at(0, dec_step).repeat_interleave(2, dim=0)\n self.model.decoder.forward(tokens_BxTxC, dec_state)\n\n return dec_state, dec_output\n\n def _decoder_step(\n self,\n tokens_Bx1xC: torch.Tensor,\n dec_state: DecoderInferenceState,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n top_k: int,\n current_idx: int,\n ) -> torch.Tensor:\n \"\"\"Performs a single step of the decoder inference.\n\n Takes the tokens from the previous step, runs them through the decoder\n (for both conditional and unconditional paths), applies classifier-free\n guidance (CFG), samples the next token using temperature, top-p, and top-k\n sampling, and applies constraints (e.g., preventing EOS in certain channels).\n\n Args:\n tokens_Bx1xC: The input tokens for the current step, shape [2*B, 1, C].\n Repeated for CFG (unconditional and conditional).\n dec_state: The current state of the decoder (KV caches, etc.).\n cfg_scale: The scale factor for classifier-free guidance.\n temperature: The temperature for sampling.\n top_p: The cumulative probability threshold for top-p sampling.\n top_k: The number of top logits to consider for top-k sampling.\n 
current_idx: The current generation step index.\n\n Returns:\n torch.Tensor: The sampled next tokens for each item in the batch,\n shape [B, C].\n \"\"\"\n B = tokens_Bx1xC.shape[0] // 2\n\n audio_eos_value = self.config.eos_token_id\n logits_Bx1xCxV = self.model.decoder.decode_step(tokens_Bx1xC, dec_state, current_idx)\n\n logits_last_2BxCxV = logits_Bx1xCxV[:, -1]\n logits_last_Bx2xCxV = logits_last_2BxCxV.view(B, 2, *logits_last_2BxCxV.shape[1:])\n\n uncond_logits_BxCxV = logits_last_Bx2xCxV[:, 0, :, :] # Shape [B, C, V]\n cond_logits_BxCxV = logits_last_Bx2xCxV[:, 1, :, :] # Shape [B, C, V]\n logits_BxCxV = cond_logits_BxCxV + cfg_scale * (cond_logits_BxCxV - uncond_logits_BxCxV)\n\n _, top_k_indices_BxCxk = torch.topk(logits_BxCxV, k=top_k, dim=-1)\n mask_BxCxV = torch.ones_like(logits_BxCxV, dtype=torch.bool)\n mask_BxCxV = mask_BxCxV.scatter(dim=-1, index=top_k_indices_BxCxk, value=False)\n logits_BxCxV = cond_logits_BxCxV.masked_fill(mask_BxCxV, -torch.inf)\n\n logits_BxCxV[:, :, audio_eos_value + 1 :] = torch.full_like(\n logits_BxCxV[:, :, audio_eos_value + 1 :],\n fill_value=-torch.inf,\n )\n logits_BxCxV[:, 1:, audio_eos_value:] = torch.full_like(\n logits_BxCxV[:, 1:, audio_eos_value:],\n fill_value=-torch.inf,\n )\n\n flat_logits_BCxV = logits_BxCxV.view(B * self.config.decoder_config.num_channels, -1)\n\n pred_BC = _sample_next_token(\n flat_logits_BCxV.float(),\n temperature=temperature,\n top_p=top_p,\n top_k=top_k,\n audio_eos_value=audio_eos_value,\n )\n\n pred_BxC = pred_BC.view(B, self.config.decoder_config.num_channels)\n return pred_BxC\n\n def _generate_output(self, generated_codes: torch.Tensor, lengths_Bx: torch.Tensor) -> list[np.ndarray]:\n \"\"\"Converts generated delayed codes into audio waveforms.\n\n Reverts the delay pattern applied during generation, decodes the resulting\n codebook using the DAC model (if loaded), and returns a list of audio\n waveforms as NumPy arrays. 
If DAC is not loaded, returns the raw codebook indices.\n\n Args:\n generated_codes: The tensor of generated audio codes with delays,\n shape [B, T_gen, C].\n lengths_Bx: A tensor containing the valid length of generated codes\n (excluding padding and BOS/EOS markers) for each item\n in the batch, shape [B].\n\n Returns:\n A list of NumPy arrays, where each array represents the generated audio\n waveform for one item in the batch. If DAC is not loaded, returns the\n raw, reverted codebook indices as NumPy arrays.\n \"\"\"\n num_channels = self.config.decoder_config.num_channels\n batch_size = generated_codes.shape[0]\n seq_length = generated_codes.shape[1]\n delay_pattern = self.config.delay_pattern\n audio_pad_value = self.config.pad_token_id\n max_delay_pattern = max(delay_pattern)\n\n revert_precomp = build_revert_indices(\n B=batch_size,\n T=seq_length,\n C=num_channels,\n delay_pattern=delay_pattern,\n )\n\n codebook = revert_audio_delay(\n audio_BxTxC=generated_codes,\n pad_value=audio_pad_value,\n precomp=revert_precomp,\n T=seq_length,\n )[:, :-max_delay_pattern, :]\n\n min_valid_index = 0\n max_valid_index = 1023\n invalid_mask = (codebook < min_valid_index) | (codebook > max_valid_index)\n codebook[invalid_mask] = 0\n\n audios = []\n\n if self.load_dac:\n for i in range(batch_size):\n audio = self._decode(codebook[i, : lengths_Bx[i], :])\n audio_np = audio.cpu().numpy()\n audios.append(audio_np)\n else:\n for i in range(batch_size):\n audios.append(codebook[i, : lengths_Bx[i], :].cpu().numpy())\n return audios\n\n @torch.no_grad()\n @torch.inference_mode()\n def _encode(self, audio: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Encodes the given audio waveform into a tensor of DAC codebook indices\n \"\"\"\n audio = audio.unsqueeze(0)\n audio_data = self.dac_model.preprocess(audio, DEFAULT_SAMPLE_RATE)\n _, encoded_frame, _, _, _ = self.dac_model.encode(audio_data)\n encoded_frame: torch.Tensor\n return encoded_frame.squeeze(0).transpose(0, 1)\n\n 
@torch.no_grad()\n @torch.inference_mode()\n def _decode(self, audio_codes: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Decodes the given frames into an output audio waveform\n \"\"\"\n audio_codes = audio_codes.unsqueeze(0).transpose(1, 2)\n audio_values, _, _ = self.dac_model.quantizer.from_codes(audio_codes)\n audio_values = self.dac_model.decode(audio_values)\n audio_values: torch.Tensor\n return audio_values.squeeze()\n\n def load_audio(self, audio_path: str) -> torch.Tensor:\n \"\"\"Loads and preprocesses an audio file for use as a prompt.\n\n Loads the audio file, resamples it to the target sample rate if necessary,\n preprocesses it using the DAC model's preprocessing, and encodes it into\n DAC codebook indices.\n\n Args:\n audio_path: Path to the audio file.\n\n Returns:\n torch.Tensor: The encoded audio prompt as DAC codebook indices,\n shape [T, C].\n\n Raises:\n RuntimeError: If the DAC model is not loaded (`load_dac=False` during init).\n FileNotFoundError: If the audio file cannot be found.\n Exception: If there's an error during loading or processing.\n \"\"\"\n if self.dac_model is None:\n raise RuntimeError(\"DAC model is required for loading audio prompts but was not loaded.\")\n audio, sr = torchaudio.load(audio_path, channels_first=True) # C, T\n if sr != DEFAULT_SAMPLE_RATE:\n audio = torchaudio.functional.resample(audio, sr, DEFAULT_SAMPLE_RATE)\n # Convert to mono if stereo\n if audio.shape[0] > 1:\n audio = torch.mean(audio, dim=0, keepdim=True) # Average channels to get mono\n return self._encode(audio.to(self.device))\n\n def save_audio(self, path: str, audio: np.ndarray):\n \"\"\"Saves the generated audio waveform to a file.\n\n Uses the soundfile library to write the NumPy audio array to the specified\n path with the default sample rate.\n\n Args:\n path: The path where the audio file will be saved.\n audio: The audio waveform as a NumPy array.\n \"\"\"\n import soundfile as sf\n\n sf.write(path, audio, DEFAULT_SAMPLE_RATE)\n\n 
@torch.inference_mode()\n def generate(\n self,\n text: str | list[str],\n max_tokens: int = 3072,\n cfg_scale: float = 3.0,\n temperature: float = 1.2,\n top_p: float = 0.95,\n use_torch_compile: bool = False,\n cfg_filter_top_k: int = 45,\n audio_prompt: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,\n audio_prompt_path: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,\n use_cfg_filter: bool | None = None,\n verbose: bool = False,\n ) -> np.ndarray | list[np.ndarray]:\n \"\"\"Generates audio corresponding to the input text.\n\n Args:\n text: The input text prompt, or a list of text prompts for batch generation.\n max_tokens: The maximum number of audio tokens to generate per prompt.\n Defaults to the model's configured audio length if None.\n cfg_scale: The scale factor for classifier-free guidance (CFG). Higher values\n lead to stronger guidance towards the text prompt.\n temperature: The temperature for sampling. Higher values increase randomness.\n top_p: The cumulative probability threshold for nucleus (top-p) sampling.\n use_torch_compile: Whether to compile the generation steps using torch.compile.\n Can significantly speed up generation after the initial\n compilation overhead. 
Defaults to False.\n cfg_filter_top_k: The number of top logits to consider during CFG filtering.\n (Note: This parameter name might be slightly misleading based\n on the code; it's used in the `_sample_next_token` function.)\n audio_prompt: An audio prompt or list of prompts to condition the generation.\n Can be a file path (str), a pre-loaded tensor (DAC codes), or None.\n If a list, its length must match the batch size of the text input.\n audio_prompt_path: (Deprecated) Use `audio_prompt` instead.\n use_cfg_filter: (Deprecated) This parameter is no longer used.\n verbose: If True, prints progress information during generation, including\n speed metrics.\n\n Returns:\n If a single text prompt was provided, returns a NumPy array containing the\n generated audio waveform.\n If a list of text prompts was provided, returns a list of NumPy arrays,\n each corresponding to a prompt in the input list. Returns None for a\n sequence if no audio was generated for it.\n \"\"\"\n batch_size = len(text) if isinstance(text, list) else 1\n audio_eos_value = self.config.eos_token_id\n audio_pad_value = self.config.pad_token_id\n delay_pattern = self.config.delay_pattern\n max_delay_pattern = max(delay_pattern)\n delay_pattern_Cx = torch.tensor(delay_pattern, device=self.device, dtype=torch.long)\n self.model.eval()\n\n if audio_prompt_path:\n print(\"Warning: audio_prompt_path is deprecated. 
Use audio_prompt instead.\")\n audio_prompt = audio_prompt_path\n if use_cfg_filter is not None:\n print(\"Warning: use_cfg_filter is deprecated.\")\n\n if verbose:\n total_start_time = time.time()\n\n if use_torch_compile and not hasattr(self, \"_compiled\"):\n # Compilation can take about a minute.\n self._prepare_generation = torch.compile(self._prepare_generation, dynamic=True, fullgraph=True)\n self._decoder_step = torch.compile(self._decoder_step, fullgraph=True, mode=\"max-autotune\")\n self._compiled = True\n\n if isinstance(audio_prompt, list):\n audio_prompt = [self.load_audio(p) if isinstance(p, str) else p for p in audio_prompt]\n elif isinstance(audio_prompt, str):\n audio_prompt = [self.load_audio(audio_prompt)]\n elif isinstance(audio_prompt, torch.Tensor):\n audio_prompt = [audio_prompt]\n elif audio_prompt is None:\n audio_prompt = [None] * batch_size\n\n assert len(audio_prompt) == batch_size, \"Number of audio prompts must match batch size\"\n\n if isinstance(text, list):\n text = [self._encode_text(t) for t in text]\n else:\n text = [self._encode_text(text)]\n text = self._pad_text_input(text)\n\n dec_state, dec_output = self._prepare_generation(text, audio_prompt, max_tokens=max_tokens)\n dec_step = min(dec_output.prefill_steps) - 1\n current_idx = torch.tensor([dec_step], device=self.device)\n\n eos_detected_Bx = torch.zeros((batch_size,), dtype=torch.bool, device=self.device)\n eos_countdown_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n finished_step_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n\n bos_over = False\n\n if verbose:\n print(\"generate: starting generation loop\")\n if use_torch_compile:\n print(\"generate: using use_torch_compile=True, the first step may be slow\")\n start_time = time.time()\n\n # --- Generation Loop ---\n while dec_step < max_tokens:\n if (eos_countdown_Bx == 0).all():\n break\n\n current_step_idx = dec_step + 1\n 
torch.compiler.cudagraph_mark_step_begin()\n dec_state.prepare_step(dec_step)\n tokens_Bx1xC = dec_output.get_tokens_at(dec_step).repeat_interleave(2, dim=0) # Repeat for CFG\n\n pred_BxC = self._decoder_step(\n tokens_Bx1xC,\n dec_state,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n current_idx,\n )\n\n current_idx += 1\n\n active_mask_Bx = eos_countdown_Bx != 0\n eos_trigger_Bx = torch.zeros_like(active_mask_Bx)\n if active_mask_Bx.any():\n is_eos_token = (~eos_detected_Bx[active_mask_Bx]) & (pred_BxC[active_mask_Bx, 0] == audio_eos_value)\n is_max_len = current_step_idx >= max_tokens - max_delay_pattern\n eos_trigger_Bx[active_mask_Bx] = is_eos_token | is_max_len\n eos_detected_Bx |= eos_trigger_Bx\n start_countdown_mask_Bx = eos_trigger_Bx & (eos_countdown_Bx < 0)\n if start_countdown_mask_Bx.any():\n eos_countdown_Bx[start_countdown_mask_Bx] = max_delay_pattern\n finished_step_Bx[start_countdown_mask_Bx] = current_step_idx\n\n padding_mask_Bx = eos_countdown_Bx > 0\n if padding_mask_Bx.any():\n pred_active_BxC = pred_BxC[padding_mask_Bx].clone()\n countdown_active_Bx = eos_countdown_Bx[padding_mask_Bx]\n step_after_eos_Bx = max_delay_pattern - countdown_active_Bx\n step_after_eos_Bx_ = step_after_eos_Bx.unsqueeze(1)\n delay_pattern_Cx_ = delay_pattern_Cx.unsqueeze(0)\n eos_mask_NxC = step_after_eos_Bx_ == delay_pattern_Cx_\n pad_mask_NxC = step_after_eos_Bx_ > delay_pattern_Cx_\n pred_active_BxC[eos_mask_NxC] = audio_eos_value\n pred_active_BxC[pad_mask_NxC] = audio_pad_value\n pred_BxC[padding_mask_Bx] = pred_active_BxC\n eos_countdown_Bx[padding_mask_Bx] -= 1\n\n # --- Update BOS flag (Original) ---\n if not bos_over:\n bos_over = all(\n dec_step - prefill_step > max_delay_pattern for prefill_step in dec_output.prefill_steps\n )\n\n dec_output.update_one(pred_BxC, current_step_idx, not bos_over)\n\n dec_step += 1\n\n if verbose and dec_step % 86 == 0:\n duration = time.time() - start_time\n if duration > 0:\n print(\n f\"generate step 
{dec_step}: speed={86 * batch_size / duration:.3f} tokens/s, realtime factor={batch_size / duration:.3f}x\"\n )\n start_time = time.time()\n\n # --- Finalize and Extract Output ---\n final_step = dec_step + 1\n\n finished_step_Bx[finished_step_Bx == -1] = final_step - max_delay_pattern\n\n prefill_steps_tensor = torch.tensor(dec_output.prefill_steps, device=self.device)\n lengths_Bx = finished_step_Bx - prefill_steps_tensor\n lengths_Bx = torch.clamp(lengths_Bx, min=0)\n\n max_len = lengths_Bx.max().item() + max_delay_pattern\n outputs = []\n\n if max_len > 0:\n num_channels = self.config.decoder_config.num_channels\n audio_pad_value = self.config.pad_token_id\n generated_codes = torch.full(\n (batch_size, max_len, num_channels),\n fill_value=audio_pad_value,\n dtype=torch.long,\n device=self.device,\n )\n\n for i in range(batch_size):\n start_step = dec_output.prefill_steps[i]\n actual_len = lengths_Bx[i].item() + max_delay_pattern\n if actual_len > 0:\n tokens_to_copy = dec_output.generated_tokens[i, start_step : start_step + actual_len, :]\n generated_codes[i, :actual_len, :] = tokens_to_copy\n\n if verbose:\n avg_steps = lengths_Bx.float().mean().item()\n total_duration = time.time() - total_start_time\n print(f\"generate: avg steps={avg_steps:.1f}, total duration={total_duration:.3f}s\")\n\n del dec_state\n\n outputs = self._generate_output(generated_codes, lengths_Bx)\n else:\n print(\"Warning: Nothing generated for any sequence in the batch.\")\n outputs = [None] * batch_size\n\n return outputs if batch_size > 1 else outputs[0]\n"], ["/dia/dia/layers.py", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torch import Tensor\nfrom torch.nn import RMSNorm\n\nfrom .config import DecoderConfig, DiaConfig, EncoderConfig\nfrom .state import DecoderInferenceState, EncoderInferenceState, KVCache\n\n\ndef _normalize_axes(axes: tuple[int, ...], ndim: int) -> tuple[int, ...]:\n return tuple(ax 
if ax >= 0 else ndim + ax for ax in axes)\n\n\nclass DenseGeneral(nn.Module):\n \"\"\"\n PyTorch equivalent of flax.linen.DenseGeneral with shapes defined at init.\n Stores weights (`kernel`) in the same layout as Jax and uses torch.tensordot\n for the generalized matrix multiplication. Weight/bias shapes are calculated\n and parameters created during initialization based on config.\n `load_weights` validates shapes and copies data.\n Attributes:\n axis (Tuple[int, ...]): Input axis or axes to contract.\n in_shapes (Tuple[int, ...]): Sizes of the input dimensions specified by `axis`.\n out_features (Tuple[int, ...]): Shape of the output features (non-contracted dims).\n use_bias (bool): Whether to add a bias term.\n weight (nn.Parameter): The kernel parameter.\n bias (Optional[nn.Parameter]): The bias parameter (if use_bias=True).\n \"\"\"\n\n def __init__(\n self,\n in_shapes: tuple[int, ...],\n out_features: tuple[int, ...],\n axis: tuple[int, ...] = (-1,),\n weight_dtype: torch.dtype | None = None,\n device: torch.device | None = None,\n ):\n super().__init__()\n self.in_shapes = in_shapes\n self.out_features = out_features\n self.axis = axis\n self.kernel_shape = self.in_shapes + self.out_features\n\n factory_kwargs = {\"device\": device, \"dtype\": weight_dtype}\n self.weight = nn.Parameter(torch.empty(self.kernel_shape, **factory_kwargs))\n\n def forward(self, inputs: Tensor) -> Tensor:\n norm_axis = _normalize_axes(self.axis, inputs.ndim)\n kernel_contract_axes = tuple(range(len(norm_axis)))\n\n output = torch.tensordot(\n inputs.to(self.weight.dtype),\n self.weight,\n dims=(norm_axis, kernel_contract_axes),\n ).to(inputs.dtype)\n return output\n\n\nclass MlpBlock(nn.Module):\n \"\"\"MLP block using DenseGeneral.\"\"\"\n\n def __init__(self, embed_dim: int, intermediate_dim: int, compute_dtype: torch.dtype):\n super().__init__()\n self.dtype = compute_dtype\n\n self.wi_fused = DenseGeneral(\n in_shapes=(embed_dim,),\n out_features=(2, intermediate_dim),\n 
axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n self.wo = DenseGeneral(\n in_shapes=(intermediate_dim,),\n out_features=(embed_dim,),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass.\"\"\"\n fused_x = self.wi_fused(x)\n\n gate = fused_x[..., 0, :]\n up = fused_x[..., 1, :]\n\n hidden = torch.mul(F.silu(gate), up).to(self.dtype)\n\n output = self.wo(hidden)\n return output\n\n\nclass RotaryEmbedding(nn.Module):\n \"\"\"Rotary Position Embedding (RoPE) implementation in PyTorch.\"\"\"\n\n def __init__(\n self,\n embedding_dims: int,\n min_timescale: float = 1.0,\n max_timescale: float = 10000.0,\n dtype: torch.dtype = torch.float32,\n ):\n super().__init__()\n if embedding_dims % 2 != 0:\n raise ValueError(\"Embedding dim must be even for RoPE.\")\n self.embedding_dims = embedding_dims\n self.min_timescale = min_timescale\n self.max_timescale = max_timescale\n self.compute_dtype = dtype\n\n half_embedding_dim = embedding_dims // 2\n fraction = (2.0 * torch.arange(0, half_embedding_dim)) / embedding_dims\n timescale = (self.min_timescale * (self.max_timescale / self.min_timescale) ** fraction).to(torch.float32)\n self.register_buffer(\"timescale\", timescale, persistent=False)\n\n def forward(self, inputs: torch.Tensor, position: torch.Tensor):\n \"\"\"Applies RoPE.\"\"\"\n position = position.unsqueeze(-1).unsqueeze(-1)\n sinusoid_inp = position / self.timescale\n sin = torch.sin(sinusoid_inp)\n cos = torch.cos(sinusoid_inp)\n first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)\n first_part = first_half * cos - second_half * sin\n second_part = second_half * cos + first_half * sin\n return torch.cat(\n (first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)),\n dim=-1,\n )\n\n def apply_rope(self, inputs: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor):\n first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)\n first_part = 
first_half * cos - second_half * sin\n second_part = second_half * cos + first_half * sin\n return torch.cat((first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)), dim=-1)\n\n\ndef custom_scaled_dot_product_attention(\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n attn_mask: torch.Tensor | None = None,\n scale: float = 1.0,\n is_causal: bool = False,\n num_gqa_groups: int = 1,\n) -> torch.Tensor:\n \"\"\"\n Custom scaled dot-product attention with GQA support for MPS compatibility.\n\n Args:\n query: (B, N_q, T, H) - Query tensor, N_q = num_query_heads\n key: (B, N_kv, S, H) - Key tensor, N_kv = num_kv_heads\n value: (B, N_kv, S, H) - Value tensor\n attn_mask: (B, 1, T, S) - Attention mask, optional\n scale: Scaling factor for attention scores\n is_causal: If True, apply causal masking\n num_gqa_groups: Number of query groups per KV head (N_q / N_kv)\n\n Returns:\n output: (B, N_q, T, H) - Attention output\n \"\"\"\n B, N_q, T, H = query.shape\n _, N_kv, S, _ = key.shape\n\n # For GQA, repeat key and value tensors to match query heads\n if num_gqa_groups > 1:\n key = key.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)\n value = value.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)\n\n # Compute attention scores: (B, N_q, T, H) @ (B, N_q, H, S) -> (B, N_q, T, S)\n scores = torch.matmul(query, key.transpose(-1, -2)) * scale\n\n # Apply causal mask if needed\n if is_causal:\n causal_mask = torch.tril(torch.ones(T, S, dtype=torch.bool, device=query.device))\n scores = scores.masked_fill(~causal_mask, float(\"-inf\"))\n\n # Apply attention mask if provided\n if attn_mask is not None:\n scores = scores.masked_fill(~attn_mask, float(\"-inf\"))\n\n # Softmax over the last dimension (S)\n attn_weights = F.softmax(scores, dim=-1)\n\n # Compute output: (B, N_q, T, S) @ (B, N_q, S, H) -> (B, N_q, T, H)\n output = torch.matmul(attn_weights, value)\n\n return output\n\n\nclass CrossAttention(nn.Module):\n 
\"\"\"Cross-Attention using DenseGeneral.\"\"\"\n\n def __init__(\n self,\n config: EncoderConfig | DecoderConfig,\n q_embed_dim: int,\n kv_embed_dim: int,\n num_query_heads: int,\n num_kv_heads: int,\n head_dim: int,\n compute_dtype: torch.dtype,\n out_embed_dim: int | None = None,\n ):\n super().__init__()\n self.num_query_heads = num_query_heads\n self.num_kv_heads = num_kv_heads\n self.head_dim = head_dim\n self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim\n self.projected_query_dim = num_query_heads * head_dim\n if num_query_heads % num_kv_heads != 0:\n raise ValueError(f\"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})\")\n self.num_gqa_groups = num_query_heads // num_kv_heads\n\n # --- Projection Layers using DenseGeneral ---\n self.q_proj = DenseGeneral(\n in_shapes=(q_embed_dim,),\n out_features=(num_query_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.k_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.v_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.o_proj = DenseGeneral(\n in_shapes=(num_query_heads, head_dim),\n out_features=(self.output_dim,),\n axis=(-2, -1),\n weight_dtype=compute_dtype,\n )\n\n # --- Rotary Embedding ---\n self.rotary_emb = RotaryEmbedding(\n embedding_dims=self.head_dim,\n max_timescale=config.rope_theta,\n dtype=compute_dtype,\n )\n\n def forward(\n self,\n Xq: torch.Tensor, # (B, T, D) T = 1 in AR generation\n q_positions: torch.Tensor, # (B, T)\n kv_positions: torch.Tensor | None = None, # (B, S)\n attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others\n cache: KVCache | None = None, # None in Encoder, KVCache in Decoder\n is_causal: bool = False,\n ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] 
| None]:\n \"\"\"\n Performs attention calculation with optional KV caching.\n\n Args:\n Xq: Query tensor (B, T, D). T=1 during single-step decoding.\n Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.\n q_positions: Positions for queries (B, T).\n kv_positions: Positions for keys/values (B, S). If None, uses q_positions.\n attn_mask: Attention mask.\n cache: KVCache.\n\n Returns:\n A tuple containing:\n - output: The attention output tensor (B, T, output_dim).\n - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.\n \"\"\"\n if kv_positions is None:\n kv_positions = q_positions\n original_dtype = Xq.dtype\n\n Xq_BxTxNxH = self.q_proj(Xq)\n Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)\n\n attn_k: torch.Tensor | None = cache.k if cache is not None else None\n attn_v: torch.Tensor | None = cache.v if cache is not None else None\n\n # Use custom attention for MPS backend, otherwise use optimized PyTorch function\n is_mps = Xq.device.type == \"mps\" and torch.backends.mps.is_available()\n if is_mps:\n attn_output = custom_scaled_dot_product_attention(\n query=Xq_BxNxTxH,\n key=attn_k,\n value=attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n is_causal=is_causal,\n num_gqa_groups=self.num_gqa_groups,\n )\n else:\n attn_output = F.scaled_dot_product_attention(\n Xq_BxNxTxH,\n attn_k,\n attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n enable_gqa=self.num_gqa_groups > 1,\n is_causal=is_causal,\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)\n output = self.o_proj(attn_output)\n\n return output.to(original_dtype)\n\n\nclass FusedQKV(nn.Module):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = False,\n num_q_heads: int = 1,\n q_head_dim: int = 1,\n num_kv_heads: int = 1,\n kv_head_dim: int = 1,\n ):\n super().__init__()\n 
self.num_q_heads = num_q_heads\n self.q_head_dim = q_head_dim\n self.num_kv_heads = num_kv_heads\n self.kv_head_dim = kv_head_dim\n self.q_output_dim = num_q_heads * q_head_dim\n self.kv_output_dim = num_kv_heads * kv_head_dim\n self.linear = nn.Linear(in_features, out_features, bias=bias)\n\n def forward(self, inputs: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n x = self.linear(inputs)\n\n q, k, v = x.split([self.q_output_dim, self.kv_output_dim, self.kv_output_dim], dim=-1)\n\n q = q.reshape(q.shape[:-1] + (self.num_q_heads, self.q_head_dim))\n k = k.reshape(k.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))\n v = v.reshape(v.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))\n\n return q, k, v\n\n\nclass SelfAttention(nn.Module):\n \"\"\"Attention using DenseGeneral.\"\"\"\n\n def __init__(\n self,\n config: EncoderConfig | DecoderConfig,\n q_embed_dim: int,\n kv_embed_dim: int,\n num_query_heads: int,\n num_kv_heads: int,\n head_dim: int,\n compute_dtype: torch.dtype,\n out_embed_dim: int | None = None,\n ):\n super().__init__()\n self.num_query_heads = num_query_heads\n self.num_kv_heads = num_kv_heads\n self.head_dim = head_dim\n self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim\n self.projected_query_dim = num_query_heads * head_dim\n if num_query_heads % num_kv_heads != 0:\n raise ValueError(f\"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})\")\n self.num_gqa_groups = num_query_heads // num_kv_heads\n self.kv_embed_dim = kv_embed_dim\n self.q_embed_dim = q_embed_dim\n\n # --- Projection Layers using DenseGeneral ---\n self.q_proj = DenseGeneral(\n in_shapes=(q_embed_dim,),\n out_features=(num_query_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.k_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.v_proj = DenseGeneral(\n 
in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.o_proj = DenseGeneral(\n in_shapes=(num_query_heads, head_dim),\n out_features=(self.output_dim,),\n axis=(-2, -1),\n weight_dtype=compute_dtype,\n )\n\n # --- Rotary Embedding ---\n self.rotary_emb = RotaryEmbedding(\n embedding_dims=self.head_dim,\n max_timescale=config.rope_theta,\n dtype=compute_dtype,\n )\n\n self.is_fused_qkv = False\n\n def get_linear_weight(self, dense: DenseGeneral):\n W_dg = dense.weight.data\n\n out_features = 1\n input_features = 1\n for dim in dense.out_features:\n out_features *= dim\n for dim in dense.in_shapes:\n input_features *= dim\n\n W_dg_reshaped_for_linear_T = W_dg.reshape(input_features, out_features)\n linear_weight = W_dg_reshaped_for_linear_T.transpose(0, 1).contiguous()\n return linear_weight\n\n def patch_fused_qkv(self):\n q_proj_weight = self.get_linear_weight(self.q_proj)\n k_proj_weight = self.get_linear_weight(self.k_proj)\n v_proj_weight = self.get_linear_weight(self.v_proj)\n\n self.qkv = FusedQKV(\n self.kv_embed_dim,\n (self.num_query_heads * self.head_dim + 2 * (self.num_kv_heads * self.head_dim)),\n bias=False,\n num_q_heads=self.num_query_heads,\n q_head_dim=self.head_dim,\n num_kv_heads=self.num_kv_heads,\n kv_head_dim=self.head_dim,\n )\n self.qkv.linear.weight.data = torch.cat([q_proj_weight, k_proj_weight, v_proj_weight], dim=0)\n\n # print(f\"qkv.weight.shape: {self.qkv.linear.weight.shape}\")\n self.is_fused_qkv = True\n\n def forward(\n self,\n X: torch.Tensor, # (B, T, D) T = 1 in AR generation\n q_positions: torch.Tensor, # (B, T)\n kv_positions: torch.Tensor | None = None, # (B, S)\n attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others\n cache: KVCache | None = None, # None in Encoder, KVCache in Decoder\n prefill: bool = False,\n is_causal: bool = False,\n current_idx: torch.Tensor | None = None,\n ) -> tuple[torch.Tensor, 
tuple[torch.Tensor, torch.Tensor] | None]:\n \"\"\"\n Performs attention calculation with optional KV caching.\n Args:\n Xq: Query tensor (B, T, D). T=1 during single-step decoding.\n Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.\n q_positions: Positions for queries (B, T).\n kv_positions: Positions for keys/values (B, S). If None, uses q_positions.\n attn_mask: Attention mask.\n cache: KVCache.\n prefill: If True, use prefill mode.\n Returns:\n A tuple containing:\n - output: The attention output tensor (B, T, output_dim).\n - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.\n \"\"\"\n if kv_positions is None:\n kv_positions = q_positions\n\n original_dtype = X.dtype\n\n if self.is_fused_qkv:\n Xq_BxTxNxH, Xk_BxSxKxH, Xv_BxSxKxH = self.qkv(X)\n else:\n Xq_BxTxNxH = self.q_proj(X)\n Xk_BxSxKxH = self.k_proj(X)\n Xv_BxSxKxH = self.v_proj(X)\n\n position = q_positions.unsqueeze(-1).unsqueeze(-1)\n sinusoid_inp = position / self.rotary_emb.timescale\n sin = torch.sin(sinusoid_inp)\n cos = torch.cos(sinusoid_inp)\n\n Xq_BxTxNxH = self.rotary_emb.apply_rope(Xq_BxTxNxH, sin, cos)\n Xk_BxSxKxH = self.rotary_emb.apply_rope(Xk_BxSxKxH, sin, cos)\n\n Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)\n\n attn_k: torch.Tensor | None = cache.k if cache is not None else None\n attn_v: torch.Tensor | None = cache.v if cache is not None else None\n\n Xk_BxKxSxH = Xk_BxSxKxH.transpose(1, 2) # (B, K, S, H)\n Xv_BxKxSxH = Xv_BxSxKxH.transpose(1, 2) # (B, K, S, H)\n\n if cache is None:\n attn_k = Xk_BxKxSxH\n attn_v = Xv_BxKxSxH\n elif prefill:\n attn_k, attn_v = Xk_BxKxSxH, Xv_BxKxSxH\n cache.prefill(attn_k, attn_v)\n else:\n attn_k, attn_v = cache.update(Xk_BxKxSxH, Xv_BxKxSxH, current_idx)\n\n # Use custom attention for MPS backend, otherwise use optimized PyTorch function\n is_mps = Xv_BxSxKxH.device.type == \"mps\" and 
torch.backends.mps.is_available()\n if is_mps:\n attn_output = custom_scaled_dot_product_attention(\n query=Xq_BxNxTxH,\n key=attn_k,\n value=attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n is_causal=is_causal,\n num_gqa_groups=self.num_gqa_groups,\n )\n else:\n attn_output = F.scaled_dot_product_attention(\n Xq_BxNxTxH,\n attn_k,\n attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n enable_gqa=self.num_gqa_groups > 1,\n is_causal=is_causal,\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)\n output = self.o_proj(attn_output)\n\n return output.to(original_dtype)\n\n\nclass EncoderLayer(nn.Module):\n \"\"\"Transformer Encoder Layer using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n enc_config = config.encoder_config\n embed_dim = enc_config.hidden_size\n self.compute_dtype = compute_dtype\n\n self.pre_sa_norm = RMSNorm(\n embed_dim,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n self.self_attention = SelfAttention(\n enc_config,\n q_embed_dim=embed_dim,\n kv_embed_dim=embed_dim,\n num_query_heads=enc_config.num_attention_heads,\n num_kv_heads=enc_config.num_key_value_heads,\n head_dim=enc_config.head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=embed_dim,\n )\n self.post_sa_norm = RMSNorm(\n embed_dim,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n self.mlp = MlpBlock(\n embed_dim=embed_dim,\n intermediate_dim=enc_config.intermediate_size,\n compute_dtype=compute_dtype,\n )\n\n def forward(\n self,\n x: torch.Tensor,\n state: EncoderInferenceState,\n ) -> torch.Tensor:\n residual = x\n x_norm = self.pre_sa_norm(x).to(self.compute_dtype)\n\n sa_out = self.self_attention(\n X=x_norm,\n q_positions=state.positions,\n kv_positions=state.positions,\n attn_mask=state.attn_mask,\n )\n x = residual + sa_out\n\n residual = x\n x_norm = self.post_sa_norm(x).to(self.compute_dtype)\n 
mlp_out = self.mlp(x_norm)\n x = residual + mlp_out\n\n return x\n\n\nclass Encoder(nn.Module):\n \"\"\"Transformer Encoder Stack using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n enc_config = config.encoder_config\n self.compute_dtype = compute_dtype\n\n self.embedding = nn.Embedding(\n enc_config.vocab_size,\n enc_config.hidden_size,\n dtype=compute_dtype,\n )\n self.layers = nn.ModuleList([EncoderLayer(config, compute_dtype) for _ in range(enc_config.num_hidden_layers)])\n self.norm = RMSNorm(\n enc_config.hidden_size,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n\n def forward(\n self,\n x_ids: torch.Tensor,\n state: EncoderInferenceState,\n ) -> torch.Tensor:\n x = self.embedding(x_ids)\n\n for layer in self.layers:\n x = layer(x, state)\n\n x = self.norm(x).to(self.compute_dtype)\n return x\n\n\nclass DecoderLayer(nn.Module):\n \"\"\"Transformer Decoder Layer using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n dec_config = config.decoder_config\n enc_config = config.encoder_config\n dec_embed_dim = dec_config.hidden_size\n enc_embed_dim = enc_config.hidden_size\n self.compute_dtype = compute_dtype\n\n # Norms\n self.pre_sa_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n self.pre_ca_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n self.pre_mlp_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n\n # Self-Attention (GQA) with Causal Masking\n self.self_attention = SelfAttention(\n dec_config,\n q_embed_dim=dec_embed_dim,\n kv_embed_dim=dec_embed_dim,\n num_query_heads=dec_config.num_attention_heads,\n num_kv_heads=dec_config.num_key_value_heads,\n head_dim=dec_config.head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=dec_embed_dim,\n )\n # 
Cross-Attention (MHA)\n self.cross_attention = CrossAttention(\n dec_config,\n q_embed_dim=dec_embed_dim,\n kv_embed_dim=enc_embed_dim, # Note kv_embed_dim\n num_query_heads=dec_config.cross_num_attention_heads,\n num_kv_heads=dec_config.cross_num_key_value_heads,\n head_dim=dec_config.cross_head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=dec_embed_dim,\n )\n # MLP\n self.mlp = MlpBlock(\n embed_dim=dec_embed_dim,\n intermediate_dim=dec_config.intermediate_size,\n compute_dtype=compute_dtype,\n )\n\n def forward(\n self,\n x: torch.Tensor,\n state: DecoderInferenceState,\n self_attn_cache: KVCache | None = None,\n cross_attn_cache: KVCache | None = None,\n prefill: bool = False,\n current_idx: int = 0,\n ) -> torch.Tensor:\n residual = x\n x_norm = self.pre_sa_norm(x).to(self.compute_dtype)\n\n self_attn_mask = state.casual_attn_mask[None, None, current_idx]\n\n sa_out = self.self_attention(\n X=x_norm, # (2, 1, D)\n q_positions=state.dec_positions, # (2, 1)\n kv_positions=state.dec_positions, # (2, 1)\n attn_mask=self_attn_mask,\n cache=self_attn_cache,\n prefill=prefill,\n is_causal=prefill,\n current_idx=current_idx,\n )\n\n x = residual + sa_out\n\n residual = x\n x_norm = self.pre_ca_norm(x).to(self.compute_dtype)\n ca_out = self.cross_attention(\n Xq=x_norm,\n q_positions=state.dec_positions,\n kv_positions=state.enc_positions,\n attn_mask=state.cross_attn_mask,\n cache=cross_attn_cache,\n )\n x = residual + ca_out\n\n residual = x\n x_norm = self.pre_mlp_norm(x).to(self.compute_dtype)\n mlp_out = self.mlp(x_norm)\n x = residual + mlp_out\n\n return x\n\n\nclass Decoder(nn.Module):\n \"\"\"Transformer Decoder Stack using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n dec_config = config.decoder_config\n self.num_channels = dec_config.num_channels\n self.num_layers = dec_config.num_hidden_layers\n\n self.embeddings = nn.ModuleList(\n [\n 
nn.Embedding(dec_config.vocab_size, dec_config.hidden_size, dtype=compute_dtype)\n for _ in range(self.num_channels)\n ]\n )\n self.layers = nn.ModuleList(\n [DecoderLayer(config=config, compute_dtype=compute_dtype) for _ in range(self.num_layers)]\n )\n\n self.norm = RMSNorm(\n dec_config.hidden_size,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n\n self.logits_dense = DenseGeneral(\n in_shapes=(dec_config.hidden_size,),\n out_features=(self.num_channels, dec_config.vocab_size),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n def precompute_cross_attn_cache(\n self,\n enc_out: torch.Tensor, # (B, S, E)\n ) -> list[KVCache]:\n \"\"\"\n Computes the Key and Value tensors for cross-attention for each layer from the encoder output.\n \"\"\"\n per_layer_kv_cache: list[KVCache] = []\n\n for layer in self.layers:\n cross_attn_module = layer.cross_attention\n k_proj = cross_attn_module.k_proj(enc_out)\n v_proj = cross_attn_module.v_proj(enc_out)\n\n k = k_proj.transpose(1, 2)\n v = v_proj.transpose(1, 2)\n\n per_layer_kv_cache.append(KVCache.from_kv(k, v))\n\n return per_layer_kv_cache\n\n def decode_step(\n self,\n tgt_ids_Bx1xC: torch.Tensor, # [B, 1, C]\n state: DecoderInferenceState,\n current_idx: int,\n ) -> torch.Tensor:\n \"\"\"\n Performs a single decoding step, managing KV caches layer by layer.\n Returns:\n A tuple containing:\n - logits_Bx1xCV: The final output logits for the current step (B, 1, C*V), cast to float32.\n \"\"\"\n\n x = None\n for i in range(self.num_channels):\n channel_tokens = tgt_ids_Bx1xC[..., i]\n channel_embed = self.embeddings[i](channel_tokens)\n x = channel_embed if x is None else x + channel_embed\n\n for i, layer in enumerate(self.layers):\n self_cache = state.self_attn_cache[i]\n cross_cache = state.cross_attn_cache[i]\n x = layer(\n x, # (2, 1, D)\n state,\n self_attn_cache=self_cache,\n cross_attn_cache=cross_cache,\n current_idx=current_idx,\n )\n\n x = self.norm(x)\n logits_Bx1xCxV = self.logits_dense(x)\n\n return 
logits_Bx1xCxV.to(torch.float32)\n\n def forward(self, tgt_ids_BxTxC: torch.Tensor, state: DecoderInferenceState) -> torch.Tensor:\n \"\"\"\n Forward pass for the Decoder stack, managing KV caches.\n Args:\n tgt_ids_BxTxC: Target token IDs (B, T, C).\n encoder_out: Output from the encoder (B, S, E).\n tgt_positions: Positions for target sequence (B, T).\n src_positions: Positions for source sequence (B, S).\n self_attn_mask: Mask for self-attention.\n cross_attn_mask: Mask for cross-attention.\n past_key_values: List containing the self-attention KV cache for each layer\n from the previous decoding step. `len(past_key_values)` should\n equal `num_layers`.\n precomputed_cross_attn_kv: A single tuple containing the pre-computed K/V cache\n derived from `encoder_out`. This is passed identically\n to all layers.\n Returns:\n A tuple containing:\n - logits: The final output logits (B, T, C * V), cast to float32.\n - present_key_values: A list containing the updated self-attention KV cache\n for each layer for the *current* decoding step.\n \"\"\"\n _, _, num_channels_in = tgt_ids_BxTxC.shape\n assert num_channels_in == self.num_channels, \"Input channels mismatch\"\n\n # Embeddings\n x = None\n for i in range(self.num_channels):\n channel_tokens = tgt_ids_BxTxC[..., i]\n channel_embed = self.embeddings[i](channel_tokens)\n x = channel_embed if x is None else x + channel_embed\n\n for i, layer in enumerate(self.layers):\n self_cache = state.self_attn_cache[i]\n cross_cache = state.cross_attn_cache[i]\n x = layer(\n x,\n state,\n self_attn_cache=self_cache,\n cross_attn_cache=cross_cache,\n prefill=True,\n )\n\n # Final Norm\n x = self.norm(x)\n logits_BxTxCxV = self.logits_dense(x)\n\n return logits_BxTxCxV.to(torch.float32)\n\n\nclass DiaModel(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/nari-labs/dia\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n coders={\n DiaConfig: (\n lambda x: x.model_dump(),\n lambda data: 
DiaConfig.model_validate(data),\n ),\n },\n):\n \"\"\"PyTorch Dia Model using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n self.encoder = Encoder(config, compute_dtype)\n self.decoder = Decoder(config, compute_dtype)\n"], ["/dia/dia/config.py", "\"\"\"Configuration management module for the Dia model.\n\nThis module provides comprehensive configuration management for the Dia model,\nutilizing Pydantic for validation. It defines configurations for data processing,\nmodel architecture (encoder and decoder), and training settings.\n\nKey components:\n- DataConfig: Parameters for data loading and preprocessing.\n- EncoderConfig: Architecture details for the encoder module.\n- DecoderConfig: Architecture details for the decoder module.\n- ModelConfig: Combined model architecture settings.\n- TrainingConfig: Training hyperparameters and settings.\n- DiaConfig: Master configuration combining all components.\n\"\"\"\n\nimport os\n\nfrom pydantic import BaseModel, Field\n\n\nclass EncoderConfig(BaseModel, frozen=True):\n \"\"\"Configuration for the encoder component of the Dia model.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia_encoder\".\n hidden_size: Size of the encoder layers, defaults to 1024.\n intermediate_size: Size of the \"intermediate\" (i.e., feed-forward) layer in the encoder, defaults to 4096.\n num_hidden_layers: Number of hidden layers in the encoder, defaults to 12.\n num_attention_heads: Number of attention heads in the encoder, defaults to 16.\n num_key_value_heads: Number of key-value heads in the encoder, defaults to 16.\n head_dim: Dimension of each attention head, defaults to 128.\n hidden_act: Activation function in the encoder, defaults to \"silu\".\n max_position_embeddings: Maximum number of position embeddings, defaults to 1024.\n initializer_range: Range for initializing weights, defaults to 0.02.\n norm_eps: Epsilon value for 
normalization layers, defaults to 1e-5.\n rope_theta: Theta value for RoPE, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE.\n vocab_size: Vocabulary size, defaults to 256.\n \"\"\"\n\n head_dim: int = Field(default=128, gt=0)\n hidden_act: str = Field(default=\"silu\")\n hidden_size: int = Field(default=1024, gt=0)\n initializer_range: float = Field(default=0.02)\n intermediate_size: int = Field(default=4096, gt=0)\n max_position_embeddings: int = Field(default=1024, gt=0)\n model_type: str = Field(default=\"dia_encoder\")\n norm_eps: float = Field(default=1e-5)\n num_attention_heads: int = Field(default=16, gt=0)\n num_hidden_layers: int = Field(default=12, gt=0)\n num_key_value_heads: int = Field(default=16, gt=0)\n rope_scaling: float | None = Field(default=None)\n rope_theta: float = Field(default=10000.0)\n vocab_size: int = Field(default=256, gt=0)\n\n\nclass DecoderConfig(BaseModel, frozen=True):\n \"\"\"Configuration for the decoder component of the Dia model.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia_decoder\".\n hidden_size: Size of the decoder layers, defaults to 2048.\n intermediate_size: Size of the \"intermediate\" (i.e., feed-forward) layer in the decoder, defaults to 8192.\n num_hidden_layers: Number of hidden layers in the decoder, defaults to 18.\n num_attention_heads: Number of attention heads in the decoder, defaults to 16.\n num_key_value_heads: Number of key-value heads in the decoder, defaults to 4.\n head_dim: Dimension of each attention head, defaults to 128.\n cross_hidden_size: Size of the cross-attention layers, defaults to 1024.\n cross_num_attention_heads: Number of attention heads in the cross-attention mechanism, defaults to 16.\n cross_num_key_value_heads: Number of key-value heads in the cross-attention mechanism, defaults to 16.\n cross_head_dim: Dimension of each cross-attention head, defaults to 128.\n hidden_act: Activation function in the decoder, defaults to \"silu\".\n 
max_position_embeddings: Maximum number of position embeddings in the decoder, defaults to 3072.\n initializer_range: Range for initializing weights in the decoder, defaults to 0.02.\n norm_eps: Epsilon value for normalization layers in the decoder, defaults to 1e-5.\n rope_theta: Theta value for RoPE in the decoder, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE in the decoder.\n vocab_size: Vocabulary size for the decoder, defaults to 1028.\n num_channels: Number of channels in the decoder, defaults to 9.\n \"\"\"\n\n cross_head_dim: int = Field(default=128, gt=0)\n cross_hidden_size: int = Field(default=1024, gt=0)\n cross_num_attention_heads: int = Field(default=16, gt=0)\n cross_num_key_value_heads: int = Field(default=16, gt=0)\n head_dim: int = Field(default=128, gt=0)\n hidden_act: str = Field(default=\"silu\")\n hidden_size: int = Field(default=2048, gt=0)\n initializer_range: float = Field(default=0.02)\n intermediate_size: int = Field(default=8192, gt=0)\n max_position_embeddings: int = Field(default=3072, gt=0)\n model_type: str = Field(default=\"dia_decoder\")\n norm_eps: float = Field(default=1e-5)\n num_attention_heads: int = Field(default=16, gt=0)\n num_channels: int = Field(default=9, gt=0)\n num_hidden_layers: int = Field(default=18, gt=0)\n num_key_value_heads: int = Field(default=4, gt=0)\n rope_scaling: float | None = Field(default=None)\n rope_theta: float = Field(default=10000.0)\n vocab_size: int = Field(default=1028, gt=0)\n\n\nclass DiaConfig(BaseModel, frozen=True):\n \"\"\"Main configuration container for the Dia model architecture.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia\".\n is_encoder_decoder: Flag indicating if the model is an encoder-decoder type, defaults to True.\n encoder: Configuration for the encoder component.\n decoder: Configuration for the decoder component.\n src_vocab_size: Size of the source (text) vocabulary.\n tgt_vocab_size: Size of the target (audio code) 
vocabulary.\n initializer_range: Range for initializing weights, defaults to 0.02.\n norm_eps: Epsilon value for normalization layers, defaults to 1e-5.\n torch_dtype: Data type for model weights in PyTorch, defaults to \"float32\".\n bos_token_id: Beginning-of-sequence token ID, defaults to 1026.\n eos_token_id: End-of-sequence token ID, defaults to 1024.\n pad_token_id: Padding token ID, defaults to 1025.\n rope_theta: Theta value for RoPE, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE.\n transformers_version: Version of the transformers library, defaults to \"4.53.0.dev0\".\n architectures: List of model architectures, defaults to [\"DiaForConditionalGeneration\"].\n delay_pattern: List of delay values for each audio channel, defaults to [0,8,9,10,11,12,13,14,15].\n \"\"\"\n\n architectures: list[str] = Field(default_factory=lambda: [\"DiaForConditionalGeneration\"])\n bos_token_id: int = Field(default=1026)\n decoder_config: DecoderConfig\n delay_pattern: list[int] = Field(default_factory=lambda: [0, 8, 9, 10, 11, 12, 13, 14, 15])\n encoder_config: EncoderConfig\n eos_token_id: int = Field(default=1024)\n initializer_range: float = Field(default=0.02)\n is_encoder_decoder: bool = Field(default=True)\n model_type: str = Field(default=\"dia\")\n norm_eps: float = Field(default=1e-5)\n pad_token_id: int = Field(default=1025)\n torch_dtype: str = Field(default=\"float32\")\n transformers_version: str = Field(default=\"4.53.0.dev0\")\n\n def save(self, path: str) -> None:\n \"\"\"Save the current configuration instance to a JSON file.\n\n Ensures the parent directory exists and the file has a .json extension.\n\n Args:\n path: The target file path to save the configuration.\n\n Raises:\n ValueError: If the path is not a file with a .json extension.\n \"\"\"\n os.makedirs(os.path.dirname(path), exist_ok=True)\n config_json = self.model_dump_json(indent=2)\n with open(path, \"w\") as f:\n f.write(config_json)\n\n @classmethod\n def load(cls, 
path: str) -> \"DiaConfig | None\":\n \"\"\"Load and validate a Dia configuration from a JSON file.\n\n Args:\n path: The path to the configuration file.\n\n Returns:\n A validated DiaConfig instance if the file exists and is valid,\n otherwise None if the file is not found.\n\n Raises:\n ValueError: If the path does not point to an existing .json file.\n pydantic.ValidationError: If the JSON content fails validation against the DiaConfig schema.\n \"\"\"\n try:\n with open(path, \"r\") as f:\n content = f.read()\n return cls.model_validate_json(content)\n except FileNotFoundError:\n return None\n"], ["/dia/dia/audio.py", "import typing as tp\n\nimport torch\n\n\ndef build_delay_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute (t_idx_BxTxC, indices_BTCx3) so that out[t, c] = in[t - delay[c], c].\n Negative t_idx => BOS; t_idx >= T => PAD.\n \"\"\"\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32)\n\n t_idx_BxT = torch.broadcast_to(\n torch.arange(T, dtype=torch.int32)[None, :],\n [B, T],\n )\n t_idx_BxTx1 = t_idx_BxT[..., None]\n t_idx_BxTxC = t_idx_BxTx1 - delay_arr.view(1, 1, C)\n\n b_idx_BxTxC = torch.broadcast_to(\n torch.arange(B, dtype=torch.int32).view(B, 1, 1),\n [B, T, C],\n )\n c_idx_BxTxC = torch.broadcast_to(\n torch.arange(C, dtype=torch.int32).view(1, 1, C),\n [B, T, C],\n )\n\n # We must clamp time indices to [0..T-1] so gather_nd equivalent won't fail\n t_clamped_BxTxC = torch.clamp(t_idx_BxTxC, 0, T - 1)\n\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_clamped_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n dim=1,\n ).long() # Ensure indices are long type for indexing\n\n return t_idx_BxTxC, indices_BTCx3\n\n\ndef apply_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n bos_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n) -> torch.Tensor:\n \"\"\"\n Applies the delay pattern to batched audio tokens using 
precomputed indices,\n inserting BOS where t_idx < 0 and PAD where t_idx >= T.\n\n Args:\n audio_BxTxC: [B, T, C] int16 audio tokens (or int32/float)\n pad_value: the padding token\n bos_value: the BOS token\n precomp: (t_idx_BxTxC, indices_BTCx3) from build_delay_indices\n\n Returns:\n result_BxTxC: [B, T, C] delayed audio tokens\n \"\"\"\n device = audio_BxTxC.device # Get device from input tensor\n t_idx_BxTxC, indices_BTCx3 = precomp\n t_idx_BxTxC = t_idx_BxTxC.to(device) # Move precomputed indices to device\n indices_BTCx3 = indices_BTCx3.to(device)\n\n # Equivalent of tf.gather_nd using advanced indexing\n # Ensure indices are long type if not already (build_delay_indices should handle this)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.shape)\n\n # Create masks on the correct device\n mask_bos = t_idx_BxTxC < 0 # => place bos_value\n mask_pad = t_idx_BxTxC >= audio_BxTxC.shape[1] # => place pad_value\n\n # Create scalar tensors on the correct device\n bos_tensor = torch.tensor(bos_value, dtype=audio_BxTxC.dtype, device=device)\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n\n # If mask_bos, BOS; else if mask_pad, PAD; else original gather\n # All tensors should now be on the same device\n result_BxTxC = torch.where(mask_bos, bos_tensor, torch.where(mask_pad, pad_tensor, gathered_BxTxC))\n\n return result_BxTxC\n\n\ndef build_revert_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute indices for the revert operation using PyTorch.\n\n Returns:\n A tuple (t_idx_BxTxC, indices_BTCx3) where:\n - t_idx_BxTxC is a tensor of shape [B, T, C] computed as time indices plus the delay.\n - indices_BTCx3 is a tensor of shape [B*T*C, 3] used for gathering, computed from:\n batch indices, clamped time indices, and channel indices.\n \"\"\"\n # Use default device unless 
specified otherwise; assumes inputs might define device later\n device = None # Or determine dynamically if needed, e.g., from a model parameter\n\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32, device=device)\n\n t_idx_BT1 = torch.broadcast_to(torch.arange(T, device=device).unsqueeze(0), [B, T])\n t_idx_BT1 = t_idx_BT1.unsqueeze(-1)\n\n t_idx_BxTxC = torch.minimum(\n t_idx_BT1 + delay_arr.view(1, 1, C),\n torch.tensor(T - 1, device=device),\n )\n b_idx_BxTxC = torch.broadcast_to(torch.arange(B, device=device).view(B, 1, 1), [B, T, C])\n c_idx_BxTxC = torch.broadcast_to(torch.arange(C, device=device).view(1, 1, C), [B, T, C])\n\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_idx_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n axis=1,\n ).long() # Ensure indices are long type\n\n return t_idx_BxTxC, indices_BTCx3\n\n\ndef revert_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n T: int,\n) -> torch.Tensor:\n \"\"\"\n Reverts a delay pattern from batched audio tokens using precomputed indices (PyTorch version).\n\n Args:\n audio_BxTxC: Input delayed audio tensor\n pad_value: Padding value for out-of-bounds indices\n precomp: Precomputed revert indices tuple containing:\n - t_idx_BxTxC: Time offset indices tensor\n - indices_BTCx3: Gather indices tensor for original audio\n T: Original sequence length before padding\n\n Returns:\n Reverted audio tensor with same shape as input\n \"\"\"\n t_idx_BxTxC, indices_BTCx3 = precomp\n device = audio_BxTxC.device # Get device from input tensor\n\n # Move precomputed indices to the same device as audio_BxTxC if they aren't already\n t_idx_BxTxC = t_idx_BxTxC.to(device)\n indices_BTCx3 = indices_BTCx3.to(device)\n\n # Using PyTorch advanced indexing (equivalent to tf.gather_nd or np equivalent)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = 
gathered_flat.view(audio_BxTxC.size()) # Use .size() for robust reshaping\n\n # Create pad_tensor on the correct device\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n # Create T tensor on the correct device for comparison\n T_tensor = torch.tensor(T, device=device)\n\n result_BxTxC = torch.where(t_idx_BxTxC >= T_tensor, pad_tensor, gathered_BxTxC) # Changed np.where to torch.where\n\n return result_BxTxC\n"], ["/dia/dia/state.py", "from dataclasses import dataclass\nfrom typing import Optional\n\nimport torch\n\nfrom .config import DiaConfig\n\n\ndef create_attn_mask(\n q_padding_mask_1d: torch.Tensor,\n k_padding_mask_1d: torch.Tensor,\n device: torch.device,\n is_causal: bool = False,\n) -> torch.Tensor:\n \"\"\"\n Creates the attention mask (self or cross) mimicking JAX segment ID logic.\n \"\"\"\n # B1, Tq = q_padding_mask_1d.shape\n # B2, Tk = k_padding_mask_1d.shape\n\n p_mask_q = q_padding_mask_1d.unsqueeze(2) # Shape [B, Tq, 1]\n p_mask_k = k_padding_mask_1d.unsqueeze(1) # Shape [B, 1, Tk]\n\n # Condition A: Non-padding query attends to non-padding key\n non_pad_attends_non_pad = p_mask_q & p_mask_k # Shape [B, Tq, Tk]\n\n # Condition B: Padding query attends to padding key\n pad_attends_pad = (~p_mask_q) & (~p_mask_k) # Shape [B, Tq, Tk]\n\n # Combine: True if padding status is compatible (both non-pad OR both pad)\n mask = non_pad_attends_non_pad | pad_attends_pad # Shape [B, Tq, Tk]\n\n if is_causal:\n # assert Tq == Tk, \"Causal mask requires query and key sequence lengths to be equal\"\n causal_mask_2d = torch.tril(torch.ones_like(mask[0], dtype=torch.bool, device=device)) # Shape [B, Tq, Tk]\n causal_mask = mask & causal_mask_2d # Shape [B, Tq, Tk]\n return causal_mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]\n else:\n return mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]\n\n\n@dataclass\nclass EncoderInferenceState:\n \"\"\"Parameters specifically for encoder inference.\"\"\"\n\n max_seq_len: int\n device: torch.device\n 
positions: torch.Tensor\n padding_mask: torch.Tensor\n attn_mask: torch.Tensor\n\n @classmethod\n def new(cls, config: DiaConfig, cond_src: torch.Tensor) -> \"EncoderInferenceState\":\n \"\"\"Creates EtorchrInferenceParams from DiaConfig and a device.\"\"\"\n device = cond_src.device\n\n positions = torch.arange(\n config.encoder_config.max_position_embeddings, dtype=torch.float32, device=device\n ).unsqueeze(0)\n padding_mask = (cond_src.squeeze(1) != 0).to(device).repeat_interleave(2, dim=0)\n attn_mask = create_attn_mask(padding_mask, padding_mask, device, is_causal=False)\n\n return cls(\n max_seq_len=config.encoder_config.max_position_embeddings,\n device=device,\n positions=positions,\n padding_mask=padding_mask,\n attn_mask=attn_mask,\n )\n\n\nclass KVCache(torch.nn.Module):\n k: torch.Tensor\n v: torch.Tensor\n\n def __init__(\n self,\n batch_size: int,\n num_heads: int,\n max_len: int,\n head_dim: int,\n dtype: torch.dtype,\n device: torch.device,\n k: torch.Tensor | None = None,\n v: torch.Tensor | None = None,\n ):\n k = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if k is None else k\n v = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if v is None else v\n super().__init__()\n\n self.register_buffer(\"k\", k)\n self.register_buffer(\"v\", v)\n\n @classmethod\n def from_kv(cls, k: torch.Tensor, v: torch.Tensor) -> \"KVCache\":\n return cls(\n batch_size=k.shape[0] // 2,\n num_heads=k.shape[1],\n max_len=k.shape[2],\n head_dim=k.shape[3],\n dtype=k.dtype,\n device=k.device,\n k=k,\n v=v,\n )\n\n def update(self, k: torch.Tensor, v: torch.Tensor, current_idx: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n k_out, v_out = self.k, self.v\n k_out[:, :, current_idx, :] = k\n v_out[:, :, current_idx, :] = v\n return self.k, self.v\n\n def prefill(self, k: torch.Tensor, v: torch.Tensor):\n prefill_len = k.shape[2]\n self.k[:, :, :prefill_len, :] = k\n self.v[:, :, 
:prefill_len, :] = v\n\n\n@dataclass\nclass DecoderInferenceState:\n \"\"\"Parameters specifically for decoder inference.\"\"\"\n\n device: torch.device\n dtype: torch.dtype\n enc_out: torch.Tensor\n enc_positions: torch.Tensor\n dec_positions: torch.Tensor\n self_attn_cache: list[KVCache]\n cross_attn_cache: list[KVCache]\n casual_attn_mask: torch.Tensor\n cross_attn_mask: torch.Tensor\n\n @classmethod\n def new(\n cls,\n config: DiaConfig,\n enc_state: EncoderInferenceState,\n enc_out: torch.Tensor,\n dec_cross_attn_cache: list[KVCache],\n compute_dtype: torch.dtype,\n max_generation_length: Optional[int] = None,\n ) -> \"DecoderInferenceState\":\n \"\"\"Creates DecoderInferenceParams from DiaConfig and a device.\"\"\"\n device = enc_out.device\n max_audio_len = max_generation_length or config.decoder_config.max_position_embeddings\n batch_size = enc_out.shape[0] // 2\n\n dec_positions = torch.full((2 * batch_size, 1), fill_value=0, dtype=torch.int32, device=device)\n causal_mask = torch.tril(torch.ones(max_audio_len, max_audio_len, dtype=torch.bool, device=device))\n dec_mask = torch.ones((2 * batch_size, 1), dtype=torch.bool, device=device)\n cross_attn_mask = create_attn_mask(dec_mask, enc_state.padding_mask, device, is_causal=False)\n\n self_attn_cache = [\n KVCache(\n batch_size,\n config.decoder_config.num_key_value_heads,\n max_audio_len,\n config.decoder_config.head_dim,\n compute_dtype,\n device,\n )\n for _ in range(config.decoder_config.num_hidden_layers)\n ]\n\n return cls(\n device=device,\n dtype=compute_dtype,\n enc_out=enc_out,\n enc_positions=enc_state.positions,\n dec_positions=dec_positions,\n self_attn_cache=self_attn_cache,\n cross_attn_cache=dec_cross_attn_cache,\n casual_attn_mask=causal_mask,\n cross_attn_mask=cross_attn_mask,\n )\n\n def prepare_step(self, step_from: int, step_to: int | None = None) -> None:\n if step_to is None:\n step_to = step_from + 1\n self.dec_positions = torch.arange(step_from, step_to, dtype=torch.int32, 
device=self.device).unsqueeze(0)\n\n\n@dataclass\nclass DecoderOutput:\n generated_tokens: torch.Tensor\n prefill_steps: list[int]\n\n @classmethod\n def new(cls, batch_size: int, config: DiaConfig, device: torch.device) -> \"DecoderOutput\":\n max_audio_len = config.decoder_config.max_position_embeddings\n return cls(\n generated_tokens=torch.full(\n (batch_size, max_audio_len, config.decoder_config.num_channels),\n fill_value=-1,\n dtype=torch.int,\n device=device,\n ),\n prefill_steps=[],\n )\n\n def get_tokens_at(self, step_from: int, step_to: int | None = None) -> torch.Tensor:\n if step_to is None:\n step_to = step_from + 1\n return self.generated_tokens[:, step_from:step_to, :]\n\n def update_one(self, dec_out: torch.Tensor, step: int, apply_mask: bool = False):\n dec_out = dec_out.to(self.generated_tokens.dtype)\n if apply_mask:\n mask = self.generated_tokens[:, step, :] == -1\n self.generated_tokens[:, step, :] = torch.where(mask, dec_out, self.generated_tokens[:, step, :])\n else:\n self.generated_tokens[:, step, :] = dec_out\n\n def prefill(self, dec_out: torch.Tensor, prefill_steps: list[int]):\n length = dec_out.shape[1]\n self.generated_tokens[:, :length, :] = dec_out\n self.prefill_steps = prefill_steps\n"], ["/dia/example/simple-cpu.py", "import torch\n\nfrom dia.model import Dia\n\n\n# Select device: CPU\ndevice = torch.device(\"cpu\")\nprint(f\"Using device: {device}\")\n\n# Load model\nmodel = Dia.from_pretrained(\n \"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float32\", device=device\n) # Float32 works better than float16 on CPU - you can also test with float16\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\noutput = model.generate(text, use_torch_compile=False, verbose=True)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/example/voice_clone.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\n# You should put the transcript of the voice you want to clone\n# We will use the audio created by running simple.py as an example.\n# Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.\nclone_from_text = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\nclone_from_audio = \"simple.mp3\"\n\n# For your custom needs, replace above with below and add your audio file to this directory:\n# clone_from_text = \"[S1] ... [S2] ... [S1] ... corresponding to your_audio_name.mp3\"\n# clone_from_audio = \"your_audio_name.mp3\"\n\n# Text to generate\ntext_to_generate = \"[S1] Hello, how are you? [S2] I'm good, thank you. [S1] What's your name? [S2] My name is Dia. [S1] Nice to meet you. [S2] Nice to meet you too.\"\n\n# It will only return the audio from the text_to_generate\noutput = model.generate(\n clone_from_text + text_to_generate,\n audio_prompt=clone_from_audio,\n use_torch_compile=False,\n verbose=True,\n cfg_scale=4.0,\n temperature=1.8,\n top_p=0.90,\n cfg_filter_top_k=50,\n)\n\nmodel.save_audio(\"voice_clone.mp3\", output)\n"], ["/dia/example/voice_clone_batch.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\n# You should put the transcript of the voice you want to clone\n# We will use the audio created by running simple.py as an example.\n# Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.\nclone_from_text = \"[S1] Dia is an open weights text to dialogue model. 
[S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\n# For your custom needs, replace above with below and add your audio file to this directory:\n# clone_from_text = \"[S1] ... [S2] ... [S1] ... corresponding to your_audio_name.mp3\"\n# clone_from_audio = \"your_audio_name.mp3\"\n\n# Text to generate\ntext_to_generate = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\nclone_from_audios = [f\"simple_{i}.mp3\" for i in range(10)]\n\ntexts = [clone_from_text + text_to_generate for _ in range(10)]\n\n# It will only return the audio from the text_to_generate\noutput = model.generate(texts, audio_prompt=clone_from_audios, use_torch_compile=True, verbose=True, max_tokens=2000)\n\nfor i, o in enumerate(output):\n model.save_audio(f\"voice_clone_{i}.mp3\", o)\n"], ["/dia/example/benchmark.py", "from random import choice\n\nimport torch\n\nfrom dia.model import Dia\n\n\ntorch._inductor.config.coordinate_descent_tuning = True\ntorch._inductor.config.triton.unique_kernel_names = True\ntorch._inductor.config.fx_graph_cache = True\n\n# debugging\ntorch._logging.set_logs(graph_breaks=True, recompiles=True)\n\nmodel_name = \"nari-labs/Dia-1.6B-0626\"\ncompute_dtype = \"float16\"\n\nmodel = Dia.from_pretrained(model_name, compute_dtype=compute_dtype)\n\n\ntest_cases = [\n \"[S1] Dia is an open weights text to dialogue model.\",\n \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\",\n \"[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code.\",\n \"[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code. 
[S2] It is a new feature in PyTorch that allows you to compile your model with a single line of code.\",\n]\n\n\n# Wram up\nfor _ in range(2):\n text = choice(test_cases)\n output = model.generate(text, audio_prompt=\"./example_prompt.mp3\", use_torch_compile=True, verbose=True)\n output = model.generate(text, use_torch_compile=True, verbose=True)\n\n# Benchmark\nfor _ in range(10):\n text = choice(test_cases)\n output = model.generate(text, use_torch_compile=True, verbose=True)\n output = model.generate(text, audio_prompt=\"./example_prompt.mp3\", use_torch_compile=True, verbose=True)\n"], ["/dia/example/simple_batch.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\ntexts = [text for _ in range(10)]\n\noutput = model.generate(texts, use_torch_compile=True, verbose=True, max_tokens=1500)\n\nfor i, o in enumerate(output):\n model.save_audio(f\"simple_{i}.mp3\", o)\n"], ["/dia/hf.py", "from transformers import AutoProcessor, DiaForConditionalGeneration\n\n\ntorch_device = \"cuda\"\nmodel_checkpoint = \"nari-labs/Dia-1.6B-0626\"\n\ntext = [\n \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n]\nprocessor = AutoProcessor.from_pretrained(model_checkpoint)\ninputs = processor(text=text, padding=True, return_tensors=\"pt\").to(torch_device)\n\nmodel = DiaForConditionalGeneration.from_pretrained(model_checkpoint).to(torch_device)\noutputs = model.generate(**inputs, max_new_tokens=3072, guidance_scale=3.0, temperature=1.8, top_p=0.90, top_k=45)\n\noutputs = processor.batch_decode(outputs)\nprocessor.save_audio(outputs, \"example.mp3\")\n"], ["/dia/example/simple.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\noutput = model.generate(\n text,\n use_torch_compile=False,\n verbose=True,\n cfg_scale=3.0,\n temperature=1.8,\n top_p=0.90,\n cfg_filter_top_k=50,\n)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/example/simple-mac.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\n# It is important to set the `use_torch_compile` argument to `False` when using Dia on MacOS.\n# This is because the `torch.compile` function is not supported on MacOS.\noutput = model.generate(text, use_torch_compile=False, verbose=True)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/dia/__init__.py", "from .model import Dia\n\n\n__all__ = [\n \"Dia\",\n]\n"]], "task_instance_info": {"created_time": "2025-08-20 15:27:36", "created_task_model": "DeepSeek-R1", "function_skeleton": "def main():\n pass", "code_description": "This code segment functions to: Process command-line arguments for audio generation parameters, validate configuration requirements, initialize computational resources, load machine learning models from specified sources, and execute text-to-speech synthesis with configurable generation settings while handling error conditions."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\ndef main():\n parser = argparse.ArgumentParser(description='Process audio using Dia model')\n parser.add_argument('--input', required=True, help='Input audio file path')\n parser.add_argument('--output_dir', required=True, help='Output directory path')\n parser.add_argument('--model_path', required=True, help='Path to model checkpoint')\n parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu', \n help='Device to use (cuda/cpu)')\n parser.add_argument('--seed', type=int, default=None, help='Random seed for reproducibility')\n \n args = parser.parse_args()\n\n if not os.path.isfile(args.input):\n raise FileNotFoundError(f\"Input file {args.input} not found\")\n \n os.makedirs(args.output_dir, exist_ok=True)\n \n if args.seed is not None:\n set_seed(args.seed)\n \n # Load model\n model = Dia()\n checkpoint = torch.load(args.model_path, map_location=args.device)\n model.load_state_dict(checkpoint['model_state_dict'])\n 
model.to(args.device)\n model.eval()\n \n # Process audio\n audio, sr = sf.read(args.input)\n audio_tensor = torch.from_numpy(audio).float().to(args.device)\n \n with torch.no_grad():\n processed = model(audio_tensor.unsqueeze(0)).squeeze(0)\n \n # Save output\n output_path = os.path.join(args.output_dir, os.path.basename(args.input))\n sf.write(output_path, processed.cpu().numpy(), sr)\n[TASK_END]\n```", "inference_time": "2025-08-20 15-28-33"}, "editdistance_info": {"edit_distance": 13.3472, "calculate_time": "2025-08-20 15:28:33", "true_code_clean": "def main():\n parser = argparse.ArgumentParser(description=\"Generate audio using the Dia model.\")\n parser.add_argument(\"text\", type=str, help=\"Input text for speech generation.\")\n parser.add_argument(\n \"--output\", type=str, required=True, help=\"Path to save the generated audio file (e.g., output.wav).\"\n )\n parser.add_argument(\n \"--repo-id\",\n type=str,\n default=\"nari-labs/Dia-1.6B-0626\",\n help=\"Hugging Face repository ID (e.g., nari-labs/Dia-1.6B-0626).\",\n )\n parser.add_argument(\n \"--local-paths\", action=\"store_true\", help=\"Load model from local config and checkpoint files.\"\n )\n parser.add_argument(\n \"--config\", type=str, help=\"Path to local config.json file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--checkpoint\", type=str, help=\"Path to local model checkpoint .pth file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--audio-prompt\", type=str, default=None, help=\"Path to an optional audio prompt WAV file for voice cloning.\"\n )\n gen_group = parser.add_argument_group(\"Generation Parameters\")\n gen_group.add_argument(\n \"--max-tokens\",\n type=int,\n default=None,\n help=\"Maximum number of audio tokens to generate (defaults to config value).\",\n )\n gen_group.add_argument(\n \"--cfg-scale\", type=float, default=3.0, help=\"Classifier-Free Guidance scale (default: 3.0).\"\n )\n gen_group.add_argument(\n \"--temperature\", 
type=float, default=1.3, help=\"Sampling temperature (higher is more random, default: 0.7).\"\n )\n gen_group.add_argument(\"--top-p\", type=float, default=0.95, help=\"Nucleus sampling probability (default: 0.95).\")\n infra_group = parser.add_argument_group(\"Infrastructure\")\n infra_group.add_argument(\"--seed\", type=int, default=None, help=\"Random seed for reproducibility.\")\n infra_group.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device to run inference on (e.g., 'cuda', 'cpu', default: auto).\",\n )\n args = parser.parse_args()\n if args.local_paths:\n if not args.config:\n parser.error(\"--config is required when --local-paths is set.\")\n if not args.checkpoint:\n parser.error(\"--checkpoint is required when --local-paths is set.\")\n if not os.path.exists(args.config):\n parser.error(f\"Config file not found: {args.config}\")\n if not os.path.exists(args.checkpoint):\n parser.error(f\"Checkpoint file not found: {args.checkpoint}\")\n if args.seed is not None:\n set_seed(args.seed)\n print(f\"Using user-selected seed: {args.seed}\")\n device = torch.device(args.device)\n print(f\"Using device: {device}\")\n print(\"Loading model...\")\n if args.local_paths:\n print(f\"Loading from local paths: config='{args.config}', checkpoint='{args.checkpoint}'\")\n try:\n model = Dia.from_local(args.config, args.checkpoint, device=device)\n except Exception as e:\n print(f\"Error loading local model: {e}\")\n exit(1)\n else:\n print(f\"Loading from Hugging Face Hub: repo_id='{args.repo_id}'\")\n try:\n model = Dia.from_pretrained(args.repo_id, device=device)\n except Exception as e:\n print(f\"Error loading model from Hub: {e}\")\n exit(1)\n print(\"Model loaded.\")\n print(\"Generating audio...\")\n try:\n sample_rate = 44100 \n output_audio = model.generate(\n text=args.text,\n audio_prompt=args.audio_prompt,\n max_tokens=args.max_tokens,\n cfg_scale=args.cfg_scale,\n 
temperature=args.temperature,\n top_p=args.top_p,\n )\n print(\"Audio generation complete.\")\n print(f\"Saving audio to {args.output}...\")\n os.makedirs(os.path.dirname(args.output) or \".\", exist_ok=True)\n sf.write(args.output, output_audio, sample_rate)\n print(f\"Audio successfully saved to {args.output}\")\n except Exception as e:\n print(f\"Error during audio generation or saving: {e}\")\n exit(1)", "predict_code_clean": "def main():\n parser = argparse.ArgumentParser(description='Process audio using Dia model')\n parser.add_argument('--input', required=True, help='Input audio file path')\n parser.add_argument('--output_dir', required=True, help='Output directory path')\n parser.add_argument('--model_path', required=True, help='Path to model checkpoint')\n parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu', \n help='Device to use (cuda/cpu)')\n parser.add_argument('--seed', type=int, default=None, help='Random seed for reproducibility')\n args = parser.parse_args()\n if not os.path.isfile(args.input):\n raise FileNotFoundError(f\"Input file {args.input} not found\")\n os.makedirs(args.output_dir, exist_ok=True)\n if args.seed is not None:\n set_seed(args.seed)\n model = Dia()\n checkpoint = torch.load(args.model_path, map_location=args.device)\n model.load_state_dict(checkpoint['model_state_dict'])\n model.to(args.device)\n model.eval()\n audio, sr = sf.read(args.input)\n audio_tensor = torch.from_numpy(audio).float().to(args.device)\n with torch.no_grad():\n processed = model(audio_tensor.unsqueeze(0)).squeeze(0)\n output_path = os.path.join(args.output_dir, os.path.basename(args.input))\n sf.write(output_path, processed.cpu().numpy(), sr)"}} {"repo_name": "dia", "file_name": "/dia/dia/audio.py", "inference_info": {"prefix_code": "import typing as tp\n\nimport torch\n\n\ndef build_delay_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute 
(t_idx_BxTxC, indices_BTCx3) so that out[t, c] = in[t - delay[c], c].\n Negative t_idx => BOS; t_idx >= T => PAD.\n \"\"\"\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32)\n\n t_idx_BxT = torch.broadcast_to(\n torch.arange(T, dtype=torch.int32)[None, :],\n [B, T],\n )\n t_idx_BxTx1 = t_idx_BxT[..., None]\n t_idx_BxTxC = t_idx_BxTx1 - delay_arr.view(1, 1, C)\n\n b_idx_BxTxC = torch.broadcast_to(\n torch.arange(B, dtype=torch.int32).view(B, 1, 1),\n [B, T, C],\n )\n c_idx_BxTxC = torch.broadcast_to(\n torch.arange(C, dtype=torch.int32).view(1, 1, C),\n [B, T, C],\n )\n\n # We must clamp time indices to [0..T-1] so gather_nd equivalent won't fail\n t_clamped_BxTxC = torch.clamp(t_idx_BxTxC, 0, T - 1)\n\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_clamped_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n dim=1,\n ).long() # Ensure indices are long type for indexing\n\n return t_idx_BxTxC, indices_BTCx3\n\n\n", "suffix_code": "\n\n\ndef build_revert_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute indices for the revert operation using PyTorch.\n\n Returns:\n A tuple (t_idx_BxTxC, indices_BTCx3) where:\n - t_idx_BxTxC is a tensor of shape [B, T, C] computed as time indices plus the delay.\n - indices_BTCx3 is a tensor of shape [B*T*C, 3] used for gathering, computed from:\n batch indices, clamped time indices, and channel indices.\n \"\"\"\n # Use default device unless specified otherwise; assumes inputs might define device later\n device = None # Or determine dynamically if needed, e.g., from a model parameter\n\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32, device=device)\n\n t_idx_BT1 = torch.broadcast_to(torch.arange(T, device=device).unsqueeze(0), [B, T])\n t_idx_BT1 = t_idx_BT1.unsqueeze(-1)\n\n t_idx_BxTxC = torch.minimum(\n t_idx_BT1 + delay_arr.view(1, 1, C),\n torch.tensor(T - 1, device=device),\n )\n b_idx_BxTxC = 
torch.broadcast_to(torch.arange(B, device=device).view(B, 1, 1), [B, T, C])\n c_idx_BxTxC = torch.broadcast_to(torch.arange(C, device=device).view(1, 1, C), [B, T, C])\n\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_idx_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n axis=1,\n ).long() # Ensure indices are long type\n\n return t_idx_BxTxC, indices_BTCx3\n\n\ndef revert_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n T: int,\n) -> torch.Tensor:\n \"\"\"\n Reverts a delay pattern from batched audio tokens using precomputed indices (PyTorch version).\n\n Args:\n audio_BxTxC: Input delayed audio tensor\n pad_value: Padding value for out-of-bounds indices\n precomp: Precomputed revert indices tuple containing:\n - t_idx_BxTxC: Time offset indices tensor\n - indices_BTCx3: Gather indices tensor for original audio\n T: Original sequence length before padding\n\n Returns:\n Reverted audio tensor with same shape as input\n \"\"\"\n t_idx_BxTxC, indices_BTCx3 = precomp\n device = audio_BxTxC.device # Get device from input tensor\n\n # Move precomputed indices to the same device as audio_BxTxC if they aren't already\n t_idx_BxTxC = t_idx_BxTxC.to(device)\n indices_BTCx3 = indices_BTCx3.to(device)\n\n # Using PyTorch advanced indexing (equivalent to tf.gather_nd or np equivalent)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.size()) # Use .size() for robust reshaping\n\n # Create pad_tensor on the correct device\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n # Create T tensor on the correct device for comparison\n T_tensor = torch.tensor(T, device=device)\n\n result_BxTxC = torch.where(t_idx_BxTxC >= T_tensor, pad_tensor, gathered_BxTxC) # Changed np.where to torch.where\n\n return result_BxTxC\n", "middle_code": "def apply_audio_delay(\n audio_BxTxC: 
torch.Tensor,\n pad_value: int,\n bos_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n) -> torch.Tensor:\n device = audio_BxTxC.device \n t_idx_BxTxC, indices_BTCx3 = precomp\n t_idx_BxTxC = t_idx_BxTxC.to(device) \n indices_BTCx3 = indices_BTCx3.to(device)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.shape)\n mask_bos = t_idx_BxTxC < 0 \n mask_pad = t_idx_BxTxC >= audio_BxTxC.shape[1] \n bos_tensor = torch.tensor(bos_value, dtype=audio_BxTxC.dtype, device=device)\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n result_BxTxC = torch.where(mask_bos, bos_tensor, torch.where(mask_pad, pad_tensor, gathered_BxTxC))\n return result_BxTxC", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/dia/dia/model.py", "import time\nfrom enum import Enum\nfrom typing import Callable\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchaudio\n\nfrom .audio import apply_audio_delay, build_delay_indices, build_revert_indices, revert_audio_delay\nfrom .config import DiaConfig\nfrom .layers import DiaModel\nfrom .state import DecoderInferenceState, DecoderOutput, EncoderInferenceState\n\n\nDEFAULT_SAMPLE_RATE = 44100\nSAMPLE_RATE_RATIO = 512\n\n\ndef _get_default_device():\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n elif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n return torch.device(\"mps\")\n return torch.device(\"cpu\")\n\n\ndef _sample_next_token(\n logits_BCxV: torch.Tensor,\n temperature: float,\n top_p: float,\n top_k: int | None,\n audio_eos_value: int,\n) -> torch.Tensor:\n if temperature == 0.0:\n return torch.argmax(logits_BCxV, dim=-1)\n\n logits_BCxV = logits_BCxV / temperature\n\n if audio_eos_value is not None and audio_eos_value >= 0:\n top_logit_indices_BC = 
torch.argmax(logits_BCxV, dim=-1)\n eos_not_highest_mask_BC = top_logit_indices_BC != audio_eos_value\n mask_eos_unless_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)\n mask_eos_unless_highest_BCxV[eos_not_highest_mask_BC, audio_eos_value] = True\n logits_BCxV = logits_BCxV.masked_fill(mask_eos_unless_highest_BCxV, -torch.inf)\n eos_highest_mask_BC = top_logit_indices_BC == audio_eos_value\n mask_eos_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)\n mask_eos_highest_BCxV[eos_highest_mask_BC, :audio_eos_value] = True\n logits_BCxV = logits_BCxV.masked_fill(mask_eos_highest_BCxV, -torch.inf)\n\n if top_k is not None:\n _, top_k_indices_BCxV = torch.topk(logits_BCxV, k=top_k, dim=-1)\n mask = torch.ones_like(logits_BCxV, dtype=torch.bool)\n mask = mask.scatter(dim=-1, index=top_k_indices_BCxV, value=False)\n logits_BCxV = logits_BCxV.masked_fill(mask, -torch.inf)\n\n if top_p < 1.0:\n probs_BCxV = torch.softmax(logits_BCxV, dim=-1)\n sorted_probs_BCxV, sorted_indices_BCxV = torch.sort(probs_BCxV, dim=-1, descending=True)\n cumulative_probs_BCxV = torch.cumsum(sorted_probs_BCxV, dim=-1)\n\n sorted_indices_to_remove_BCxV = cumulative_probs_BCxV > top_p\n sorted_indices_to_remove_BCxV = torch.roll(sorted_indices_to_remove_BCxV, shifts=1, dims=-1)\n sorted_indices_to_remove_BCxV[..., 0] = torch.zeros_like(sorted_indices_to_remove_BCxV[..., 0])\n\n indices_to_remove_BCxV = torch.zeros_like(sorted_indices_to_remove_BCxV)\n indices_to_remove_BCxV = indices_to_remove_BCxV.scatter(\n dim=-1, index=sorted_indices_BCxV, src=sorted_indices_to_remove_BCxV\n )\n logits_BCxV = logits_BCxV.masked_fill(indices_to_remove_BCxV, -torch.inf)\n\n final_probs_BCxV = torch.softmax(logits_BCxV, dim=-1)\n\n sampled_indices_BC = torch.multinomial(final_probs_BCxV, num_samples=1)\n sampled_indices_C = sampled_indices_BC.squeeze(-1)\n return sampled_indices_C\n\n\nclass ComputeDtype(str, Enum):\n FLOAT32 = \"float32\"\n FLOAT16 = \"float16\"\n BFLOAT16 = 
\"bfloat16\"\n\n def to_dtype(self) -> torch.dtype:\n if self == ComputeDtype.FLOAT32:\n return torch.float32\n elif self == ComputeDtype.FLOAT16:\n return torch.float16\n elif self == ComputeDtype.BFLOAT16:\n return torch.bfloat16\n else:\n raise ValueError(f\"Unsupported compute dtype: {self}\")\n\n\nclass Dia:\n def __init__(\n self,\n config: DiaConfig,\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ):\n \"\"\"Initializes the Dia model.\n\n Args:\n config: The configuration object for the model.\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Raises:\n RuntimeError: If there is an error loading the DAC model.\n \"\"\"\n super().__init__()\n self.config = config\n self.device = device if device is not None else _get_default_device()\n if isinstance(compute_dtype, str):\n compute_dtype = ComputeDtype(compute_dtype)\n self.compute_dtype = compute_dtype.to_dtype()\n self.model: DiaModel = DiaModel(config, self.compute_dtype)\n self.dac_model = None\n self._compiled_step = None\n self.load_dac = load_dac\n\n if not self.load_dac:\n print(\"Warning: DAC model will not be loaded. This is not recommended.\")\n\n if torch.cuda.is_available():\n torch.backends.cuda.matmul.allow_tf32 = True\n\n @classmethod\n def from_local(\n cls,\n config_path: str,\n checkpoint_path: str,\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ) -> \"Dia\":\n \"\"\"Loads the Dia model from local configuration and checkpoint files.\n\n Args:\n config_path: Path to the configuration JSON file.\n checkpoint_path: Path to the model checkpoint (.pth) file.\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. 
If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Returns:\n An instance of the Dia model loaded with weights and set to eval mode.\n\n Raises:\n FileNotFoundError: If the config or checkpoint file is not found.\n RuntimeError: If there is an error loading the checkpoint.\n \"\"\"\n config = DiaConfig.load(config_path)\n if config is None:\n raise FileNotFoundError(f\"Config file not found at {config_path}\")\n\n dia = cls(config, compute_dtype, device, load_dac)\n\n try:\n state_dict = torch.load(checkpoint_path, map_location=dia.device)\n dia.model.load_state_dict(state_dict)\n except FileNotFoundError:\n raise FileNotFoundError(f\"Checkpoint file not found at {checkpoint_path}\")\n except Exception as e:\n raise RuntimeError(f\"Error loading checkpoint from {checkpoint_path}\") from e\n\n dia.model.to(dia.device)\n dia.model.eval()\n if load_dac:\n dia._load_dac_model()\n return dia\n\n @classmethod\n def from_pretrained(\n cls,\n model_name: str = \"nari-labs/Dia-1.6B-0626\",\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ) -> \"Dia\":\n \"\"\"Loads the Dia model from a Hugging Face Hub repository.\n\n Downloads the configuration and checkpoint files from the specified\n repository ID and then loads the model.\n\n Args:\n model_name: The Hugging Face Hub repository ID (e.g., \"nari-labs/Dia-1.6B-0626\").\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. 
If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Returns:\n An instance of the Dia model loaded with weights and set to eval mode.\n\n Raises:\n FileNotFoundError: If config or checkpoint download/loading fails.\n RuntimeError: If there is an error loading the checkpoint.\n \"\"\"\n if isinstance(compute_dtype, str):\n compute_dtype = ComputeDtype(compute_dtype)\n\n # Load model directly using DiaModel's from_pretrained which handles HF download\n try:\n loaded_model = DiaModel.from_pretrained(model_name, compute_dtype=compute_dtype.to_dtype())\n except Exception as e:\n raise RuntimeError(f\"Error loading model from Hugging Face Hub ({model_name})\") from e\n\n config = loaded_model.config # Get config from the loaded model\n dia = cls(config, compute_dtype, device, load_dac)\n\n dia.model = loaded_model # Assign the already loaded model\n dia.model.to(dia.device)\n dia.model.eval()\n if load_dac:\n dia._load_dac_model()\n return dia\n\n def _load_dac_model(self):\n \"\"\"Loads the Descript Audio Codec (DAC) model.\n\n Downloads the DAC model if necessary and loads it onto the specified device.\n Sets the DAC model to evaluation mode.\n\n Raises:\n RuntimeError: If downloading or loading the DAC model fails.\n \"\"\"\n import dac\n\n try:\n dac_model_path = dac.utils.download()\n dac_model = dac.DAC.load(dac_model_path).to(self.device)\n dac_model.eval() # Ensure DAC is in eval mode\n except Exception as e:\n raise RuntimeError(\"Failed to load DAC model\") from e\n self.dac_model = dac_model\n\n def _encode_text(self, text: str) -> torch.Tensor:\n \"\"\"Encodes the input text string into a tensor of token IDs using byte-level encoding.\n\n Special tokens [S1] and [S2] are replaced by their byte values. 
The resulting\n sequence is truncated to the maximum configured text length.\n\n Args:\n text: The input text string.\n\n Returns:\n A tensor containing the encoded byte token IDs.\n \"\"\"\n max_len = self.config.encoder_config.max_position_embeddings\n\n byte_text = text.encode(\"utf-8\")\n # Replace special tokens with their byte values if needed by the specific tokenizer/config\n # Assuming byte values 1 and 2 are correct placeholders based on original code\n replaced_bytes = byte_text.replace(b\"[S1]\", b\"\\x01\").replace(b\"[S2]\", b\"\\x02\")\n text_tokens = list(replaced_bytes)\n return torch.tensor(\n text_tokens[:max_len],\n dtype=torch.long,\n device=self.device,\n )\n\n def _pad_text_input(self, text_tokens: list[torch.Tensor]) -> torch.Tensor:\n \"\"\"Pads the text input to the maximum length.\"\"\"\n text_pad_value = 0\n max_len = self.config.encoder_config.max_position_embeddings\n batch_size = len(text_tokens)\n\n src_tokens = torch.full(\n (batch_size, 1, max_len),\n fill_value=text_pad_value,\n dtype=torch.long,\n device=self.device,\n )\n for i in range(batch_size):\n current_len = len(text_tokens[i])\n src_tokens[i, 0, :current_len] = text_tokens[i]\n return src_tokens\n\n def _prepare_audio_prompt(self, audio_prompts: list[torch.Tensor | None]) -> tuple[torch.Tensor, list[int]]:\n \"\"\"Prepares the audio prompt tensor for the decoder.\n\n Handles padding, adds the beginning-of-sequence (BOS) token, applies the\n delay pattern, and determines the number of prefill steps for each item\n in the batch.\n\n Args:\n audio_prompts: A list of audio prompt tensors (encoded DAC frames) or None.\n Each tensor should have shape [T, C].\n\n Returns:\n A tuple containing:\n - delayed_batch (torch.Tensor): The prepared audio prompt tensor with\n delays applied, shape [B, T_max_padded, C].\n - prefill_steps (list[int]): A list containing the number of valid\n tokens (including BOS) for each prompt in the batch.\n \"\"\"\n num_channels = 
self.config.decoder_config.num_channels\n audio_bos_value = self.config.bos_token_id\n delay_pattern = self.config.delay_pattern\n max_delay_pattern = max(delay_pattern)\n batch_size = len(audio_prompts)\n\n max_len = max(p.shape[0] if p is not None else 0 for p in audio_prompts) + max_delay_pattern\n prefill_steps = []\n\n prefill = torch.full(\n (batch_size, max_len, num_channels),\n fill_value=-1,\n dtype=torch.int,\n device=self.device,\n )\n\n prefill[:, 0, :] = audio_bos_value\n\n for i in range(batch_size):\n prompt = audio_prompts[i]\n if prompt is not None:\n prompt = prompt.to(device=self.device, dtype=torch.int)\n prefill[i, 1 : prompt.shape[0] + 1, :] = prompt\n prefill_steps.append(prompt.shape[0] + 1)\n else:\n prefill_steps.append(1)\n\n delay_precomp = build_delay_indices(\n B=batch_size,\n T=max_len,\n C=num_channels,\n delay_pattern=delay_pattern,\n )\n\n delayed_batch = apply_audio_delay(\n audio_BxTxC=prefill,\n pad_value=-1,\n bos_value=audio_bos_value,\n precomp=delay_precomp,\n )\n\n return delayed_batch, prefill_steps\n\n def _prepare_generation(\n self,\n text: torch.Tensor,\n audio_prompts: list[torch.Tensor | None],\n max_tokens: int | None = None,\n attn_fn: Callable = F.scaled_dot_product_attention,\n ):\n \"\"\"Initializes the model state for generation.\n\n Encodes the text input (conditional and unconditional), prepares the\n encoder and decoder states (including KV caches and cross-attention),\n prepares the audio prompt, and performs the initial decoder prefill steps\n based on the audio prompts.\n\n Args:\n text: The padded text input tensor, shape [B, 1, T_text].\n audio_prompts: A list of prepared audio prompt tensors or None.\n\n Returns:\n A tuple containing:\n - dec_state (DecoderInferenceState): The initialized decoder state.\n - dec_output (DecoderOutput): The initialized decoder output manager,\n containing the prefilled audio tokens.\n \"\"\"\n batch_size = text.shape[0]\n\n enc_input_uncond = torch.zeros_like(text)\n 
enc_input_cond = text\n stacked_inputs = torch.stack([enc_input_uncond, enc_input_cond], dim=1)\n enc_input = stacked_inputs.view(2 * batch_size, -1)\n\n enc_state = EncoderInferenceState.new(self.config, enc_input_cond)\n encoder_out = self.model.encoder(enc_input, enc_state)\n\n dec_cross_attn_cache = self.model.decoder.precompute_cross_attn_cache(encoder_out)\n dec_state = DecoderInferenceState.new(\n self.config,\n enc_state,\n encoder_out,\n dec_cross_attn_cache,\n self.compute_dtype,\n max_generation_length=max_tokens,\n )\n prefill, prefill_steps = self._prepare_audio_prompt(audio_prompts)\n\n dec_output = DecoderOutput.new(batch_size, self.config, self.device)\n dec_output.prefill(prefill, prefill_steps)\n\n dec_step = min(prefill_steps) - 1\n if dec_step > 0:\n dec_state.prepare_step(0, dec_step)\n tokens_BxTxC = dec_output.get_tokens_at(0, dec_step).repeat_interleave(2, dim=0)\n self.model.decoder.forward(tokens_BxTxC, dec_state)\n\n return dec_state, dec_output\n\n def _decoder_step(\n self,\n tokens_Bx1xC: torch.Tensor,\n dec_state: DecoderInferenceState,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n top_k: int,\n current_idx: int,\n ) -> torch.Tensor:\n \"\"\"Performs a single step of the decoder inference.\n\n Takes the tokens from the previous step, runs them through the decoder\n (for both conditional and unconditional paths), applies classifier-free\n guidance (CFG), samples the next token using temperature, top-p, and top-k\n sampling, and applies constraints (e.g., preventing EOS in certain channels).\n\n Args:\n tokens_Bx1xC: The input tokens for the current step, shape [2*B, 1, C].\n Repeated for CFG (unconditional and conditional).\n dec_state: The current state of the decoder (KV caches, etc.).\n cfg_scale: The scale factor for classifier-free guidance.\n temperature: The temperature for sampling.\n top_p: The cumulative probability threshold for top-p sampling.\n top_k: The number of top logits to consider for top-k sampling.\n 
current_idx: The current generation step index.\n\n Returns:\n torch.Tensor: The sampled next tokens for each item in the batch,\n shape [B, C].\n \"\"\"\n B = tokens_Bx1xC.shape[0] // 2\n\n audio_eos_value = self.config.eos_token_id\n logits_Bx1xCxV = self.model.decoder.decode_step(tokens_Bx1xC, dec_state, current_idx)\n\n logits_last_2BxCxV = logits_Bx1xCxV[:, -1]\n logits_last_Bx2xCxV = logits_last_2BxCxV.view(B, 2, *logits_last_2BxCxV.shape[1:])\n\n uncond_logits_BxCxV = logits_last_Bx2xCxV[:, 0, :, :] # Shape [B, C, V]\n cond_logits_BxCxV = logits_last_Bx2xCxV[:, 1, :, :] # Shape [B, C, V]\n logits_BxCxV = cond_logits_BxCxV + cfg_scale * (cond_logits_BxCxV - uncond_logits_BxCxV)\n\n _, top_k_indices_BxCxk = torch.topk(logits_BxCxV, k=top_k, dim=-1)\n mask_BxCxV = torch.ones_like(logits_BxCxV, dtype=torch.bool)\n mask_BxCxV = mask_BxCxV.scatter(dim=-1, index=top_k_indices_BxCxk, value=False)\n logits_BxCxV = cond_logits_BxCxV.masked_fill(mask_BxCxV, -torch.inf)\n\n logits_BxCxV[:, :, audio_eos_value + 1 :] = torch.full_like(\n logits_BxCxV[:, :, audio_eos_value + 1 :],\n fill_value=-torch.inf,\n )\n logits_BxCxV[:, 1:, audio_eos_value:] = torch.full_like(\n logits_BxCxV[:, 1:, audio_eos_value:],\n fill_value=-torch.inf,\n )\n\n flat_logits_BCxV = logits_BxCxV.view(B * self.config.decoder_config.num_channels, -1)\n\n pred_BC = _sample_next_token(\n flat_logits_BCxV.float(),\n temperature=temperature,\n top_p=top_p,\n top_k=top_k,\n audio_eos_value=audio_eos_value,\n )\n\n pred_BxC = pred_BC.view(B, self.config.decoder_config.num_channels)\n return pred_BxC\n\n def _generate_output(self, generated_codes: torch.Tensor, lengths_Bx: torch.Tensor) -> list[np.ndarray]:\n \"\"\"Converts generated delayed codes into audio waveforms.\n\n Reverts the delay pattern applied during generation, decodes the resulting\n codebook using the DAC model (if loaded), and returns a list of audio\n waveforms as NumPy arrays. 
If DAC is not loaded, returns the raw codebook indices.\n\n Args:\n generated_codes: The tensor of generated audio codes with delays,\n shape [B, T_gen, C].\n lengths_Bx: A tensor containing the valid length of generated codes\n (excluding padding and BOS/EOS markers) for each item\n in the batch, shape [B].\n\n Returns:\n A list of NumPy arrays, where each array represents the generated audio\n waveform for one item in the batch. If DAC is not loaded, returns the\n raw, reverted codebook indices as NumPy arrays.\n \"\"\"\n num_channels = self.config.decoder_config.num_channels\n batch_size = generated_codes.shape[0]\n seq_length = generated_codes.shape[1]\n delay_pattern = self.config.delay_pattern\n audio_pad_value = self.config.pad_token_id\n max_delay_pattern = max(delay_pattern)\n\n revert_precomp = build_revert_indices(\n B=batch_size,\n T=seq_length,\n C=num_channels,\n delay_pattern=delay_pattern,\n )\n\n codebook = revert_audio_delay(\n audio_BxTxC=generated_codes,\n pad_value=audio_pad_value,\n precomp=revert_precomp,\n T=seq_length,\n )[:, :-max_delay_pattern, :]\n\n min_valid_index = 0\n max_valid_index = 1023\n invalid_mask = (codebook < min_valid_index) | (codebook > max_valid_index)\n codebook[invalid_mask] = 0\n\n audios = []\n\n if self.load_dac:\n for i in range(batch_size):\n audio = self._decode(codebook[i, : lengths_Bx[i], :])\n audio_np = audio.cpu().numpy()\n audios.append(audio_np)\n else:\n for i in range(batch_size):\n audios.append(codebook[i, : lengths_Bx[i], :].cpu().numpy())\n return audios\n\n @torch.no_grad()\n @torch.inference_mode()\n def _encode(self, audio: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Encodes the given audio waveform into a tensor of DAC codebook indices\n \"\"\"\n audio = audio.unsqueeze(0)\n audio_data = self.dac_model.preprocess(audio, DEFAULT_SAMPLE_RATE)\n _, encoded_frame, _, _, _ = self.dac_model.encode(audio_data)\n encoded_frame: torch.Tensor\n return encoded_frame.squeeze(0).transpose(0, 1)\n\n 
@torch.no_grad()\n @torch.inference_mode()\n def _decode(self, audio_codes: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Decodes the given frames into an output audio waveform\n \"\"\"\n audio_codes = audio_codes.unsqueeze(0).transpose(1, 2)\n audio_values, _, _ = self.dac_model.quantizer.from_codes(audio_codes)\n audio_values = self.dac_model.decode(audio_values)\n audio_values: torch.Tensor\n return audio_values.squeeze()\n\n def load_audio(self, audio_path: str) -> torch.Tensor:\n \"\"\"Loads and preprocesses an audio file for use as a prompt.\n\n Loads the audio file, resamples it to the target sample rate if necessary,\n preprocesses it using the DAC model's preprocessing, and encodes it into\n DAC codebook indices.\n\n Args:\n audio_path: Path to the audio file.\n\n Returns:\n torch.Tensor: The encoded audio prompt as DAC codebook indices,\n shape [T, C].\n\n Raises:\n RuntimeError: If the DAC model is not loaded (`load_dac=False` during init).\n FileNotFoundError: If the audio file cannot be found.\n Exception: If there's an error during loading or processing.\n \"\"\"\n if self.dac_model is None:\n raise RuntimeError(\"DAC model is required for loading audio prompts but was not loaded.\")\n audio, sr = torchaudio.load(audio_path, channels_first=True) # C, T\n if sr != DEFAULT_SAMPLE_RATE:\n audio = torchaudio.functional.resample(audio, sr, DEFAULT_SAMPLE_RATE)\n # Convert to mono if stereo\n if audio.shape[0] > 1:\n audio = torch.mean(audio, dim=0, keepdim=True) # Average channels to get mono\n return self._encode(audio.to(self.device))\n\n def save_audio(self, path: str, audio: np.ndarray):\n \"\"\"Saves the generated audio waveform to a file.\n\n Uses the soundfile library to write the NumPy audio array to the specified\n path with the default sample rate.\n\n Args:\n path: The path where the audio file will be saved.\n audio: The audio waveform as a NumPy array.\n \"\"\"\n import soundfile as sf\n\n sf.write(path, audio, DEFAULT_SAMPLE_RATE)\n\n 
@torch.inference_mode()\n def generate(\n self,\n text: str | list[str],\n max_tokens: int = 3072,\n cfg_scale: float = 3.0,\n temperature: float = 1.2,\n top_p: float = 0.95,\n use_torch_compile: bool = False,\n cfg_filter_top_k: int = 45,\n audio_prompt: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,\n audio_prompt_path: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,\n use_cfg_filter: bool | None = None,\n verbose: bool = False,\n ) -> np.ndarray | list[np.ndarray]:\n \"\"\"Generates audio corresponding to the input text.\n\n Args:\n text: The input text prompt, or a list of text prompts for batch generation.\n max_tokens: The maximum number of audio tokens to generate per prompt.\n Defaults to the model's configured audio length if None.\n cfg_scale: The scale factor for classifier-free guidance (CFG). Higher values\n lead to stronger guidance towards the text prompt.\n temperature: The temperature for sampling. Higher values increase randomness.\n top_p: The cumulative probability threshold for nucleus (top-p) sampling.\n use_torch_compile: Whether to compile the generation steps using torch.compile.\n Can significantly speed up generation after the initial\n compilation overhead. 
Defaults to False.\n cfg_filter_top_k: The number of top logits to consider during CFG filtering.\n (Note: This parameter name might be slightly misleading based\n on the code; it's used in the `_sample_next_token` function.)\n audio_prompt: An audio prompt or list of prompts to condition the generation.\n Can be a file path (str), a pre-loaded tensor (DAC codes), or None.\n If a list, its length must match the batch size of the text input.\n audio_prompt_path: (Deprecated) Use `audio_prompt` instead.\n use_cfg_filter: (Deprecated) This parameter is no longer used.\n verbose: If True, prints progress information during generation, including\n speed metrics.\n\n Returns:\n If a single text prompt was provided, returns a NumPy array containing the\n generated audio waveform.\n If a list of text prompts was provided, returns a list of NumPy arrays,\n each corresponding to a prompt in the input list. Returns None for a\n sequence if no audio was generated for it.\n \"\"\"\n batch_size = len(text) if isinstance(text, list) else 1\n audio_eos_value = self.config.eos_token_id\n audio_pad_value = self.config.pad_token_id\n delay_pattern = self.config.delay_pattern\n max_delay_pattern = max(delay_pattern)\n delay_pattern_Cx = torch.tensor(delay_pattern, device=self.device, dtype=torch.long)\n self.model.eval()\n\n if audio_prompt_path:\n print(\"Warning: audio_prompt_path is deprecated. 
Use audio_prompt instead.\")\n audio_prompt = audio_prompt_path\n if use_cfg_filter is not None:\n print(\"Warning: use_cfg_filter is deprecated.\")\n\n if verbose:\n total_start_time = time.time()\n\n if use_torch_compile and not hasattr(self, \"_compiled\"):\n # Compilation can take about a minute.\n self._prepare_generation = torch.compile(self._prepare_generation, dynamic=True, fullgraph=True)\n self._decoder_step = torch.compile(self._decoder_step, fullgraph=True, mode=\"max-autotune\")\n self._compiled = True\n\n if isinstance(audio_prompt, list):\n audio_prompt = [self.load_audio(p) if isinstance(p, str) else p for p in audio_prompt]\n elif isinstance(audio_prompt, str):\n audio_prompt = [self.load_audio(audio_prompt)]\n elif isinstance(audio_prompt, torch.Tensor):\n audio_prompt = [audio_prompt]\n elif audio_prompt is None:\n audio_prompt = [None] * batch_size\n\n assert len(audio_prompt) == batch_size, \"Number of audio prompts must match batch size\"\n\n if isinstance(text, list):\n text = [self._encode_text(t) for t in text]\n else:\n text = [self._encode_text(text)]\n text = self._pad_text_input(text)\n\n dec_state, dec_output = self._prepare_generation(text, audio_prompt, max_tokens=max_tokens)\n dec_step = min(dec_output.prefill_steps) - 1\n current_idx = torch.tensor([dec_step], device=self.device)\n\n eos_detected_Bx = torch.zeros((batch_size,), dtype=torch.bool, device=self.device)\n eos_countdown_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n finished_step_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n\n bos_over = False\n\n if verbose:\n print(\"generate: starting generation loop\")\n if use_torch_compile:\n print(\"generate: using use_torch_compile=True, the first step may be slow\")\n start_time = time.time()\n\n # --- Generation Loop ---\n while dec_step < max_tokens:\n if (eos_countdown_Bx == 0).all():\n break\n\n current_step_idx = dec_step + 1\n 
torch.compiler.cudagraph_mark_step_begin()\n dec_state.prepare_step(dec_step)\n tokens_Bx1xC = dec_output.get_tokens_at(dec_step).repeat_interleave(2, dim=0) # Repeat for CFG\n\n pred_BxC = self._decoder_step(\n tokens_Bx1xC,\n dec_state,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n current_idx,\n )\n\n current_idx += 1\n\n active_mask_Bx = eos_countdown_Bx != 0\n eos_trigger_Bx = torch.zeros_like(active_mask_Bx)\n if active_mask_Bx.any():\n is_eos_token = (~eos_detected_Bx[active_mask_Bx]) & (pred_BxC[active_mask_Bx, 0] == audio_eos_value)\n is_max_len = current_step_idx >= max_tokens - max_delay_pattern\n eos_trigger_Bx[active_mask_Bx] = is_eos_token | is_max_len\n eos_detected_Bx |= eos_trigger_Bx\n start_countdown_mask_Bx = eos_trigger_Bx & (eos_countdown_Bx < 0)\n if start_countdown_mask_Bx.any():\n eos_countdown_Bx[start_countdown_mask_Bx] = max_delay_pattern\n finished_step_Bx[start_countdown_mask_Bx] = current_step_idx\n\n padding_mask_Bx = eos_countdown_Bx > 0\n if padding_mask_Bx.any():\n pred_active_BxC = pred_BxC[padding_mask_Bx].clone()\n countdown_active_Bx = eos_countdown_Bx[padding_mask_Bx]\n step_after_eos_Bx = max_delay_pattern - countdown_active_Bx\n step_after_eos_Bx_ = step_after_eos_Bx.unsqueeze(1)\n delay_pattern_Cx_ = delay_pattern_Cx.unsqueeze(0)\n eos_mask_NxC = step_after_eos_Bx_ == delay_pattern_Cx_\n pad_mask_NxC = step_after_eos_Bx_ > delay_pattern_Cx_\n pred_active_BxC[eos_mask_NxC] = audio_eos_value\n pred_active_BxC[pad_mask_NxC] = audio_pad_value\n pred_BxC[padding_mask_Bx] = pred_active_BxC\n eos_countdown_Bx[padding_mask_Bx] -= 1\n\n # --- Update BOS flag (Original) ---\n if not bos_over:\n bos_over = all(\n dec_step - prefill_step > max_delay_pattern for prefill_step in dec_output.prefill_steps\n )\n\n dec_output.update_one(pred_BxC, current_step_idx, not bos_over)\n\n dec_step += 1\n\n if verbose and dec_step % 86 == 0:\n duration = time.time() - start_time\n if duration > 0:\n print(\n f\"generate step 
{dec_step}: speed={86 * batch_size / duration:.3f} tokens/s, realtime factor={batch_size / duration:.3f}x\"\n )\n start_time = time.time()\n\n # --- Finalize and Extract Output ---\n final_step = dec_step + 1\n\n finished_step_Bx[finished_step_Bx == -1] = final_step - max_delay_pattern\n\n prefill_steps_tensor = torch.tensor(dec_output.prefill_steps, device=self.device)\n lengths_Bx = finished_step_Bx - prefill_steps_tensor\n lengths_Bx = torch.clamp(lengths_Bx, min=0)\n\n max_len = lengths_Bx.max().item() + max_delay_pattern\n outputs = []\n\n if max_len > 0:\n num_channels = self.config.decoder_config.num_channels\n audio_pad_value = self.config.pad_token_id\n generated_codes = torch.full(\n (batch_size, max_len, num_channels),\n fill_value=audio_pad_value,\n dtype=torch.long,\n device=self.device,\n )\n\n for i in range(batch_size):\n start_step = dec_output.prefill_steps[i]\n actual_len = lengths_Bx[i].item() + max_delay_pattern\n if actual_len > 0:\n tokens_to_copy = dec_output.generated_tokens[i, start_step : start_step + actual_len, :]\n generated_codes[i, :actual_len, :] = tokens_to_copy\n\n if verbose:\n avg_steps = lengths_Bx.float().mean().item()\n total_duration = time.time() - total_start_time\n print(f\"generate: avg steps={avg_steps:.1f}, total duration={total_duration:.3f}s\")\n\n del dec_state\n\n outputs = self._generate_output(generated_codes, lengths_Bx)\n else:\n print(\"Warning: Nothing generated for any sequence in the batch.\")\n outputs = [None] * batch_size\n\n return outputs if batch_size > 1 else outputs[0]\n"], ["/dia/dia/layers.py", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torch import Tensor\nfrom torch.nn import RMSNorm\n\nfrom .config import DecoderConfig, DiaConfig, EncoderConfig\nfrom .state import DecoderInferenceState, EncoderInferenceState, KVCache\n\n\ndef _normalize_axes(axes: tuple[int, ...], ndim: int) -> tuple[int, ...]:\n return tuple(ax 
if ax >= 0 else ndim + ax for ax in axes)\n\n\nclass DenseGeneral(nn.Module):\n \"\"\"\n PyTorch equivalent of flax.linen.DenseGeneral with shapes defined at init.\n Stores weights (`kernel`) in the same layout as Jax and uses torch.tensordot\n for the generalized matrix multiplication. Weight/bias shapes are calculated\n and parameters created during initialization based on config.\n `load_weights` validates shapes and copies data.\n Attributes:\n axis (Tuple[int, ...]): Input axis or axes to contract.\n in_shapes (Tuple[int, ...]): Sizes of the input dimensions specified by `axis`.\n out_features (Tuple[int, ...]): Shape of the output features (non-contracted dims).\n use_bias (bool): Whether to add a bias term.\n weight (nn.Parameter): The kernel parameter.\n bias (Optional[nn.Parameter]): The bias parameter (if use_bias=True).\n \"\"\"\n\n def __init__(\n self,\n in_shapes: tuple[int, ...],\n out_features: tuple[int, ...],\n axis: tuple[int, ...] = (-1,),\n weight_dtype: torch.dtype | None = None,\n device: torch.device | None = None,\n ):\n super().__init__()\n self.in_shapes = in_shapes\n self.out_features = out_features\n self.axis = axis\n self.kernel_shape = self.in_shapes + self.out_features\n\n factory_kwargs = {\"device\": device, \"dtype\": weight_dtype}\n self.weight = nn.Parameter(torch.empty(self.kernel_shape, **factory_kwargs))\n\n def forward(self, inputs: Tensor) -> Tensor:\n norm_axis = _normalize_axes(self.axis, inputs.ndim)\n kernel_contract_axes = tuple(range(len(norm_axis)))\n\n output = torch.tensordot(\n inputs.to(self.weight.dtype),\n self.weight,\n dims=(norm_axis, kernel_contract_axes),\n ).to(inputs.dtype)\n return output\n\n\nclass MlpBlock(nn.Module):\n \"\"\"MLP block using DenseGeneral.\"\"\"\n\n def __init__(self, embed_dim: int, intermediate_dim: int, compute_dtype: torch.dtype):\n super().__init__()\n self.dtype = compute_dtype\n\n self.wi_fused = DenseGeneral(\n in_shapes=(embed_dim,),\n out_features=(2, intermediate_dim),\n 
axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n self.wo = DenseGeneral(\n in_shapes=(intermediate_dim,),\n out_features=(embed_dim,),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass.\"\"\"\n fused_x = self.wi_fused(x)\n\n gate = fused_x[..., 0, :]\n up = fused_x[..., 1, :]\n\n hidden = torch.mul(F.silu(gate), up).to(self.dtype)\n\n output = self.wo(hidden)\n return output\n\n\nclass RotaryEmbedding(nn.Module):\n \"\"\"Rotary Position Embedding (RoPE) implementation in PyTorch.\"\"\"\n\n def __init__(\n self,\n embedding_dims: int,\n min_timescale: float = 1.0,\n max_timescale: float = 10000.0,\n dtype: torch.dtype = torch.float32,\n ):\n super().__init__()\n if embedding_dims % 2 != 0:\n raise ValueError(\"Embedding dim must be even for RoPE.\")\n self.embedding_dims = embedding_dims\n self.min_timescale = min_timescale\n self.max_timescale = max_timescale\n self.compute_dtype = dtype\n\n half_embedding_dim = embedding_dims // 2\n fraction = (2.0 * torch.arange(0, half_embedding_dim)) / embedding_dims\n timescale = (self.min_timescale * (self.max_timescale / self.min_timescale) ** fraction).to(torch.float32)\n self.register_buffer(\"timescale\", timescale, persistent=False)\n\n def forward(self, inputs: torch.Tensor, position: torch.Tensor):\n \"\"\"Applies RoPE.\"\"\"\n position = position.unsqueeze(-1).unsqueeze(-1)\n sinusoid_inp = position / self.timescale\n sin = torch.sin(sinusoid_inp)\n cos = torch.cos(sinusoid_inp)\n first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)\n first_part = first_half * cos - second_half * sin\n second_part = second_half * cos + first_half * sin\n return torch.cat(\n (first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)),\n dim=-1,\n )\n\n def apply_rope(self, inputs: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor):\n first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)\n first_part = 
first_half * cos - second_half * sin\n second_part = second_half * cos + first_half * sin\n return torch.cat((first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)), dim=-1)\n\n\ndef custom_scaled_dot_product_attention(\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n attn_mask: torch.Tensor | None = None,\n scale: float = 1.0,\n is_causal: bool = False,\n num_gqa_groups: int = 1,\n) -> torch.Tensor:\n \"\"\"\n Custom scaled dot-product attention with GQA support for MPS compatibility.\n\n Args:\n query: (B, N_q, T, H) - Query tensor, N_q = num_query_heads\n key: (B, N_kv, S, H) - Key tensor, N_kv = num_kv_heads\n value: (B, N_kv, S, H) - Value tensor\n attn_mask: (B, 1, T, S) - Attention mask, optional\n scale: Scaling factor for attention scores\n is_causal: If True, apply causal masking\n num_gqa_groups: Number of query groups per KV head (N_q / N_kv)\n\n Returns:\n output: (B, N_q, T, H) - Attention output\n \"\"\"\n B, N_q, T, H = query.shape\n _, N_kv, S, _ = key.shape\n\n # For GQA, repeat key and value tensors to match query heads\n if num_gqa_groups > 1:\n key = key.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)\n value = value.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)\n\n # Compute attention scores: (B, N_q, T, H) @ (B, N_q, H, S) -> (B, N_q, T, S)\n scores = torch.matmul(query, key.transpose(-1, -2)) * scale\n\n # Apply causal mask if needed\n if is_causal:\n causal_mask = torch.tril(torch.ones(T, S, dtype=torch.bool, device=query.device))\n scores = scores.masked_fill(~causal_mask, float(\"-inf\"))\n\n # Apply attention mask if provided\n if attn_mask is not None:\n scores = scores.masked_fill(~attn_mask, float(\"-inf\"))\n\n # Softmax over the last dimension (S)\n attn_weights = F.softmax(scores, dim=-1)\n\n # Compute output: (B, N_q, T, S) @ (B, N_q, S, H) -> (B, N_q, T, H)\n output = torch.matmul(attn_weights, value)\n\n return output\n\n\nclass CrossAttention(nn.Module):\n 
\"\"\"Cross-Attention using DenseGeneral.\"\"\"\n\n def __init__(\n self,\n config: EncoderConfig | DecoderConfig,\n q_embed_dim: int,\n kv_embed_dim: int,\n num_query_heads: int,\n num_kv_heads: int,\n head_dim: int,\n compute_dtype: torch.dtype,\n out_embed_dim: int | None = None,\n ):\n super().__init__()\n self.num_query_heads = num_query_heads\n self.num_kv_heads = num_kv_heads\n self.head_dim = head_dim\n self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim\n self.projected_query_dim = num_query_heads * head_dim\n if num_query_heads % num_kv_heads != 0:\n raise ValueError(f\"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})\")\n self.num_gqa_groups = num_query_heads // num_kv_heads\n\n # --- Projection Layers using DenseGeneral ---\n self.q_proj = DenseGeneral(\n in_shapes=(q_embed_dim,),\n out_features=(num_query_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.k_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.v_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.o_proj = DenseGeneral(\n in_shapes=(num_query_heads, head_dim),\n out_features=(self.output_dim,),\n axis=(-2, -1),\n weight_dtype=compute_dtype,\n )\n\n # --- Rotary Embedding ---\n self.rotary_emb = RotaryEmbedding(\n embedding_dims=self.head_dim,\n max_timescale=config.rope_theta,\n dtype=compute_dtype,\n )\n\n def forward(\n self,\n Xq: torch.Tensor, # (B, T, D) T = 1 in AR generation\n q_positions: torch.Tensor, # (B, T)\n kv_positions: torch.Tensor | None = None, # (B, S)\n attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others\n cache: KVCache | None = None, # None in Encoder, KVCache in Decoder\n is_causal: bool = False,\n ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] 
| None]:\n \"\"\"\n Performs attention calculation with optional KV caching.\n\n Args:\n Xq: Query tensor (B, T, D). T=1 during single-step decoding.\n Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.\n q_positions: Positions for queries (B, T).\n kv_positions: Positions for keys/values (B, S). If None, uses q_positions.\n attn_mask: Attention mask.\n cache: KVCache.\n\n Returns:\n A tuple containing:\n - output: The attention output tensor (B, T, output_dim).\n - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.\n \"\"\"\n if kv_positions is None:\n kv_positions = q_positions\n original_dtype = Xq.dtype\n\n Xq_BxTxNxH = self.q_proj(Xq)\n Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)\n\n attn_k: torch.Tensor | None = cache.k if cache is not None else None\n attn_v: torch.Tensor | None = cache.v if cache is not None else None\n\n # Use custom attention for MPS backend, otherwise use optimized PyTorch function\n is_mps = Xq.device.type == \"mps\" and torch.backends.mps.is_available()\n if is_mps:\n attn_output = custom_scaled_dot_product_attention(\n query=Xq_BxNxTxH,\n key=attn_k,\n value=attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n is_causal=is_causal,\n num_gqa_groups=self.num_gqa_groups,\n )\n else:\n attn_output = F.scaled_dot_product_attention(\n Xq_BxNxTxH,\n attn_k,\n attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n enable_gqa=self.num_gqa_groups > 1,\n is_causal=is_causal,\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)\n output = self.o_proj(attn_output)\n\n return output.to(original_dtype)\n\n\nclass FusedQKV(nn.Module):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = False,\n num_q_heads: int = 1,\n q_head_dim: int = 1,\n num_kv_heads: int = 1,\n kv_head_dim: int = 1,\n ):\n super().__init__()\n 
self.num_q_heads = num_q_heads\n self.q_head_dim = q_head_dim\n self.num_kv_heads = num_kv_heads\n self.kv_head_dim = kv_head_dim\n self.q_output_dim = num_q_heads * q_head_dim\n self.kv_output_dim = num_kv_heads * kv_head_dim\n self.linear = nn.Linear(in_features, out_features, bias=bias)\n\n def forward(self, inputs: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n x = self.linear(inputs)\n\n q, k, v = x.split([self.q_output_dim, self.kv_output_dim, self.kv_output_dim], dim=-1)\n\n q = q.reshape(q.shape[:-1] + (self.num_q_heads, self.q_head_dim))\n k = k.reshape(k.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))\n v = v.reshape(v.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))\n\n return q, k, v\n\n\nclass SelfAttention(nn.Module):\n \"\"\"Attention using DenseGeneral.\"\"\"\n\n def __init__(\n self,\n config: EncoderConfig | DecoderConfig,\n q_embed_dim: int,\n kv_embed_dim: int,\n num_query_heads: int,\n num_kv_heads: int,\n head_dim: int,\n compute_dtype: torch.dtype,\n out_embed_dim: int | None = None,\n ):\n super().__init__()\n self.num_query_heads = num_query_heads\n self.num_kv_heads = num_kv_heads\n self.head_dim = head_dim\n self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim\n self.projected_query_dim = num_query_heads * head_dim\n if num_query_heads % num_kv_heads != 0:\n raise ValueError(f\"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})\")\n self.num_gqa_groups = num_query_heads // num_kv_heads\n self.kv_embed_dim = kv_embed_dim\n self.q_embed_dim = q_embed_dim\n\n # --- Projection Layers using DenseGeneral ---\n self.q_proj = DenseGeneral(\n in_shapes=(q_embed_dim,),\n out_features=(num_query_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.k_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.v_proj = DenseGeneral(\n 
in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.o_proj = DenseGeneral(\n in_shapes=(num_query_heads, head_dim),\n out_features=(self.output_dim,),\n axis=(-2, -1),\n weight_dtype=compute_dtype,\n )\n\n # --- Rotary Embedding ---\n self.rotary_emb = RotaryEmbedding(\n embedding_dims=self.head_dim,\n max_timescale=config.rope_theta,\n dtype=compute_dtype,\n )\n\n self.is_fused_qkv = False\n\n def get_linear_weight(self, dense: DenseGeneral):\n W_dg = dense.weight.data\n\n out_features = 1\n input_features = 1\n for dim in dense.out_features:\n out_features *= dim\n for dim in dense.in_shapes:\n input_features *= dim\n\n W_dg_reshaped_for_linear_T = W_dg.reshape(input_features, out_features)\n linear_weight = W_dg_reshaped_for_linear_T.transpose(0, 1).contiguous()\n return linear_weight\n\n def patch_fused_qkv(self):\n q_proj_weight = self.get_linear_weight(self.q_proj)\n k_proj_weight = self.get_linear_weight(self.k_proj)\n v_proj_weight = self.get_linear_weight(self.v_proj)\n\n self.qkv = FusedQKV(\n self.kv_embed_dim,\n (self.num_query_heads * self.head_dim + 2 * (self.num_kv_heads * self.head_dim)),\n bias=False,\n num_q_heads=self.num_query_heads,\n q_head_dim=self.head_dim,\n num_kv_heads=self.num_kv_heads,\n kv_head_dim=self.head_dim,\n )\n self.qkv.linear.weight.data = torch.cat([q_proj_weight, k_proj_weight, v_proj_weight], dim=0)\n\n # print(f\"qkv.weight.shape: {self.qkv.linear.weight.shape}\")\n self.is_fused_qkv = True\n\n def forward(\n self,\n X: torch.Tensor, # (B, T, D) T = 1 in AR generation\n q_positions: torch.Tensor, # (B, T)\n kv_positions: torch.Tensor | None = None, # (B, S)\n attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others\n cache: KVCache | None = None, # None in Encoder, KVCache in Decoder\n prefill: bool = False,\n is_causal: bool = False,\n current_idx: torch.Tensor | None = None,\n ) -> tuple[torch.Tensor, 
tuple[torch.Tensor, torch.Tensor] | None]:\n \"\"\"\n Performs attention calculation with optional KV caching.\n Args:\n Xq: Query tensor (B, T, D). T=1 during single-step decoding.\n Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.\n q_positions: Positions for queries (B, T).\n kv_positions: Positions for keys/values (B, S). If None, uses q_positions.\n attn_mask: Attention mask.\n cache: KVCache.\n prefill: If True, use prefill mode.\n Returns:\n A tuple containing:\n - output: The attention output tensor (B, T, output_dim).\n - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.\n \"\"\"\n if kv_positions is None:\n kv_positions = q_positions\n\n original_dtype = X.dtype\n\n if self.is_fused_qkv:\n Xq_BxTxNxH, Xk_BxSxKxH, Xv_BxSxKxH = self.qkv(X)\n else:\n Xq_BxTxNxH = self.q_proj(X)\n Xk_BxSxKxH = self.k_proj(X)\n Xv_BxSxKxH = self.v_proj(X)\n\n position = q_positions.unsqueeze(-1).unsqueeze(-1)\n sinusoid_inp = position / self.rotary_emb.timescale\n sin = torch.sin(sinusoid_inp)\n cos = torch.cos(sinusoid_inp)\n\n Xq_BxTxNxH = self.rotary_emb.apply_rope(Xq_BxTxNxH, sin, cos)\n Xk_BxSxKxH = self.rotary_emb.apply_rope(Xk_BxSxKxH, sin, cos)\n\n Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)\n\n attn_k: torch.Tensor | None = cache.k if cache is not None else None\n attn_v: torch.Tensor | None = cache.v if cache is not None else None\n\n Xk_BxKxSxH = Xk_BxSxKxH.transpose(1, 2) # (B, K, S, H)\n Xv_BxKxSxH = Xv_BxSxKxH.transpose(1, 2) # (B, K, S, H)\n\n if cache is None:\n attn_k = Xk_BxKxSxH\n attn_v = Xv_BxKxSxH\n elif prefill:\n attn_k, attn_v = Xk_BxKxSxH, Xv_BxKxSxH\n cache.prefill(attn_k, attn_v)\n else:\n attn_k, attn_v = cache.update(Xk_BxKxSxH, Xv_BxKxSxH, current_idx)\n\n # Use custom attention for MPS backend, otherwise use optimized PyTorch function\n is_mps = Xv_BxSxKxH.device.type == \"mps\" and 
torch.backends.mps.is_available()\n if is_mps:\n attn_output = custom_scaled_dot_product_attention(\n query=Xq_BxNxTxH,\n key=attn_k,\n value=attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n is_causal=is_causal,\n num_gqa_groups=self.num_gqa_groups,\n )\n else:\n attn_output = F.scaled_dot_product_attention(\n Xq_BxNxTxH,\n attn_k,\n attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n enable_gqa=self.num_gqa_groups > 1,\n is_causal=is_causal,\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)\n output = self.o_proj(attn_output)\n\n return output.to(original_dtype)\n\n\nclass EncoderLayer(nn.Module):\n \"\"\"Transformer Encoder Layer using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n enc_config = config.encoder_config\n embed_dim = enc_config.hidden_size\n self.compute_dtype = compute_dtype\n\n self.pre_sa_norm = RMSNorm(\n embed_dim,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n self.self_attention = SelfAttention(\n enc_config,\n q_embed_dim=embed_dim,\n kv_embed_dim=embed_dim,\n num_query_heads=enc_config.num_attention_heads,\n num_kv_heads=enc_config.num_key_value_heads,\n head_dim=enc_config.head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=embed_dim,\n )\n self.post_sa_norm = RMSNorm(\n embed_dim,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n self.mlp = MlpBlock(\n embed_dim=embed_dim,\n intermediate_dim=enc_config.intermediate_size,\n compute_dtype=compute_dtype,\n )\n\n def forward(\n self,\n x: torch.Tensor,\n state: EncoderInferenceState,\n ) -> torch.Tensor:\n residual = x\n x_norm = self.pre_sa_norm(x).to(self.compute_dtype)\n\n sa_out = self.self_attention(\n X=x_norm,\n q_positions=state.positions,\n kv_positions=state.positions,\n attn_mask=state.attn_mask,\n )\n x = residual + sa_out\n\n residual = x\n x_norm = self.post_sa_norm(x).to(self.compute_dtype)\n 
mlp_out = self.mlp(x_norm)\n x = residual + mlp_out\n\n return x\n\n\nclass Encoder(nn.Module):\n \"\"\"Transformer Encoder Stack using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n enc_config = config.encoder_config\n self.compute_dtype = compute_dtype\n\n self.embedding = nn.Embedding(\n enc_config.vocab_size,\n enc_config.hidden_size,\n dtype=compute_dtype,\n )\n self.layers = nn.ModuleList([EncoderLayer(config, compute_dtype) for _ in range(enc_config.num_hidden_layers)])\n self.norm = RMSNorm(\n enc_config.hidden_size,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n\n def forward(\n self,\n x_ids: torch.Tensor,\n state: EncoderInferenceState,\n ) -> torch.Tensor:\n x = self.embedding(x_ids)\n\n for layer in self.layers:\n x = layer(x, state)\n\n x = self.norm(x).to(self.compute_dtype)\n return x\n\n\nclass DecoderLayer(nn.Module):\n \"\"\"Transformer Decoder Layer using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n dec_config = config.decoder_config\n enc_config = config.encoder_config\n dec_embed_dim = dec_config.hidden_size\n enc_embed_dim = enc_config.hidden_size\n self.compute_dtype = compute_dtype\n\n # Norms\n self.pre_sa_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n self.pre_ca_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n self.pre_mlp_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n\n # Self-Attention (GQA) with Causal Masking\n self.self_attention = SelfAttention(\n dec_config,\n q_embed_dim=dec_embed_dim,\n kv_embed_dim=dec_embed_dim,\n num_query_heads=dec_config.num_attention_heads,\n num_kv_heads=dec_config.num_key_value_heads,\n head_dim=dec_config.head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=dec_embed_dim,\n )\n # 
Cross-Attention (MHA)\n self.cross_attention = CrossAttention(\n dec_config,\n q_embed_dim=dec_embed_dim,\n kv_embed_dim=enc_embed_dim, # Note kv_embed_dim\n num_query_heads=dec_config.cross_num_attention_heads,\n num_kv_heads=dec_config.cross_num_key_value_heads,\n head_dim=dec_config.cross_head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=dec_embed_dim,\n )\n # MLP\n self.mlp = MlpBlock(\n embed_dim=dec_embed_dim,\n intermediate_dim=dec_config.intermediate_size,\n compute_dtype=compute_dtype,\n )\n\n def forward(\n self,\n x: torch.Tensor,\n state: DecoderInferenceState,\n self_attn_cache: KVCache | None = None,\n cross_attn_cache: KVCache | None = None,\n prefill: bool = False,\n current_idx: int = 0,\n ) -> torch.Tensor:\n residual = x\n x_norm = self.pre_sa_norm(x).to(self.compute_dtype)\n\n self_attn_mask = state.casual_attn_mask[None, None, current_idx]\n\n sa_out = self.self_attention(\n X=x_norm, # (2, 1, D)\n q_positions=state.dec_positions, # (2, 1)\n kv_positions=state.dec_positions, # (2, 1)\n attn_mask=self_attn_mask,\n cache=self_attn_cache,\n prefill=prefill,\n is_causal=prefill,\n current_idx=current_idx,\n )\n\n x = residual + sa_out\n\n residual = x\n x_norm = self.pre_ca_norm(x).to(self.compute_dtype)\n ca_out = self.cross_attention(\n Xq=x_norm,\n q_positions=state.dec_positions,\n kv_positions=state.enc_positions,\n attn_mask=state.cross_attn_mask,\n cache=cross_attn_cache,\n )\n x = residual + ca_out\n\n residual = x\n x_norm = self.pre_mlp_norm(x).to(self.compute_dtype)\n mlp_out = self.mlp(x_norm)\n x = residual + mlp_out\n\n return x\n\n\nclass Decoder(nn.Module):\n \"\"\"Transformer Decoder Stack using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n dec_config = config.decoder_config\n self.num_channels = dec_config.num_channels\n self.num_layers = dec_config.num_hidden_layers\n\n self.embeddings = nn.ModuleList(\n [\n 
nn.Embedding(dec_config.vocab_size, dec_config.hidden_size, dtype=compute_dtype)\n for _ in range(self.num_channels)\n ]\n )\n self.layers = nn.ModuleList(\n [DecoderLayer(config=config, compute_dtype=compute_dtype) for _ in range(self.num_layers)]\n )\n\n self.norm = RMSNorm(\n dec_config.hidden_size,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n\n self.logits_dense = DenseGeneral(\n in_shapes=(dec_config.hidden_size,),\n out_features=(self.num_channels, dec_config.vocab_size),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n def precompute_cross_attn_cache(\n self,\n enc_out: torch.Tensor, # (B, S, E)\n ) -> list[KVCache]:\n \"\"\"\n Computes the Key and Value tensors for cross-attention for each layer from the encoder output.\n \"\"\"\n per_layer_kv_cache: list[KVCache] = []\n\n for layer in self.layers:\n cross_attn_module = layer.cross_attention\n k_proj = cross_attn_module.k_proj(enc_out)\n v_proj = cross_attn_module.v_proj(enc_out)\n\n k = k_proj.transpose(1, 2)\n v = v_proj.transpose(1, 2)\n\n per_layer_kv_cache.append(KVCache.from_kv(k, v))\n\n return per_layer_kv_cache\n\n def decode_step(\n self,\n tgt_ids_Bx1xC: torch.Tensor, # [B, 1, C]\n state: DecoderInferenceState,\n current_idx: int,\n ) -> torch.Tensor:\n \"\"\"\n Performs a single decoding step, managing KV caches layer by layer.\n Returns:\n A tuple containing:\n - logits_Bx1xCV: The final output logits for the current step (B, 1, C*V), cast to float32.\n \"\"\"\n\n x = None\n for i in range(self.num_channels):\n channel_tokens = tgt_ids_Bx1xC[..., i]\n channel_embed = self.embeddings[i](channel_tokens)\n x = channel_embed if x is None else x + channel_embed\n\n for i, layer in enumerate(self.layers):\n self_cache = state.self_attn_cache[i]\n cross_cache = state.cross_attn_cache[i]\n x = layer(\n x, # (2, 1, D)\n state,\n self_attn_cache=self_cache,\n cross_attn_cache=cross_cache,\n current_idx=current_idx,\n )\n\n x = self.norm(x)\n logits_Bx1xCxV = self.logits_dense(x)\n\n return 
logits_Bx1xCxV.to(torch.float32)\n\n def forward(self, tgt_ids_BxTxC: torch.Tensor, state: DecoderInferenceState) -> torch.Tensor:\n \"\"\"\n Forward pass for the Decoder stack, managing KV caches.\n Args:\n tgt_ids_BxTxC: Target token IDs (B, T, C).\n encoder_out: Output from the encoder (B, S, E).\n tgt_positions: Positions for target sequence (B, T).\n src_positions: Positions for source sequence (B, S).\n self_attn_mask: Mask for self-attention.\n cross_attn_mask: Mask for cross-attention.\n past_key_values: List containing the self-attention KV cache for each layer\n from the previous decoding step. `len(past_key_values)` should\n equal `num_layers`.\n precomputed_cross_attn_kv: A single tuple containing the pre-computed K/V cache\n derived from `encoder_out`. This is passed identically\n to all layers.\n Returns:\n A tuple containing:\n - logits: The final output logits (B, T, C * V), cast to float32.\n - present_key_values: A list containing the updated self-attention KV cache\n for each layer for the *current* decoding step.\n \"\"\"\n _, _, num_channels_in = tgt_ids_BxTxC.shape\n assert num_channels_in == self.num_channels, \"Input channels mismatch\"\n\n # Embeddings\n x = None\n for i in range(self.num_channels):\n channel_tokens = tgt_ids_BxTxC[..., i]\n channel_embed = self.embeddings[i](channel_tokens)\n x = channel_embed if x is None else x + channel_embed\n\n for i, layer in enumerate(self.layers):\n self_cache = state.self_attn_cache[i]\n cross_cache = state.cross_attn_cache[i]\n x = layer(\n x,\n state,\n self_attn_cache=self_cache,\n cross_attn_cache=cross_cache,\n prefill=True,\n )\n\n # Final Norm\n x = self.norm(x)\n logits_BxTxCxV = self.logits_dense(x)\n\n return logits_BxTxCxV.to(torch.float32)\n\n\nclass DiaModel(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/nari-labs/dia\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n coders={\n DiaConfig: (\n lambda x: x.model_dump(),\n lambda data: 
def create_attn_mask(
    q_padding_mask_1d: torch.Tensor,
    k_padding_mask_1d: torch.Tensor,
    device: torch.device,
    is_causal: bool = False,
) -> torch.Tensor:
    """
    Build a boolean attention mask from 1-D query/key padding masks, mimicking
    JAX segment-ID semantics: a query position may attend to a key position
    only when both are non-padding or both are padding.

    Args:
        q_padding_mask_1d: Bool tensor [B, Tq]; True marks real (non-pad) tokens.
        k_padding_mask_1d: Bool tensor [B, Tk]; True marks real (non-pad) tokens.
        device: Device on which the causal triangle is materialized.
        is_causal: When True, additionally restrict attention to k <= q
            (assumes Tq == Tk, as the original asserted).

    Returns:
        Bool mask of shape [B, 1, Tq, Tk]; True means "may attend".
    """
    q_col = q_padding_mask_1d.unsqueeze(2)  # [B, Tq, 1]
    k_row = k_padding_mask_1d.unsqueeze(1)  # [B, 1, Tk]

    # Compatible iff padding status matches on both sides.
    mask = (q_col & k_row) | (~q_col & ~k_row)  # [B, Tq, Tk]

    if is_causal:
        # Lower-triangular [Tq, Tk] window; broadcast over the batch.
        tri = torch.tril(torch.ones_like(mask[0], dtype=torch.bool, device=device))
        mask = mask & tri

    return mask.unsqueeze(1)  # [B, 1, Tq, Tk]
torch.Tensor) -> \"EncoderInferenceState\":\n \"\"\"Creates EtorchrInferenceParams from DiaConfig and a device.\"\"\"\n device = cond_src.device\n\n positions = torch.arange(\n config.encoder_config.max_position_embeddings, dtype=torch.float32, device=device\n ).unsqueeze(0)\n padding_mask = (cond_src.squeeze(1) != 0).to(device).repeat_interleave(2, dim=0)\n attn_mask = create_attn_mask(padding_mask, padding_mask, device, is_causal=False)\n\n return cls(\n max_seq_len=config.encoder_config.max_position_embeddings,\n device=device,\n positions=positions,\n padding_mask=padding_mask,\n attn_mask=attn_mask,\n )\n\n\nclass KVCache(torch.nn.Module):\n k: torch.Tensor\n v: torch.Tensor\n\n def __init__(\n self,\n batch_size: int,\n num_heads: int,\n max_len: int,\n head_dim: int,\n dtype: torch.dtype,\n device: torch.device,\n k: torch.Tensor | None = None,\n v: torch.Tensor | None = None,\n ):\n k = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if k is None else k\n v = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if v is None else v\n super().__init__()\n\n self.register_buffer(\"k\", k)\n self.register_buffer(\"v\", v)\n\n @classmethod\n def from_kv(cls, k: torch.Tensor, v: torch.Tensor) -> \"KVCache\":\n return cls(\n batch_size=k.shape[0] // 2,\n num_heads=k.shape[1],\n max_len=k.shape[2],\n head_dim=k.shape[3],\n dtype=k.dtype,\n device=k.device,\n k=k,\n v=v,\n )\n\n def update(self, k: torch.Tensor, v: torch.Tensor, current_idx: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n k_out, v_out = self.k, self.v\n k_out[:, :, current_idx, :] = k\n v_out[:, :, current_idx, :] = v\n return self.k, self.v\n\n def prefill(self, k: torch.Tensor, v: torch.Tensor):\n prefill_len = k.shape[2]\n self.k[:, :, :prefill_len, :] = k\n self.v[:, :, :prefill_len, :] = v\n\n\n@dataclass\nclass DecoderInferenceState:\n \"\"\"Parameters specifically for decoder inference.\"\"\"\n\n device: 
torch.device\n dtype: torch.dtype\n enc_out: torch.Tensor\n enc_positions: torch.Tensor\n dec_positions: torch.Tensor\n self_attn_cache: list[KVCache]\n cross_attn_cache: list[KVCache]\n casual_attn_mask: torch.Tensor\n cross_attn_mask: torch.Tensor\n\n @classmethod\n def new(\n cls,\n config: DiaConfig,\n enc_state: EncoderInferenceState,\n enc_out: torch.Tensor,\n dec_cross_attn_cache: list[KVCache],\n compute_dtype: torch.dtype,\n max_generation_length: Optional[int] = None,\n ) -> \"DecoderInferenceState\":\n \"\"\"Creates DecoderInferenceParams from DiaConfig and a device.\"\"\"\n device = enc_out.device\n max_audio_len = max_generation_length or config.decoder_config.max_position_embeddings\n batch_size = enc_out.shape[0] // 2\n\n dec_positions = torch.full((2 * batch_size, 1), fill_value=0, dtype=torch.int32, device=device)\n causal_mask = torch.tril(torch.ones(max_audio_len, max_audio_len, dtype=torch.bool, device=device))\n dec_mask = torch.ones((2 * batch_size, 1), dtype=torch.bool, device=device)\n cross_attn_mask = create_attn_mask(dec_mask, enc_state.padding_mask, device, is_causal=False)\n\n self_attn_cache = [\n KVCache(\n batch_size,\n config.decoder_config.num_key_value_heads,\n max_audio_len,\n config.decoder_config.head_dim,\n compute_dtype,\n device,\n )\n for _ in range(config.decoder_config.num_hidden_layers)\n ]\n\n return cls(\n device=device,\n dtype=compute_dtype,\n enc_out=enc_out,\n enc_positions=enc_state.positions,\n dec_positions=dec_positions,\n self_attn_cache=self_attn_cache,\n cross_attn_cache=dec_cross_attn_cache,\n casual_attn_mask=causal_mask,\n cross_attn_mask=cross_attn_mask,\n )\n\n def prepare_step(self, step_from: int, step_to: int | None = None) -> None:\n if step_to is None:\n step_to = step_from + 1\n self.dec_positions = torch.arange(step_from, step_to, dtype=torch.int32, device=self.device).unsqueeze(0)\n\n\n@dataclass\nclass DecoderOutput:\n generated_tokens: torch.Tensor\n prefill_steps: list[int]\n\n @classmethod\n 
def new(cls, batch_size: int, config: DiaConfig, device: torch.device) -> \"DecoderOutput\":\n max_audio_len = config.decoder_config.max_position_embeddings\n return cls(\n generated_tokens=torch.full(\n (batch_size, max_audio_len, config.decoder_config.num_channels),\n fill_value=-1,\n dtype=torch.int,\n device=device,\n ),\n prefill_steps=[],\n )\n\n def get_tokens_at(self, step_from: int, step_to: int | None = None) -> torch.Tensor:\n if step_to is None:\n step_to = step_from + 1\n return self.generated_tokens[:, step_from:step_to, :]\n\n def update_one(self, dec_out: torch.Tensor, step: int, apply_mask: bool = False):\n dec_out = dec_out.to(self.generated_tokens.dtype)\n if apply_mask:\n mask = self.generated_tokens[:, step, :] == -1\n self.generated_tokens[:, step, :] = torch.where(mask, dec_out, self.generated_tokens[:, step, :])\n else:\n self.generated_tokens[:, step, :] = dec_out\n\n def prefill(self, dec_out: torch.Tensor, prefill_steps: list[int]):\n length = dec_out.shape[1]\n self.generated_tokens[:, :length, :] = dec_out\n self.prefill_steps = prefill_steps\n"], ["/dia/app.py", "import argparse\nimport contextlib\nimport io\nimport random\nimport tempfile\nimport time\nfrom pathlib import Path\nfrom typing import Optional, Tuple\n\nimport gradio as gr\nimport numpy as np\nimport soundfile as sf\nimport torch\n\nfrom dia.model import Dia\n\n\n# --- Global Setup ---\nparser = argparse.ArgumentParser(description=\"Gradio interface for Nari TTS\")\nparser.add_argument(\"--device\", type=str, default=None, help=\"Force device (e.g., 'cuda', 'mps', 'cpu')\")\nparser.add_argument(\"--share\", action=\"store_true\", help=\"Enable Gradio sharing\")\n\nargs = parser.parse_args()\n\n\n# Determine device\nif args.device:\n device = torch.device(args.device)\nelif torch.cuda.is_available():\n device = torch.device(\"cuda\")\n# Simplified MPS check for broader compatibility\nelif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n # Basic 
def set_seed(seed: int) -> None:
    """Seed every RNG the app uses (Python, NumPy, Torch CPU/CUDA) and force
    deterministic cuDNN behavior so repeated runs reproduce the same output."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Determinism over speed: deterministic kernels, autotuner off.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
audio_prompt_text_input.isspace()):\n raise gr.Error(\"Audio Prompt Text input cannot be empty.\")\n\n if not text_input or text_input.isspace():\n raise gr.Error(\"Text input cannot be empty.\")\n\n # Preprocess Audio\n temp_txt_file_path = None\n temp_audio_prompt_path = None\n output_audio = (44100, np.zeros(1, dtype=np.float32))\n\n try:\n prompt_path_for_generate = None\n if audio_prompt_input is not None:\n sr, audio_data = audio_prompt_input\n # Check if audio_data is valid\n if audio_data is None or audio_data.size == 0 or audio_data.max() == 0: # Check for silence/empty\n gr.Warning(\"Audio prompt seems empty or silent, ignoring prompt.\")\n else:\n # Save prompt audio to a temporary WAV file\n with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".wav\", delete=False) as f_audio:\n temp_audio_prompt_path = f_audio.name # Store path for cleanup\n\n # Basic audio preprocessing for consistency\n # Convert to float32 in [-1, 1] range if integer type\n if np.issubdtype(audio_data.dtype, np.integer):\n max_val = np.iinfo(audio_data.dtype).max\n audio_data = audio_data.astype(np.float32) / max_val\n elif not np.issubdtype(audio_data.dtype, np.floating):\n gr.Warning(f\"Unsupported audio prompt dtype {audio_data.dtype}, attempting conversion.\")\n # Attempt conversion, might fail for complex types\n try:\n audio_data = audio_data.astype(np.float32)\n except Exception as conv_e:\n raise gr.Error(f\"Failed to convert audio prompt to float32: {conv_e}\")\n\n # Ensure mono (average channels if stereo)\n if audio_data.ndim > 1:\n if audio_data.shape[0] == 2: # Assume (2, N)\n audio_data = np.mean(audio_data, axis=0)\n elif audio_data.shape[1] == 2: # Assume (N, 2)\n audio_data = np.mean(audio_data, axis=1)\n else:\n gr.Warning(\n f\"Audio prompt has unexpected shape {audio_data.shape}, taking first channel/axis.\"\n )\n audio_data = (\n audio_data[0] if audio_data.shape[0] < audio_data.shape[1] else audio_data[:, 0]\n )\n audio_data = np.ascontiguousarray(audio_data) 
# Ensure contiguous after slicing/mean\n\n # Write using soundfile\n try:\n sf.write(\n temp_audio_prompt_path, audio_data, sr, subtype=\"FLOAT\"\n ) # Explicitly use FLOAT subtype\n prompt_path_for_generate = temp_audio_prompt_path\n print(f\"Created temporary audio prompt file: {temp_audio_prompt_path} (orig sr: {sr})\")\n except Exception as write_e:\n print(f\"Error writing temporary audio file: {write_e}\")\n raise gr.Error(f\"Failed to save audio prompt: {write_e}\")\n\n # Set and Display Generation Seed\n if seed is None or seed < 0:\n seed = random.randint(0, 2**32 - 1)\n print(f\"\\nNo seed provided, generated random seed: {seed}\\n\")\n else:\n print(f\"\\nUsing user-selected seed: {seed}\\n\")\n set_seed(seed)\n\n # Run Generation\n print(f'Generating speech: \\n\"{text_input}\"\\n')\n\n start_time = time.time()\n\n # Use torch.inference_mode() context manager for the generation call\n with torch.inference_mode():\n output_audio_np = model.generate(\n text_input,\n max_tokens=max_new_tokens,\n cfg_scale=cfg_scale,\n temperature=temperature,\n top_p=top_p,\n cfg_filter_top_k=cfg_filter_top_k, # Pass the value here\n use_torch_compile=False, # Keep False for Gradio stability\n audio_prompt=prompt_path_for_generate,\n verbose=True,\n )\n\n end_time = time.time()\n print(f\"Generation finished in {end_time - start_time:.2f} seconds.\\n\")\n\n # 4. 
Convert Codes to Audio\n if output_audio_np is not None:\n # Get sample rate from the loaded DAC model\n output_sr = 44100\n\n # --- Slow down audio ---\n original_len = len(output_audio_np)\n # Ensure speed_factor is positive and not excessively small/large to avoid issues\n speed_factor = max(0.1, min(speed_factor, 5.0))\n target_len = int(original_len / speed_factor) # Target length based on speed_factor\n if target_len != original_len and target_len > 0: # Only interpolate if length changes and is valid\n x_original = np.arange(original_len)\n x_resampled = np.linspace(0, original_len - 1, target_len)\n resampled_audio_np = np.interp(x_resampled, x_original, output_audio_np)\n output_audio = (\n output_sr,\n resampled_audio_np.astype(np.float32),\n ) # Use resampled audio\n print(\n f\"Resampled audio from {original_len} to {target_len} samples for {speed_factor:.2f}x speed.\"\n )\n else:\n output_audio = (\n output_sr,\n output_audio_np,\n ) # Keep original if calculation fails or no change\n print(f\"Skipping audio speed adjustment (factor: {speed_factor:.2f}).\")\n # --- End slowdown ---\n\n print(f\"Audio conversion successful. 
Final shape: {output_audio[1].shape}, Sample Rate: {output_sr}\")\n\n # Explicitly convert to int16 to prevent Gradio warning\n if output_audio[1].dtype == np.float32 or output_audio[1].dtype == np.float64:\n audio_for_gradio = np.clip(output_audio[1], -1.0, 1.0)\n audio_for_gradio = (audio_for_gradio * 32767).astype(np.int16)\n output_audio = (output_sr, audio_for_gradio)\n print(\"Converted audio to int16 for Gradio output.\")\n\n else:\n print(\"\\nGeneration finished, but no valid tokens were produced.\")\n # Return default silence\n gr.Warning(\"Generation produced no output.\")\n\n except Exception as e:\n print(f\"Error during inference: {e}\")\n import traceback\n\n traceback.print_exc()\n # Re-raise as Gradio error to display nicely in the UI\n raise gr.Error(f\"Inference failed: {e}\")\n\n finally:\n # Cleanup Temporary Files defensively\n if temp_txt_file_path and Path(temp_txt_file_path).exists():\n try:\n Path(temp_txt_file_path).unlink()\n print(f\"Deleted temporary text file: {temp_txt_file_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary text file {temp_txt_file_path}: {e}\")\n if temp_audio_prompt_path and Path(temp_audio_prompt_path).exists():\n try:\n Path(temp_audio_prompt_path).unlink()\n print(f\"Deleted temporary audio prompt file: {temp_audio_prompt_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary audio prompt file {temp_audio_prompt_path}: {e}\")\n\n # After generation, capture the printed output\n console_output = console_output_buffer.getvalue()\n\n return output_audio, seed, console_output\n\n\n# --- Create Gradio Interface ---\ncss = \"\"\"\n#col-container {max-width: 90%; margin-left: auto; margin-right: auto;}\n\"\"\"\n# Attempt to load default text from example.txt\ndefault_text = \"[S1] Dia is an open weights text to dialogue model. \\n[S2] You get full control over scripts and voices. \\n[S1] Wow. Amazing. 
(laughs) \\n[S2] Try it now on Git hub or Hugging Face.\"\nexample_txt_path = Path(\"./example.txt\")\nif example_txt_path.exists():\n try:\n default_text = example_txt_path.read_text(encoding=\"utf-8\").strip()\n if not default_text: # Handle empty example file\n default_text = \"Example text file was empty.\"\n except Exception as e:\n print(f\"Warning: Could not read example.txt: {e}\")\n\n\n# Build Gradio UI\nwith gr.Blocks(css=css, theme=\"gradio/dark\") as demo:\n gr.Markdown(\"# Nari Text-to-Speech Synthesis\")\n\n with gr.Row(equal_height=False):\n with gr.Column(scale=1):\n with gr.Accordion(\"Audio Reference Prompt (Optional)\", open=False):\n audio_prompt_input = gr.Audio(\n label=\"Audio Prompt (Optional)\",\n show_label=True,\n sources=[\"upload\", \"microphone\"],\n type=\"numpy\",\n )\n audio_prompt_text_input = gr.Textbox(\n label=\"Transcript of Audio Prompt (Required if using Audio Prompt)\",\n placeholder=\"Enter text here...\",\n value=\"\",\n lines=5, # Increased lines\n )\n text_input = gr.Textbox(\n label=\"Text To Generate\",\n placeholder=\"Enter text here...\",\n value=default_text,\n lines=5, # Increased lines\n )\n with gr.Accordion(\"Generation Parameters\", open=False):\n max_new_tokens = gr.Slider(\n label=\"Max New Tokens (Audio Length)\",\n minimum=860,\n maximum=3072,\n value=model.config.decoder_config.max_position_embeddings, # Use config default if available, else fallback\n step=50,\n info=\"Controls the maximum length of the generated audio (more tokens = longer audio).\",\n )\n cfg_scale = gr.Slider(\n label=\"CFG Scale (Guidance Strength)\",\n minimum=1.0,\n maximum=5.0,\n value=3.0, # Default from inference.py\n step=0.1,\n info=\"Higher values increase adherence to the text prompt.\",\n )\n temperature = gr.Slider(\n label=\"Temperature (Randomness)\",\n minimum=1.0,\n maximum=2.5,\n value=1.8, # Default from inference.py\n step=0.05,\n info=\"Lower values make the output more deterministic, higher values increase 
randomness.\",\n )\n top_p = gr.Slider(\n label=\"Top P (Nucleus Sampling)\",\n minimum=0.70,\n maximum=1.0,\n value=0.95, # Default from inference.py\n step=0.01,\n info=\"Filters vocabulary to the most likely tokens cumulatively reaching probability P.\",\n )\n cfg_filter_top_k = gr.Slider(\n label=\"CFG Filter Top K\",\n minimum=15,\n maximum=100,\n value=45,\n step=1,\n info=\"Top k filter for CFG guidance.\",\n )\n speed_factor_slider = gr.Slider(\n label=\"Speed Factor\",\n minimum=0.8,\n maximum=1.0,\n value=1.0,\n step=0.02,\n info=\"Adjusts the speed of the generated audio (1.0 = original speed).\",\n )\n seed_input = gr.Number(\n label=\"Generation Seed (Optional)\",\n value=-1,\n precision=0, # No decimal points\n step=1,\n interactive=True,\n info=\"Set a generation seed for reproducible outputs. Leave empty or -1 for random seed.\",\n )\n\n run_button = gr.Button(\"Generate Audio\", variant=\"primary\")\n\n with gr.Column(scale=1):\n audio_output = gr.Audio(\n label=\"Generated Audio\",\n type=\"numpy\",\n autoplay=False,\n )\n seed_output = gr.Textbox(label=\"Generation Seed\", interactive=False)\n console_output = gr.Textbox(label=\"Console Output Log\", lines=10, interactive=False)\n\n # Link button click to function\n run_button.click(\n fn=run_inference,\n inputs=[\n text_input,\n audio_prompt_text_input,\n audio_prompt_input,\n max_new_tokens,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n speed_factor_slider,\n seed_input,\n ],\n outputs=[\n audio_output,\n seed_output,\n console_output,\n ], # Add status_output here if using it\n api_name=\"generate_audio\",\n )\n\n # Add examples (ensure the prompt path is correct or remove it if example file doesn't exist)\n example_prompt_path = \"./example_prompt.mp3\" # Adjust if needed\n examples_list = [\n [\n \"[S1] Oh fire! Oh my goodness! What's the procedure? What to we do people? The smoke could be coming through an air duct! \\n[S2] Oh my god! Okay.. it's happening. Everybody stay calm! 
\\n[S1] What's the procedure... \\n[S2] Everybody stay fucking calm!!!... Everybody fucking calm down!!!!! \\n[S1] No! No! If you touch the handle, if its hot there might be a fire down the hallway! \",\n None,\n 3072,\n 3.0,\n 1.8,\n 0.95,\n 45,\n 1.0,\n ],\n [\n \"[S1] Open weights text to dialogue model. \\n[S2] You get full control over scripts and voices. \\n[S1] I'm biased, but I think we clearly won. \\n[S2] Hard to disagree. (laughs) \\n[S1] Thanks for listening to this demo. \\n[S2] Try it now on Git hub and Hugging Face. \\n[S1] If you liked our model, please give us a star and share to your friends. \\n[S2] This was Nari Labs.\",\n example_prompt_path if Path(example_prompt_path).exists() else None,\n 3072,\n 3.0,\n 1.8,\n 0.95,\n 45,\n 1.0,\n ],\n ]\n\n if examples_list:\n gr.Examples(\n examples=examples_list,\n inputs=[\n text_input,\n audio_prompt_input,\n max_new_tokens,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n speed_factor_slider,\n seed_input,\n ],\n outputs=[audio_output],\n fn=run_inference,\n cache_examples=False,\n label=\"Examples (Click to Run)\",\n )\n else:\n gr.Markdown(\"_(No examples configured or example prompt file missing)_\")\n\n# --- Launch the App ---\nif __name__ == \"__main__\":\n print(\"Launching Gradio interface...\")\n\n # set `GRADIO_SERVER_NAME`, `GRADIO_SERVER_PORT` env vars to override default values\n # use `GRADIO_SERVER_NAME=0.0.0.0` for Docker\n demo.launch(share=args.share)\n"], ["/dia/dia/config.py", "\"\"\"Configuration management module for the Dia model.\n\nThis module provides comprehensive configuration management for the Dia model,\nutilizing Pydantic for validation. 
class EncoderConfig(BaseModel, frozen=True):
    """Architecture hyperparameters for the Dia text encoder.

    Attributes:
        head_dim: Dimension of each attention head (default 128).
        hidden_act: Activation function name (default "silu").
        hidden_size: Width of the encoder layers (default 1024).
        initializer_range: Range for weight initialization (default 0.02).
        intermediate_size: Feed-forward inner width (default 4096).
        max_position_embeddings: Maximum number of position embeddings (default 1024).
        model_type: Identifier string, defaults to "dia_encoder".
        norm_eps: Epsilon for normalization layers (default 1e-5).
        num_attention_heads: Number of attention heads (default 16).
        num_hidden_layers: Number of transformer layers (default 12).
        num_key_value_heads: Number of key/value heads (default 16).
        rope_scaling: Optional RoPE scaling factor (default None).
        rope_theta: RoPE base frequency (default 10000.0).
        vocab_size: Text vocabulary size (default 256).
    """

    head_dim: int = Field(default=128, gt=0)
    hidden_act: str = Field(default="silu")
    hidden_size: int = Field(default=1024, gt=0)
    initializer_range: float = Field(default=0.02)
    intermediate_size: int = Field(default=4096, gt=0)
    max_position_embeddings: int = Field(default=1024, gt=0)
    model_type: str = Field(default="dia_encoder")
    norm_eps: float = Field(default=1e-5)
    num_attention_heads: int = Field(default=16, gt=0)
    num_hidden_layers: int = Field(default=12, gt=0)
    num_key_value_heads: int = Field(default=16, gt=0)
    rope_scaling: float | None = Field(default=None)
    rope_theta: float = Field(default=10000.0)
    vocab_size: int = Field(default=256, gt=0)
class DiaConfig(BaseModel, frozen=True):
    """Master configuration container for the Dia model architecture.

    Attributes:
        architectures: Model architecture names, defaults to ["DiaForConditionalGeneration"].
        bos_token_id: Beginning-of-sequence token ID (default 1026).
        decoder_config: Configuration of the decoder stack.
        delay_pattern: Per-audio-channel delay values (default [0, 8, 9, ..., 15]).
        encoder_config: Configuration of the encoder stack.
        eos_token_id: End-of-sequence token ID (default 1024).
        initializer_range: Range for weight initialization (default 0.02).
        is_encoder_decoder: Whether the model is encoder-decoder (default True).
        model_type: Identifier string, defaults to "dia".
        norm_eps: Epsilon for normalization layers (default 1e-5).
        pad_token_id: Padding token ID (default 1025).
        torch_dtype: Weight dtype name for PyTorch (default "float32").
        transformers_version: Producing transformers version (default "4.53.0.dev0").
    """

    architectures: list[str] = Field(default_factory=lambda: ["DiaForConditionalGeneration"])
    bos_token_id: int = Field(default=1026)
    decoder_config: DecoderConfig
    delay_pattern: list[int] = Field(default_factory=lambda: [0, 8, 9, 10, 11, 12, 13, 14, 15])
    encoder_config: EncoderConfig
    eos_token_id: int = Field(default=1024)
    initializer_range: float = Field(default=0.02)
    is_encoder_decoder: bool = Field(default=True)
    model_type: str = Field(default="dia")
    norm_eps: float = Field(default=1e-5)
    pad_token_id: int = Field(default=1025)
    torch_dtype: str = Field(default="float32")
    transformers_version: str = Field(default="4.53.0.dev0")

    def save(self, path: str) -> None:
        """Serialize this configuration to `path` as pretty-printed JSON.

        Creates the parent directory when `path` contains one; a bare filename
        is written to the current working directory.
        """
        parent = os.path.dirname(path)
        # BUGFIX: os.makedirs("") raises FileNotFoundError for a bare filename.
        if parent:
            os.makedirs(parent, exist_ok=True)
        with open(path, "w") as f:
            f.write(self.model_dump_json(indent=2))

    @classmethod
    def load(cls, path: str) -> "DiaConfig | None":
        """Load and validate a Dia configuration from a JSON file.

        Returns:
            A validated DiaConfig, or None when `path` does not exist.

        Raises:
            pydantic.ValidationError: If the JSON fails validation against the schema.
        """
        try:
            with open(path, "r") as f:
                return cls.model_validate_json(f.read())
        except FileNotFoundError:
            return None
Guidance scale (default: 3.0).\"\n )\n gen_group.add_argument(\n \"--temperature\", type=float, default=1.3, help=\"Sampling temperature (higher is more random, default: 0.7).\"\n )\n gen_group.add_argument(\"--top-p\", type=float, default=0.95, help=\"Nucleus sampling probability (default: 0.95).\")\n\n infra_group = parser.add_argument_group(\"Infrastructure\")\n infra_group.add_argument(\"--seed\", type=int, default=None, help=\"Random seed for reproducibility.\")\n infra_group.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device to run inference on (e.g., 'cuda', 'cpu', default: auto).\",\n )\n\n args = parser.parse_args()\n\n # Validation for local paths\n if args.local_paths:\n if not args.config:\n parser.error(\"--config is required when --local-paths is set.\")\n if not args.checkpoint:\n parser.error(\"--checkpoint is required when --local-paths is set.\")\n if not os.path.exists(args.config):\n parser.error(f\"Config file not found: {args.config}\")\n if not os.path.exists(args.checkpoint):\n parser.error(f\"Checkpoint file not found: {args.checkpoint}\")\n\n # Set seed if provided\n if args.seed is not None:\n set_seed(args.seed)\n print(f\"Using user-selected seed: {args.seed}\")\n\n # Determine device\n device = torch.device(args.device)\n print(f\"Using device: {device}\")\n\n # Load model\n print(\"Loading model...\")\n if args.local_paths:\n print(f\"Loading from local paths: config='{args.config}', checkpoint='{args.checkpoint}'\")\n try:\n model = Dia.from_local(args.config, args.checkpoint, device=device)\n except Exception as e:\n print(f\"Error loading local model: {e}\")\n exit(1)\n else:\n print(f\"Loading from Hugging Face Hub: repo_id='{args.repo_id}'\")\n try:\n model = Dia.from_pretrained(args.repo_id, device=device)\n except Exception as e:\n print(f\"Error loading model from Hub: {e}\")\n exit(1)\n print(\"Model loaded.\")\n\n # Generate audio\n print(\"Generating 
audio...\")\n try:\n sample_rate = 44100 # Default assumption\n\n output_audio = model.generate(\n text=args.text,\n audio_prompt=args.audio_prompt,\n max_tokens=args.max_tokens,\n cfg_scale=args.cfg_scale,\n temperature=args.temperature,\n top_p=args.top_p,\n )\n print(\"Audio generation complete.\")\n\n print(f\"Saving audio to {args.output}...\")\n os.makedirs(os.path.dirname(args.output) or \".\", exist_ok=True)\n\n sf.write(args.output, output_audio, sample_rate)\n print(f\"Audio successfully saved to {args.output}\")\n\n except Exception as e:\n print(f\"Error during audio generation or saving: {e}\")\n exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n"], ["/dia/example/benchmark.py", "from random import choice\n\nimport torch\n\nfrom dia.model import Dia\n\n\ntorch._inductor.config.coordinate_descent_tuning = True\ntorch._inductor.config.triton.unique_kernel_names = True\ntorch._inductor.config.fx_graph_cache = True\n\n# debugging\ntorch._logging.set_logs(graph_breaks=True, recompiles=True)\n\nmodel_name = \"nari-labs/Dia-1.6B-0626\"\ncompute_dtype = \"float16\"\n\nmodel = Dia.from_pretrained(model_name, compute_dtype=compute_dtype)\n\n\ntest_cases = [\n \"[S1] Dia is an open weights text to dialogue model.\",\n \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\",\n \"[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code.\",\n \"[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code. 
[S2] It is a new feature in PyTorch that allows you to compile your model with a single line of code.\",\n]\n\n\n# Wram up\nfor _ in range(2):\n text = choice(test_cases)\n output = model.generate(text, audio_prompt=\"./example_prompt.mp3\", use_torch_compile=True, verbose=True)\n output = model.generate(text, use_torch_compile=True, verbose=True)\n\n# Benchmark\nfor _ in range(10):\n text = choice(test_cases)\n output = model.generate(text, use_torch_compile=True, verbose=True)\n output = model.generate(text, audio_prompt=\"./example_prompt.mp3\", use_torch_compile=True, verbose=True)\n"], ["/dia/example/simple-cpu.py", "import torch\n\nfrom dia.model import Dia\n\n\n# Select device: CPU\ndevice = torch.device(\"cpu\")\nprint(f\"Using device: {device}\")\n\n# Load model\nmodel = Dia.from_pretrained(\n \"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float32\", device=device\n) # Float32 works better than float16 on CPU - you can also test with float16\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\noutput = model.generate(text, use_torch_compile=False, verbose=True)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/example/voice_clone.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\n# You should put the transcript of the voice you want to clone\n# We will use the audio created by running simple.py as an example.\n# Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.\nclone_from_text = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\nclone_from_audio = \"simple.mp3\"\n\n# For your custom needs, replace above with below and add your audio file to this directory:\n# clone_from_text = \"[S1] ... 
[S2] ... [S1] ... corresponding to your_audio_name.mp3\"\n# clone_from_audio = \"your_audio_name.mp3\"\n\n# Text to generate\ntext_to_generate = \"[S1] Hello, how are you? [S2] I'm good, thank you. [S1] What's your name? [S2] My name is Dia. [S1] Nice to meet you. [S2] Nice to meet you too.\"\n\n# It will only return the audio from the text_to_generate\noutput = model.generate(\n clone_from_text + text_to_generate,\n audio_prompt=clone_from_audio,\n use_torch_compile=False,\n verbose=True,\n cfg_scale=4.0,\n temperature=1.8,\n top_p=0.90,\n cfg_filter_top_k=50,\n)\n\nmodel.save_audio(\"voice_clone.mp3\", output)\n"], ["/dia/example/voice_clone_batch.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\n# You should put the transcript of the voice you want to clone\n# We will use the audio created by running simple.py as an example.\n# Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.\nclone_from_text = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\n# For your custom needs, replace above with below and add your audio file to this directory:\n# clone_from_text = \"[S1] ... [S2] ... [S1] ... corresponding to your_audio_name.mp3\"\n# clone_from_audio = \"your_audio_name.mp3\"\n\n# Text to generate\ntext_to_generate = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\nclone_from_audios = [f\"simple_{i}.mp3\" for i in range(10)]\n\ntexts = [clone_from_text + text_to_generate for _ in range(10)]\n\n# It will only return the audio from the text_to_generate\noutput = model.generate(texts, audio_prompt=clone_from_audios, use_torch_compile=True, verbose=True, max_tokens=2000)\n\nfor i, o in enumerate(output):\n model.save_audio(f\"voice_clone_{i}.mp3\", o)\n"], ["/dia/hf.py", "from transformers import AutoProcessor, DiaForConditionalGeneration\n\n\ntorch_device = \"cuda\"\nmodel_checkpoint = \"nari-labs/Dia-1.6B-0626\"\n\ntext = [\n \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n]\nprocessor = AutoProcessor.from_pretrained(model_checkpoint)\ninputs = processor(text=text, padding=True, return_tensors=\"pt\").to(torch_device)\n\nmodel = DiaForConditionalGeneration.from_pretrained(model_checkpoint).to(torch_device)\noutputs = model.generate(**inputs, max_new_tokens=3072, guidance_scale=3.0, temperature=1.8, top_p=0.90, top_k=45)\n\noutputs = processor.batch_decode(outputs)\nprocessor.save_audio(outputs, \"example.mp3\")\n"], ["/dia/example/simple_batch.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\ntexts = [text for _ in range(10)]\n\noutput = model.generate(texts, use_torch_compile=True, verbose=True, max_tokens=1500)\n\nfor i, o in enumerate(output):\n model.save_audio(f\"simple_{i}.mp3\", o)\n"], ["/dia/example/simple-mac.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\n# It is important to set the `use_torch_compile` argument to `False` when using Dia on MacOS.\n# This is because the `torch.compile` function is not supported on MacOS.\noutput = model.generate(text, use_torch_compile=False, verbose=True)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/example/simple.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\noutput = model.generate(\n text,\n use_torch_compile=False,\n verbose=True,\n cfg_scale=3.0,\n temperature=1.8,\n top_p=0.90,\n cfg_filter_top_k=50,\n)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/dia/__init__.py", "from .model import Dia\n\n\n__all__ = [\n \"Dia\",\n]\n"]], "task_instance_info": {"created_time": "2025-08-20 15:27:36", "created_task_model": "DeepSeek-R1", "code_description": "This code segment functions to: Apply temporal offsets to multi-channel input data by redistributing elements according to precomputed indices while replacing out-of-bound values with designated padding and beginning-of-sequence markers."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\ndef build_revert_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute indices for revert operation using PyTorch.\n Returns tuple (t_idx_BxTxC, indices_BTCx3) where:\n - t_idx_BxTxC: [B, T, C] tensor of original time indices (time + delay)\n - indices_BTCx3: [B*T*C, 3] gather indices for original audio\n \"\"\"\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32)\n \n # Create base time indices\n t_idx_BT = torch.broadcast_to(\n torch.arange(T, dtype=torch.int32)[None, :],\n [B, T]\n ).unsqueeze(-1) # [B, T, 1]\n \n # Calculate original time indices with delay\n t_idx_BxTxC = torch.clamp(\n t_idx_BT + delay_arr.view(1, 1, C),\n 0, T-1 # Clamp to valid range\n )\n \n # Create broadcasted indices\n b_idx_BxTxC = torch.broadcast_to(\n torch.arange(B, dtype=torch.int32).view(B, 1, 1),\n [B, T, C]\n )\n c_idx_BxTxC = torch.broadcast_to(\n torch.arange(C, dtype=torch.int32).view(1, 1, C),\n [B, T, C]\n )\n \n # Stack indices into [B*T*C, 3] shape\n indices_BTCx3 = torch.stack([\n b_idx_BxTxC.reshape(-1),\n t_idx_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1)\n ], dim=1).long()\n\n return t_idx_BxTxC, 
indices_BTCx3\n[TASK_END]\n```", "inference_time": "2025-08-20 15-28-35"}, "editdistance_info": {"edit_distance": 12.6812, "calculate_time": "2025-08-20 15:28:35", "true_code_clean": "def apply_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n bos_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n) -> torch.Tensor:\n device = audio_BxTxC.device \n t_idx_BxTxC, indices_BTCx3 = precomp\n t_idx_BxTxC = t_idx_BxTxC.to(device) \n indices_BTCx3 = indices_BTCx3.to(device)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.shape)\n mask_bos = t_idx_BxTxC < 0 \n mask_pad = t_idx_BxTxC >= audio_BxTxC.shape[1] \n bos_tensor = torch.tensor(bos_value, dtype=audio_BxTxC.dtype, device=device)\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n result_BxTxC = torch.where(mask_bos, bos_tensor, torch.where(mask_pad, pad_tensor, gathered_BxTxC))\n return result_BxTxC", "predict_code_clean": "def build_revert_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32)\n t_idx_BT = torch.broadcast_to(\n torch.arange(T, dtype=torch.int32)[None, :],\n [B, T]\n ).unsqueeze(-1) \n t_idx_BxTxC = torch.clamp(\n t_idx_BT + delay_arr.view(1, 1, C),\n 0, T-1 \n )\n b_idx_BxTxC = torch.broadcast_to(\n torch.arange(B, dtype=torch.int32).view(B, 1, 1),\n [B, T, C]\n )\n c_idx_BxTxC = torch.broadcast_to(\n torch.arange(C, dtype=torch.int32).view(1, 1, C),\n [B, T, C]\n )\n indices_BTCx3 = torch.stack([\n b_idx_BxTxC.reshape(-1),\n t_idx_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1)\n ], dim=1).long()\n return t_idx_BxTxC, indices_BTCx3"}} {"repo_name": "dia", "file_name": "/dia/app.py", "inference_info": {"prefix_code": "import argparse\nimport contextlib\nimport io\nimport random\nimport tempfile\nimport time\nfrom pathlib import Path\nfrom typing import 
Optional, Tuple\n\nimport gradio as gr\nimport numpy as np\nimport soundfile as sf\nimport torch\n\nfrom dia.model import Dia\n\n\n# --- Global Setup ---\nparser = argparse.ArgumentParser(description=\"Gradio interface for Nari TTS\")\nparser.add_argument(\"--device\", type=str, default=None, help=\"Force device (e.g., 'cuda', 'mps', 'cpu')\")\nparser.add_argument(\"--share\", action=\"store_true\", help=\"Enable Gradio sharing\")\n\nargs = parser.parse_args()\n\n\n# Determine device\nif args.device:\n device = torch.device(args.device)\nelif torch.cuda.is_available():\n device = torch.device(\"cuda\")\n# Simplified MPS check for broader compatibility\nelif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n # Basic check is usually sufficient, detailed check can be problematic\n device = torch.device(\"mps\")\nelse:\n device = torch.device(\"cpu\")\n\nprint(f\"Using device: {device}\")\n\n# Load Nari model and config\nprint(\"Loading Nari model...\")\ntry:\n dtype_map = {\n \"cpu\": \"float32\",\n \"mps\": \"float32\", # Apple M series – better with float32\n \"cuda\": \"float16\", # NVIDIA – better with float16\n }\n\n dtype = dtype_map.get(device.type, \"float16\")\n print(f\"Using device: {device}, attempting to load model with {dtype}\")\n model = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=dtype, device=device)\nexcept Exception as e:\n print(f\"Error loading Nari model: {e}\")\n raise\n\n\ndef set_seed(seed: int):\n \"\"\"Sets the random seed for reproducibility.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\n", "suffix_code": "\n\n\n# --- Create Gradio Interface ---\ncss = \"\"\"\n#col-container {max-width: 90%; margin-left: auto; margin-right: auto;}\n\"\"\"\n# Attempt to load default text from 
example.txt\ndefault_text = \"[S1] Dia is an open weights text to dialogue model. \\n[S2] You get full control over scripts and voices. \\n[S1] Wow. Amazing. (laughs) \\n[S2] Try it now on Git hub or Hugging Face.\"\nexample_txt_path = Path(\"./example.txt\")\nif example_txt_path.exists():\n try:\n default_text = example_txt_path.read_text(encoding=\"utf-8\").strip()\n if not default_text: # Handle empty example file\n default_text = \"Example text file was empty.\"\n except Exception as e:\n print(f\"Warning: Could not read example.txt: {e}\")\n\n\n# Build Gradio UI\nwith gr.Blocks(css=css, theme=\"gradio/dark\") as demo:\n gr.Markdown(\"# Nari Text-to-Speech Synthesis\")\n\n with gr.Row(equal_height=False):\n with gr.Column(scale=1):\n with gr.Accordion(\"Audio Reference Prompt (Optional)\", open=False):\n audio_prompt_input = gr.Audio(\n label=\"Audio Prompt (Optional)\",\n show_label=True,\n sources=[\"upload\", \"microphone\"],\n type=\"numpy\",\n )\n audio_prompt_text_input = gr.Textbox(\n label=\"Transcript of Audio Prompt (Required if using Audio Prompt)\",\n placeholder=\"Enter text here...\",\n value=\"\",\n lines=5, # Increased lines\n )\n text_input = gr.Textbox(\n label=\"Text To Generate\",\n placeholder=\"Enter text here...\",\n value=default_text,\n lines=5, # Increased lines\n )\n with gr.Accordion(\"Generation Parameters\", open=False):\n max_new_tokens = gr.Slider(\n label=\"Max New Tokens (Audio Length)\",\n minimum=860,\n maximum=3072,\n value=model.config.decoder_config.max_position_embeddings, # Use config default if available, else fallback\n step=50,\n info=\"Controls the maximum length of the generated audio (more tokens = longer audio).\",\n )\n cfg_scale = gr.Slider(\n label=\"CFG Scale (Guidance Strength)\",\n minimum=1.0,\n maximum=5.0,\n value=3.0, # Default from inference.py\n step=0.1,\n info=\"Higher values increase adherence to the text prompt.\",\n )\n temperature = gr.Slider(\n label=\"Temperature (Randomness)\",\n 
minimum=1.0,\n maximum=2.5,\n value=1.8, # Default from inference.py\n step=0.05,\n info=\"Lower values make the output more deterministic, higher values increase randomness.\",\n )\n top_p = gr.Slider(\n label=\"Top P (Nucleus Sampling)\",\n minimum=0.70,\n maximum=1.0,\n value=0.95, # Default from inference.py\n step=0.01,\n info=\"Filters vocabulary to the most likely tokens cumulatively reaching probability P.\",\n )\n cfg_filter_top_k = gr.Slider(\n label=\"CFG Filter Top K\",\n minimum=15,\n maximum=100,\n value=45,\n step=1,\n info=\"Top k filter for CFG guidance.\",\n )\n speed_factor_slider = gr.Slider(\n label=\"Speed Factor\",\n minimum=0.8,\n maximum=1.0,\n value=1.0,\n step=0.02,\n info=\"Adjusts the speed of the generated audio (1.0 = original speed).\",\n )\n seed_input = gr.Number(\n label=\"Generation Seed (Optional)\",\n value=-1,\n precision=0, # No decimal points\n step=1,\n interactive=True,\n info=\"Set a generation seed for reproducible outputs. Leave empty or -1 for random seed.\",\n )\n\n run_button = gr.Button(\"Generate Audio\", variant=\"primary\")\n\n with gr.Column(scale=1):\n audio_output = gr.Audio(\n label=\"Generated Audio\",\n type=\"numpy\",\n autoplay=False,\n )\n seed_output = gr.Textbox(label=\"Generation Seed\", interactive=False)\n console_output = gr.Textbox(label=\"Console Output Log\", lines=10, interactive=False)\n\n # Link button click to function\n run_button.click(\n fn=run_inference,\n inputs=[\n text_input,\n audio_prompt_text_input,\n audio_prompt_input,\n max_new_tokens,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n speed_factor_slider,\n seed_input,\n ],\n outputs=[\n audio_output,\n seed_output,\n console_output,\n ], # Add status_output here if using it\n api_name=\"generate_audio\",\n )\n\n # Add examples (ensure the prompt path is correct or remove it if example file doesn't exist)\n example_prompt_path = \"./example_prompt.mp3\" # Adjust if needed\n examples_list = [\n [\n \"[S1] Oh fire! 
Oh my goodness! What's the procedure? What to we do people? The smoke could be coming through an air duct! \\n[S2] Oh my god! Okay.. it's happening. Everybody stay calm! \\n[S1] What's the procedure... \\n[S2] Everybody stay fucking calm!!!... Everybody fucking calm down!!!!! \\n[S1] No! No! If you touch the handle, if its hot there might be a fire down the hallway! \",\n None,\n 3072,\n 3.0,\n 1.8,\n 0.95,\n 45,\n 1.0,\n ],\n [\n \"[S1] Open weights text to dialogue model. \\n[S2] You get full control over scripts and voices. \\n[S1] I'm biased, but I think we clearly won. \\n[S2] Hard to disagree. (laughs) \\n[S1] Thanks for listening to this demo. \\n[S2] Try it now on Git hub and Hugging Face. \\n[S1] If you liked our model, please give us a star and share to your friends. \\n[S2] This was Nari Labs.\",\n example_prompt_path if Path(example_prompt_path).exists() else None,\n 3072,\n 3.0,\n 1.8,\n 0.95,\n 45,\n 1.0,\n ],\n ]\n\n if examples_list:\n gr.Examples(\n examples=examples_list,\n inputs=[\n text_input,\n audio_prompt_input,\n max_new_tokens,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n speed_factor_slider,\n seed_input,\n ],\n outputs=[audio_output],\n fn=run_inference,\n cache_examples=False,\n label=\"Examples (Click to Run)\",\n )\n else:\n gr.Markdown(\"_(No examples configured or example prompt file missing)_\")\n\n# --- Launch the App ---\nif __name__ == \"__main__\":\n print(\"Launching Gradio interface...\")\n\n # set `GRADIO_SERVER_NAME`, `GRADIO_SERVER_PORT` env vars to override default values\n # use `GRADIO_SERVER_NAME=0.0.0.0` for Docker\n demo.launch(share=args.share)\n", "middle_code": "def run_inference(\n text_input: str,\n audio_prompt_text_input: str,\n audio_prompt_input: Optional[Tuple[int, np.ndarray]],\n max_new_tokens: int,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n cfg_filter_top_k: int,\n speed_factor: float,\n seed: Optional[int] = None,\n):\n global model, device \n console_output_buffer = 
io.StringIO()\n with contextlib.redirect_stdout(console_output_buffer):\n if audio_prompt_input and audio_prompt_text_input and not audio_prompt_text_input.isspace():\n text_input = audio_prompt_text_input + \"\\n\" + text_input\n text_input = text_input.strip()\n if audio_prompt_input and (not audio_prompt_text_input or audio_prompt_text_input.isspace()):\n raise gr.Error(\"Audio Prompt Text input cannot be empty.\")\n if not text_input or text_input.isspace():\n raise gr.Error(\"Text input cannot be empty.\")\n temp_txt_file_path = None\n temp_audio_prompt_path = None\n output_audio = (44100, np.zeros(1, dtype=np.float32))\n try:\n prompt_path_for_generate = None\n if audio_prompt_input is not None:\n sr, audio_data = audio_prompt_input\n if audio_data is None or audio_data.size == 0 or audio_data.max() == 0: \n gr.Warning(\"Audio prompt seems empty or silent, ignoring prompt.\")\n else:\n with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".wav\", delete=False) as f_audio:\n temp_audio_prompt_path = f_audio.name \n if np.issubdtype(audio_data.dtype, np.integer):\n max_val = np.iinfo(audio_data.dtype).max\n audio_data = audio_data.astype(np.float32) / max_val\n elif not np.issubdtype(audio_data.dtype, np.floating):\n gr.Warning(f\"Unsupported audio prompt dtype {audio_data.dtype}, attempting conversion.\")\n try:\n audio_data = audio_data.astype(np.float32)\n except Exception as conv_e:\n raise gr.Error(f\"Failed to convert audio prompt to float32: {conv_e}\")\n if audio_data.ndim > 1:\n if audio_data.shape[0] == 2: \n audio_data = np.mean(audio_data, axis=0)\n elif audio_data.shape[1] == 2: \n audio_data = np.mean(audio_data, axis=1)\n else:\n gr.Warning(\n f\"Audio prompt has unexpected shape {audio_data.shape}, taking first channel/axis.\"\n )\n audio_data = (\n audio_data[0] if audio_data.shape[0] < audio_data.shape[1] else audio_data[:, 0]\n )\n audio_data = np.ascontiguousarray(audio_data) \n try:\n sf.write(\n temp_audio_prompt_path, audio_data, sr, 
subtype=\"FLOAT\"\n ) \n prompt_path_for_generate = temp_audio_prompt_path\n print(f\"Created temporary audio prompt file: {temp_audio_prompt_path} (orig sr: {sr})\")\n except Exception as write_e:\n print(f\"Error writing temporary audio file: {write_e}\")\n raise gr.Error(f\"Failed to save audio prompt: {write_e}\")\n if seed is None or seed < 0:\n seed = random.randint(0, 2**32 - 1)\n print(f\"\\nNo seed provided, generated random seed: {seed}\\n\")\n else:\n print(f\"\\nUsing user-selected seed: {seed}\\n\")\n set_seed(seed)\n print(f'Generating speech: \\n\"{text_input}\"\\n')\n start_time = time.time()\n with torch.inference_mode():\n output_audio_np = model.generate(\n text_input,\n max_tokens=max_new_tokens,\n cfg_scale=cfg_scale,\n temperature=temperature,\n top_p=top_p,\n cfg_filter_top_k=cfg_filter_top_k, \n use_torch_compile=False, \n audio_prompt=prompt_path_for_generate,\n verbose=True,\n )\n end_time = time.time()\n print(f\"Generation finished in {end_time - start_time:.2f} seconds.\\n\")\n if output_audio_np is not None:\n output_sr = 44100\n original_len = len(output_audio_np)\n speed_factor = max(0.1, min(speed_factor, 5.0))\n target_len = int(original_len / speed_factor) \n if target_len != original_len and target_len > 0: \n x_original = np.arange(original_len)\n x_resampled = np.linspace(0, original_len - 1, target_len)\n resampled_audio_np = np.interp(x_resampled, x_original, output_audio_np)\n output_audio = (\n output_sr,\n resampled_audio_np.astype(np.float32),\n ) \n print(\n f\"Resampled audio from {original_len} to {target_len} samples for {speed_factor:.2f}x speed.\"\n )\n else:\n output_audio = (\n output_sr,\n output_audio_np,\n ) \n print(f\"Skipping audio speed adjustment (factor: {speed_factor:.2f}).\")\n print(f\"Audio conversion successful. 
Final shape: {output_audio[1].shape}, Sample Rate: {output_sr}\")\n if output_audio[1].dtype == np.float32 or output_audio[1].dtype == np.float64:\n audio_for_gradio = np.clip(output_audio[1], -1.0, 1.0)\n audio_for_gradio = (audio_for_gradio * 32767).astype(np.int16)\n output_audio = (output_sr, audio_for_gradio)\n print(\"Converted audio to int16 for Gradio output.\")\n else:\n print(\"\\nGeneration finished, but no valid tokens were produced.\")\n gr.Warning(\"Generation produced no output.\")\n except Exception as e:\n print(f\"Error during inference: {e}\")\n import traceback\n traceback.print_exc()\n raise gr.Error(f\"Inference failed: {e}\")\n finally:\n if temp_txt_file_path and Path(temp_txt_file_path).exists():\n try:\n Path(temp_txt_file_path).unlink()\n print(f\"Deleted temporary text file: {temp_txt_file_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary text file {temp_txt_file_path}: {e}\")\n if temp_audio_prompt_path and Path(temp_audio_prompt_path).exists():\n try:\n Path(temp_audio_prompt_path).unlink()\n print(f\"Deleted temporary audio prompt file: {temp_audio_prompt_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary audio prompt file {temp_audio_prompt_path}: {e}\")\n console_output = console_output_buffer.getvalue()\n return output_audio, seed, console_output", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/dia/dia/audio.py", "import typing as tp\n\nimport torch\n\n\ndef build_delay_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute (t_idx_BxTxC, indices_BTCx3) so that out[t, c] = in[t - delay[c], c].\n Negative t_idx => BOS; t_idx >= T => PAD.\n \"\"\"\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32)\n\n t_idx_BxT = torch.broadcast_to(\n torch.arange(T, dtype=torch.int32)[None, :],\n [B, T],\n )\n t_idx_BxTx1 = t_idx_BxT[..., 
None]\n t_idx_BxTxC = t_idx_BxTx1 - delay_arr.view(1, 1, C)\n\n b_idx_BxTxC = torch.broadcast_to(\n torch.arange(B, dtype=torch.int32).view(B, 1, 1),\n [B, T, C],\n )\n c_idx_BxTxC = torch.broadcast_to(\n torch.arange(C, dtype=torch.int32).view(1, 1, C),\n [B, T, C],\n )\n\n # We must clamp time indices to [0..T-1] so gather_nd equivalent won't fail\n t_clamped_BxTxC = torch.clamp(t_idx_BxTxC, 0, T - 1)\n\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_clamped_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n dim=1,\n ).long() # Ensure indices are long type for indexing\n\n return t_idx_BxTxC, indices_BTCx3\n\n\ndef apply_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n bos_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n) -> torch.Tensor:\n \"\"\"\n Applies the delay pattern to batched audio tokens using precomputed indices,\n inserting BOS where t_idx < 0 and PAD where t_idx >= T.\n\n Args:\n audio_BxTxC: [B, T, C] int16 audio tokens (or int32/float)\n pad_value: the padding token\n bos_value: the BOS token\n precomp: (t_idx_BxTxC, indices_BTCx3) from build_delay_indices\n\n Returns:\n result_BxTxC: [B, T, C] delayed audio tokens\n \"\"\"\n device = audio_BxTxC.device # Get device from input tensor\n t_idx_BxTxC, indices_BTCx3 = precomp\n t_idx_BxTxC = t_idx_BxTxC.to(device) # Move precomputed indices to device\n indices_BTCx3 = indices_BTCx3.to(device)\n\n # Equivalent of tf.gather_nd using advanced indexing\n # Ensure indices are long type if not already (build_delay_indices should handle this)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.shape)\n\n # Create masks on the correct device\n mask_bos = t_idx_BxTxC < 0 # => place bos_value\n mask_pad = t_idx_BxTxC >= audio_BxTxC.shape[1] # => place pad_value\n\n # Create scalar tensors on the correct device\n bos_tensor = torch.tensor(bos_value, dtype=audio_BxTxC.dtype, 
device=device)\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n\n # If mask_bos, BOS; else if mask_pad, PAD; else original gather\n # All tensors should now be on the same device\n result_BxTxC = torch.where(mask_bos, bos_tensor, torch.where(mask_pad, pad_tensor, gathered_BxTxC))\n\n return result_BxTxC\n\n\ndef build_revert_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute indices for the revert operation using PyTorch.\n\n Returns:\n A tuple (t_idx_BxTxC, indices_BTCx3) where:\n - t_idx_BxTxC is a tensor of shape [B, T, C] computed as time indices plus the delay.\n - indices_BTCx3 is a tensor of shape [B*T*C, 3] used for gathering, computed from:\n batch indices, clamped time indices, and channel indices.\n \"\"\"\n # Use default device unless specified otherwise; assumes inputs might define device later\n device = None # Or determine dynamically if needed, e.g., from a model parameter\n\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32, device=device)\n\n t_idx_BT1 = torch.broadcast_to(torch.arange(T, device=device).unsqueeze(0), [B, T])\n t_idx_BT1 = t_idx_BT1.unsqueeze(-1)\n\n t_idx_BxTxC = torch.minimum(\n t_idx_BT1 + delay_arr.view(1, 1, C),\n torch.tensor(T - 1, device=device),\n )\n b_idx_BxTxC = torch.broadcast_to(torch.arange(B, device=device).view(B, 1, 1), [B, T, C])\n c_idx_BxTxC = torch.broadcast_to(torch.arange(C, device=device).view(1, 1, C), [B, T, C])\n\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_idx_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n axis=1,\n ).long() # Ensure indices are long type\n\n return t_idx_BxTxC, indices_BTCx3\n\n\ndef revert_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n T: int,\n) -> torch.Tensor:\n \"\"\"\n Reverts a delay pattern from batched audio tokens using precomputed indices (PyTorch version).\n\n Args:\n 
audio_BxTxC: Input delayed audio tensor\n pad_value: Padding value for out-of-bounds indices\n precomp: Precomputed revert indices tuple containing:\n - t_idx_BxTxC: Time offset indices tensor\n - indices_BTCx3: Gather indices tensor for original audio\n T: Original sequence length before padding\n\n Returns:\n Reverted audio tensor with same shape as input\n \"\"\"\n t_idx_BxTxC, indices_BTCx3 = precomp\n device = audio_BxTxC.device # Get device from input tensor\n\n # Move precomputed indices to the same device as audio_BxTxC if they aren't already\n t_idx_BxTxC = t_idx_BxTxC.to(device)\n indices_BTCx3 = indices_BTCx3.to(device)\n\n # Using PyTorch advanced indexing (equivalent to tf.gather_nd or np equivalent)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.size()) # Use .size() for robust reshaping\n\n # Create pad_tensor on the correct device\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n # Create T tensor on the correct device for comparison\n T_tensor = torch.tensor(T, device=device)\n\n result_BxTxC = torch.where(t_idx_BxTxC >= T_tensor, pad_tensor, gathered_BxTxC) # Changed np.where to torch.where\n\n return result_BxTxC\n"], ["/dia/dia/model.py", "import time\nfrom enum import Enum\nfrom typing import Callable\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchaudio\n\nfrom .audio import apply_audio_delay, build_delay_indices, build_revert_indices, revert_audio_delay\nfrom .config import DiaConfig\nfrom .layers import DiaModel\nfrom .state import DecoderInferenceState, DecoderOutput, EncoderInferenceState\n\n\nDEFAULT_SAMPLE_RATE = 44100\nSAMPLE_RATE_RATIO = 512\n\n\ndef _get_default_device():\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n elif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n return torch.device(\"mps\")\n return 
torch.device(\"cpu\")\n\n\ndef _sample_next_token(\n logits_BCxV: torch.Tensor,\n temperature: float,\n top_p: float,\n top_k: int | None,\n audio_eos_value: int,\n) -> torch.Tensor:\n if temperature == 0.0:\n return torch.argmax(logits_BCxV, dim=-1)\n\n logits_BCxV = logits_BCxV / temperature\n\n if audio_eos_value is not None and audio_eos_value >= 0:\n top_logit_indices_BC = torch.argmax(logits_BCxV, dim=-1)\n eos_not_highest_mask_BC = top_logit_indices_BC != audio_eos_value\n mask_eos_unless_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)\n mask_eos_unless_highest_BCxV[eos_not_highest_mask_BC, audio_eos_value] = True\n logits_BCxV = logits_BCxV.masked_fill(mask_eos_unless_highest_BCxV, -torch.inf)\n eos_highest_mask_BC = top_logit_indices_BC == audio_eos_value\n mask_eos_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)\n mask_eos_highest_BCxV[eos_highest_mask_BC, :audio_eos_value] = True\n logits_BCxV = logits_BCxV.masked_fill(mask_eos_highest_BCxV, -torch.inf)\n\n if top_k is not None:\n _, top_k_indices_BCxV = torch.topk(logits_BCxV, k=top_k, dim=-1)\n mask = torch.ones_like(logits_BCxV, dtype=torch.bool)\n mask = mask.scatter(dim=-1, index=top_k_indices_BCxV, value=False)\n logits_BCxV = logits_BCxV.masked_fill(mask, -torch.inf)\n\n if top_p < 1.0:\n probs_BCxV = torch.softmax(logits_BCxV, dim=-1)\n sorted_probs_BCxV, sorted_indices_BCxV = torch.sort(probs_BCxV, dim=-1, descending=True)\n cumulative_probs_BCxV = torch.cumsum(sorted_probs_BCxV, dim=-1)\n\n sorted_indices_to_remove_BCxV = cumulative_probs_BCxV > top_p\n sorted_indices_to_remove_BCxV = torch.roll(sorted_indices_to_remove_BCxV, shifts=1, dims=-1)\n sorted_indices_to_remove_BCxV[..., 0] = torch.zeros_like(sorted_indices_to_remove_BCxV[..., 0])\n\n indices_to_remove_BCxV = torch.zeros_like(sorted_indices_to_remove_BCxV)\n indices_to_remove_BCxV = indices_to_remove_BCxV.scatter(\n dim=-1, index=sorted_indices_BCxV, src=sorted_indices_to_remove_BCxV\n )\n logits_BCxV = 
logits_BCxV.masked_fill(indices_to_remove_BCxV, -torch.inf)\n\n final_probs_BCxV = torch.softmax(logits_BCxV, dim=-1)\n\n sampled_indices_BC = torch.multinomial(final_probs_BCxV, num_samples=1)\n sampled_indices_C = sampled_indices_BC.squeeze(-1)\n return sampled_indices_C\n\n\nclass ComputeDtype(str, Enum):\n FLOAT32 = \"float32\"\n FLOAT16 = \"float16\"\n BFLOAT16 = \"bfloat16\"\n\n def to_dtype(self) -> torch.dtype:\n if self == ComputeDtype.FLOAT32:\n return torch.float32\n elif self == ComputeDtype.FLOAT16:\n return torch.float16\n elif self == ComputeDtype.BFLOAT16:\n return torch.bfloat16\n else:\n raise ValueError(f\"Unsupported compute dtype: {self}\")\n\n\nclass Dia:\n def __init__(\n self,\n config: DiaConfig,\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ):\n \"\"\"Initializes the Dia model.\n\n Args:\n config: The configuration object for the model.\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Raises:\n RuntimeError: If there is an error loading the DAC model.\n \"\"\"\n super().__init__()\n self.config = config\n self.device = device if device is not None else _get_default_device()\n if isinstance(compute_dtype, str):\n compute_dtype = ComputeDtype(compute_dtype)\n self.compute_dtype = compute_dtype.to_dtype()\n self.model: DiaModel = DiaModel(config, self.compute_dtype)\n self.dac_model = None\n self._compiled_step = None\n self.load_dac = load_dac\n\n if not self.load_dac:\n print(\"Warning: DAC model will not be loaded. 
This is not recommended.\")\n\n if torch.cuda.is_available():\n torch.backends.cuda.matmul.allow_tf32 = True\n\n @classmethod\n def from_local(\n cls,\n config_path: str,\n checkpoint_path: str,\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ) -> \"Dia\":\n \"\"\"Loads the Dia model from local configuration and checkpoint files.\n\n Args:\n config_path: Path to the configuration JSON file.\n checkpoint_path: Path to the model checkpoint (.pth) file.\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Returns:\n An instance of the Dia model loaded with weights and set to eval mode.\n\n Raises:\n FileNotFoundError: If the config or checkpoint file is not found.\n RuntimeError: If there is an error loading the checkpoint.\n \"\"\"\n config = DiaConfig.load(config_path)\n if config is None:\n raise FileNotFoundError(f\"Config file not found at {config_path}\")\n\n dia = cls(config, compute_dtype, device, load_dac)\n\n try:\n state_dict = torch.load(checkpoint_path, map_location=dia.device)\n dia.model.load_state_dict(state_dict)\n except FileNotFoundError:\n raise FileNotFoundError(f\"Checkpoint file not found at {checkpoint_path}\")\n except Exception as e:\n raise RuntimeError(f\"Error loading checkpoint from {checkpoint_path}\") from e\n\n dia.model.to(dia.device)\n dia.model.eval()\n if load_dac:\n dia._load_dac_model()\n return dia\n\n @classmethod\n def from_pretrained(\n cls,\n model_name: str = \"nari-labs/Dia-1.6B-0626\",\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ) -> \"Dia\":\n \"\"\"Loads the Dia model from a Hugging Face Hub repository.\n\n Downloads the configuration and checkpoint files from the specified\n repository ID and then loads the model.\n\n 
Args:\n model_name: The Hugging Face Hub repository ID (e.g., \"nari-labs/Dia-1.6B-0626\").\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Returns:\n An instance of the Dia model loaded with weights and set to eval mode.\n\n Raises:\n FileNotFoundError: If config or checkpoint download/loading fails.\n RuntimeError: If there is an error loading the checkpoint.\n \"\"\"\n if isinstance(compute_dtype, str):\n compute_dtype = ComputeDtype(compute_dtype)\n\n # Load model directly using DiaModel's from_pretrained which handles HF download\n try:\n loaded_model = DiaModel.from_pretrained(model_name, compute_dtype=compute_dtype.to_dtype())\n except Exception as e:\n raise RuntimeError(f\"Error loading model from Hugging Face Hub ({model_name})\") from e\n\n config = loaded_model.config # Get config from the loaded model\n dia = cls(config, compute_dtype, device, load_dac)\n\n dia.model = loaded_model # Assign the already loaded model\n dia.model.to(dia.device)\n dia.model.eval()\n if load_dac:\n dia._load_dac_model()\n return dia\n\n def _load_dac_model(self):\n \"\"\"Loads the Descript Audio Codec (DAC) model.\n\n Downloads the DAC model if necessary and loads it onto the specified device.\n Sets the DAC model to evaluation mode.\n\n Raises:\n RuntimeError: If downloading or loading the DAC model fails.\n \"\"\"\n import dac\n\n try:\n dac_model_path = dac.utils.download()\n dac_model = dac.DAC.load(dac_model_path).to(self.device)\n dac_model.eval() # Ensure DAC is in eval mode\n except Exception as e:\n raise RuntimeError(\"Failed to load DAC model\") from e\n self.dac_model = dac_model\n\n def _encode_text(self, text: str) -> torch.Tensor:\n \"\"\"Encodes the input text string into a tensor of token IDs using byte-level encoding.\n\n Special tokens [S1] and [S2] are replaced by their byte values. 
The resulting\n sequence is truncated to the maximum configured text length.\n\n Args:\n text: The input text string.\n\n Returns:\n A tensor containing the encoded byte token IDs.\n \"\"\"\n max_len = self.config.encoder_config.max_position_embeddings\n\n byte_text = text.encode(\"utf-8\")\n # Replace special tokens with their byte values if needed by the specific tokenizer/config\n # Assuming byte values 1 and 2 are correct placeholders based on original code\n replaced_bytes = byte_text.replace(b\"[S1]\", b\"\\x01\").replace(b\"[S2]\", b\"\\x02\")\n text_tokens = list(replaced_bytes)\n return torch.tensor(\n text_tokens[:max_len],\n dtype=torch.long,\n device=self.device,\n )\n\n def _pad_text_input(self, text_tokens: list[torch.Tensor]) -> torch.Tensor:\n \"\"\"Pads the text input to the maximum length.\"\"\"\n text_pad_value = 0\n max_len = self.config.encoder_config.max_position_embeddings\n batch_size = len(text_tokens)\n\n src_tokens = torch.full(\n (batch_size, 1, max_len),\n fill_value=text_pad_value,\n dtype=torch.long,\n device=self.device,\n )\n for i in range(batch_size):\n current_len = len(text_tokens[i])\n src_tokens[i, 0, :current_len] = text_tokens[i]\n return src_tokens\n\n def _prepare_audio_prompt(self, audio_prompts: list[torch.Tensor | None]) -> tuple[torch.Tensor, list[int]]:\n \"\"\"Prepares the audio prompt tensor for the decoder.\n\n Handles padding, adds the beginning-of-sequence (BOS) token, applies the\n delay pattern, and determines the number of prefill steps for each item\n in the batch.\n\n Args:\n audio_prompts: A list of audio prompt tensors (encoded DAC frames) or None.\n Each tensor should have shape [T, C].\n\n Returns:\n A tuple containing:\n - delayed_batch (torch.Tensor): The prepared audio prompt tensor with\n delays applied, shape [B, T_max_padded, C].\n - prefill_steps (list[int]): A list containing the number of valid\n tokens (including BOS) for each prompt in the batch.\n \"\"\"\n num_channels = 
self.config.decoder_config.num_channels\n audio_bos_value = self.config.bos_token_id\n delay_pattern = self.config.delay_pattern\n max_delay_pattern = max(delay_pattern)\n batch_size = len(audio_prompts)\n\n max_len = max(p.shape[0] if p is not None else 0 for p in audio_prompts) + max_delay_pattern\n prefill_steps = []\n\n prefill = torch.full(\n (batch_size, max_len, num_channels),\n fill_value=-1,\n dtype=torch.int,\n device=self.device,\n )\n\n prefill[:, 0, :] = audio_bos_value\n\n for i in range(batch_size):\n prompt = audio_prompts[i]\n if prompt is not None:\n prompt = prompt.to(device=self.device, dtype=torch.int)\n prefill[i, 1 : prompt.shape[0] + 1, :] = prompt\n prefill_steps.append(prompt.shape[0] + 1)\n else:\n prefill_steps.append(1)\n\n delay_precomp = build_delay_indices(\n B=batch_size,\n T=max_len,\n C=num_channels,\n delay_pattern=delay_pattern,\n )\n\n delayed_batch = apply_audio_delay(\n audio_BxTxC=prefill,\n pad_value=-1,\n bos_value=audio_bos_value,\n precomp=delay_precomp,\n )\n\n return delayed_batch, prefill_steps\n\n def _prepare_generation(\n self,\n text: torch.Tensor,\n audio_prompts: list[torch.Tensor | None],\n max_tokens: int | None = None,\n attn_fn: Callable = F.scaled_dot_product_attention,\n ):\n \"\"\"Initializes the model state for generation.\n\n Encodes the text input (conditional and unconditional), prepares the\n encoder and decoder states (including KV caches and cross-attention),\n prepares the audio prompt, and performs the initial decoder prefill steps\n based on the audio prompts.\n\n Args:\n text: The padded text input tensor, shape [B, 1, T_text].\n audio_prompts: A list of prepared audio prompt tensors or None.\n\n Returns:\n A tuple containing:\n - dec_state (DecoderInferenceState): The initialized decoder state.\n - dec_output (DecoderOutput): The initialized decoder output manager,\n containing the prefilled audio tokens.\n \"\"\"\n batch_size = text.shape[0]\n\n enc_input_uncond = torch.zeros_like(text)\n 
enc_input_cond = text\n stacked_inputs = torch.stack([enc_input_uncond, enc_input_cond], dim=1)\n enc_input = stacked_inputs.view(2 * batch_size, -1)\n\n enc_state = EncoderInferenceState.new(self.config, enc_input_cond)\n encoder_out = self.model.encoder(enc_input, enc_state)\n\n dec_cross_attn_cache = self.model.decoder.precompute_cross_attn_cache(encoder_out)\n dec_state = DecoderInferenceState.new(\n self.config,\n enc_state,\n encoder_out,\n dec_cross_attn_cache,\n self.compute_dtype,\n max_generation_length=max_tokens,\n )\n prefill, prefill_steps = self._prepare_audio_prompt(audio_prompts)\n\n dec_output = DecoderOutput.new(batch_size, self.config, self.device)\n dec_output.prefill(prefill, prefill_steps)\n\n dec_step = min(prefill_steps) - 1\n if dec_step > 0:\n dec_state.prepare_step(0, dec_step)\n tokens_BxTxC = dec_output.get_tokens_at(0, dec_step).repeat_interleave(2, dim=0)\n self.model.decoder.forward(tokens_BxTxC, dec_state)\n\n return dec_state, dec_output\n\n def _decoder_step(\n self,\n tokens_Bx1xC: torch.Tensor,\n dec_state: DecoderInferenceState,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n top_k: int,\n current_idx: int,\n ) -> torch.Tensor:\n \"\"\"Performs a single step of the decoder inference.\n\n Takes the tokens from the previous step, runs them through the decoder\n (for both conditional and unconditional paths), applies classifier-free\n guidance (CFG), samples the next token using temperature, top-p, and top-k\n sampling, and applies constraints (e.g., preventing EOS in certain channels).\n\n Args:\n tokens_Bx1xC: The input tokens for the current step, shape [2*B, 1, C].\n Repeated for CFG (unconditional and conditional).\n dec_state: The current state of the decoder (KV caches, etc.).\n cfg_scale: The scale factor for classifier-free guidance.\n temperature: The temperature for sampling.\n top_p: The cumulative probability threshold for top-p sampling.\n top_k: The number of top logits to consider for top-k sampling.\n 
current_idx: The current generation step index.\n\n Returns:\n torch.Tensor: The sampled next tokens for each item in the batch,\n shape [B, C].\n \"\"\"\n B = tokens_Bx1xC.shape[0] // 2\n\n audio_eos_value = self.config.eos_token_id\n logits_Bx1xCxV = self.model.decoder.decode_step(tokens_Bx1xC, dec_state, current_idx)\n\n logits_last_2BxCxV = logits_Bx1xCxV[:, -1]\n logits_last_Bx2xCxV = logits_last_2BxCxV.view(B, 2, *logits_last_2BxCxV.shape[1:])\n\n uncond_logits_BxCxV = logits_last_Bx2xCxV[:, 0, :, :] # Shape [B, C, V]\n cond_logits_BxCxV = logits_last_Bx2xCxV[:, 1, :, :] # Shape [B, C, V]\n logits_BxCxV = cond_logits_BxCxV + cfg_scale * (cond_logits_BxCxV - uncond_logits_BxCxV)\n\n _, top_k_indices_BxCxk = torch.topk(logits_BxCxV, k=top_k, dim=-1)\n mask_BxCxV = torch.ones_like(logits_BxCxV, dtype=torch.bool)\n mask_BxCxV = mask_BxCxV.scatter(dim=-1, index=top_k_indices_BxCxk, value=False)\n logits_BxCxV = cond_logits_BxCxV.masked_fill(mask_BxCxV, -torch.inf)\n\n logits_BxCxV[:, :, audio_eos_value + 1 :] = torch.full_like(\n logits_BxCxV[:, :, audio_eos_value + 1 :],\n fill_value=-torch.inf,\n )\n logits_BxCxV[:, 1:, audio_eos_value:] = torch.full_like(\n logits_BxCxV[:, 1:, audio_eos_value:],\n fill_value=-torch.inf,\n )\n\n flat_logits_BCxV = logits_BxCxV.view(B * self.config.decoder_config.num_channels, -1)\n\n pred_BC = _sample_next_token(\n flat_logits_BCxV.float(),\n temperature=temperature,\n top_p=top_p,\n top_k=top_k,\n audio_eos_value=audio_eos_value,\n )\n\n pred_BxC = pred_BC.view(B, self.config.decoder_config.num_channels)\n return pred_BxC\n\n def _generate_output(self, generated_codes: torch.Tensor, lengths_Bx: torch.Tensor) -> list[np.ndarray]:\n \"\"\"Converts generated delayed codes into audio waveforms.\n\n Reverts the delay pattern applied during generation, decodes the resulting\n codebook using the DAC model (if loaded), and returns a list of audio\n waveforms as NumPy arrays. 
If DAC is not loaded, returns the raw codebook indices.\n\n Args:\n generated_codes: The tensor of generated audio codes with delays,\n shape [B, T_gen, C].\n lengths_Bx: A tensor containing the valid length of generated codes\n (excluding padding and BOS/EOS markers) for each item\n in the batch, shape [B].\n\n Returns:\n A list of NumPy arrays, where each array represents the generated audio\n waveform for one item in the batch. If DAC is not loaded, returns the\n raw, reverted codebook indices as NumPy arrays.\n \"\"\"\n num_channels = self.config.decoder_config.num_channels\n batch_size = generated_codes.shape[0]\n seq_length = generated_codes.shape[1]\n delay_pattern = self.config.delay_pattern\n audio_pad_value = self.config.pad_token_id\n max_delay_pattern = max(delay_pattern)\n\n revert_precomp = build_revert_indices(\n B=batch_size,\n T=seq_length,\n C=num_channels,\n delay_pattern=delay_pattern,\n )\n\n codebook = revert_audio_delay(\n audio_BxTxC=generated_codes,\n pad_value=audio_pad_value,\n precomp=revert_precomp,\n T=seq_length,\n )[:, :-max_delay_pattern, :]\n\n min_valid_index = 0\n max_valid_index = 1023\n invalid_mask = (codebook < min_valid_index) | (codebook > max_valid_index)\n codebook[invalid_mask] = 0\n\n audios = []\n\n if self.load_dac:\n for i in range(batch_size):\n audio = self._decode(codebook[i, : lengths_Bx[i], :])\n audio_np = audio.cpu().numpy()\n audios.append(audio_np)\n else:\n for i in range(batch_size):\n audios.append(codebook[i, : lengths_Bx[i], :].cpu().numpy())\n return audios\n\n @torch.no_grad()\n @torch.inference_mode()\n def _encode(self, audio: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Encodes the given audio waveform into a tensor of DAC codebook indices\n \"\"\"\n audio = audio.unsqueeze(0)\n audio_data = self.dac_model.preprocess(audio, DEFAULT_SAMPLE_RATE)\n _, encoded_frame, _, _, _ = self.dac_model.encode(audio_data)\n encoded_frame: torch.Tensor\n return encoded_frame.squeeze(0).transpose(0, 1)\n\n 
@torch.no_grad()\n @torch.inference_mode()\n def _decode(self, audio_codes: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Decodes the given frames into an output audio waveform\n \"\"\"\n audio_codes = audio_codes.unsqueeze(0).transpose(1, 2)\n audio_values, _, _ = self.dac_model.quantizer.from_codes(audio_codes)\n audio_values = self.dac_model.decode(audio_values)\n audio_values: torch.Tensor\n return audio_values.squeeze()\n\n def load_audio(self, audio_path: str) -> torch.Tensor:\n \"\"\"Loads and preprocesses an audio file for use as a prompt.\n\n Loads the audio file, resamples it to the target sample rate if necessary,\n preprocesses it using the DAC model's preprocessing, and encodes it into\n DAC codebook indices.\n\n Args:\n audio_path: Path to the audio file.\n\n Returns:\n torch.Tensor: The encoded audio prompt as DAC codebook indices,\n shape [T, C].\n\n Raises:\n RuntimeError: If the DAC model is not loaded (`load_dac=False` during init).\n FileNotFoundError: If the audio file cannot be found.\n Exception: If there's an error during loading or processing.\n \"\"\"\n if self.dac_model is None:\n raise RuntimeError(\"DAC model is required for loading audio prompts but was not loaded.\")\n audio, sr = torchaudio.load(audio_path, channels_first=True) # C, T\n if sr != DEFAULT_SAMPLE_RATE:\n audio = torchaudio.functional.resample(audio, sr, DEFAULT_SAMPLE_RATE)\n # Convert to mono if stereo\n if audio.shape[0] > 1:\n audio = torch.mean(audio, dim=0, keepdim=True) # Average channels to get mono\n return self._encode(audio.to(self.device))\n\n def save_audio(self, path: str, audio: np.ndarray):\n \"\"\"Saves the generated audio waveform to a file.\n\n Uses the soundfile library to write the NumPy audio array to the specified\n path with the default sample rate.\n\n Args:\n path: The path where the audio file will be saved.\n audio: The audio waveform as a NumPy array.\n \"\"\"\n import soundfile as sf\n\n sf.write(path, audio, DEFAULT_SAMPLE_RATE)\n\n 
@torch.inference_mode()\n def generate(\n self,\n text: str | list[str],\n max_tokens: int = 3072,\n cfg_scale: float = 3.0,\n temperature: float = 1.2,\n top_p: float = 0.95,\n use_torch_compile: bool = False,\n cfg_filter_top_k: int = 45,\n audio_prompt: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,\n audio_prompt_path: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,\n use_cfg_filter: bool | None = None,\n verbose: bool = False,\n ) -> np.ndarray | list[np.ndarray]:\n \"\"\"Generates audio corresponding to the input text.\n\n Args:\n text: The input text prompt, or a list of text prompts for batch generation.\n max_tokens: The maximum number of audio tokens to generate per prompt.\n Defaults to the model's configured audio length if None.\n cfg_scale: The scale factor for classifier-free guidance (CFG). Higher values\n lead to stronger guidance towards the text prompt.\n temperature: The temperature for sampling. Higher values increase randomness.\n top_p: The cumulative probability threshold for nucleus (top-p) sampling.\n use_torch_compile: Whether to compile the generation steps using torch.compile.\n Can significantly speed up generation after the initial\n compilation overhead. 
Defaults to False.\n cfg_filter_top_k: The number of top logits to consider during CFG filtering.\n (Note: This parameter name might be slightly misleading based\n on the code; it's used in the `_sample_next_token` function.)\n audio_prompt: An audio prompt or list of prompts to condition the generation.\n Can be a file path (str), a pre-loaded tensor (DAC codes), or None.\n If a list, its length must match the batch size of the text input.\n audio_prompt_path: (Deprecated) Use `audio_prompt` instead.\n use_cfg_filter: (Deprecated) This parameter is no longer used.\n verbose: If True, prints progress information during generation, including\n speed metrics.\n\n Returns:\n If a single text prompt was provided, returns a NumPy array containing the\n generated audio waveform.\n If a list of text prompts was provided, returns a list of NumPy arrays,\n each corresponding to a prompt in the input list. Returns None for a\n sequence if no audio was generated for it.\n \"\"\"\n batch_size = len(text) if isinstance(text, list) else 1\n audio_eos_value = self.config.eos_token_id\n audio_pad_value = self.config.pad_token_id\n delay_pattern = self.config.delay_pattern\n max_delay_pattern = max(delay_pattern)\n delay_pattern_Cx = torch.tensor(delay_pattern, device=self.device, dtype=torch.long)\n self.model.eval()\n\n if audio_prompt_path:\n print(\"Warning: audio_prompt_path is deprecated. 
Use audio_prompt instead.\")\n audio_prompt = audio_prompt_path\n if use_cfg_filter is not None:\n print(\"Warning: use_cfg_filter is deprecated.\")\n\n if verbose:\n total_start_time = time.time()\n\n if use_torch_compile and not hasattr(self, \"_compiled\"):\n # Compilation can take about a minute.\n self._prepare_generation = torch.compile(self._prepare_generation, dynamic=True, fullgraph=True)\n self._decoder_step = torch.compile(self._decoder_step, fullgraph=True, mode=\"max-autotune\")\n self._compiled = True\n\n if isinstance(audio_prompt, list):\n audio_prompt = [self.load_audio(p) if isinstance(p, str) else p for p in audio_prompt]\n elif isinstance(audio_prompt, str):\n audio_prompt = [self.load_audio(audio_prompt)]\n elif isinstance(audio_prompt, torch.Tensor):\n audio_prompt = [audio_prompt]\n elif audio_prompt is None:\n audio_prompt = [None] * batch_size\n\n assert len(audio_prompt) == batch_size, \"Number of audio prompts must match batch size\"\n\n if isinstance(text, list):\n text = [self._encode_text(t) for t in text]\n else:\n text = [self._encode_text(text)]\n text = self._pad_text_input(text)\n\n dec_state, dec_output = self._prepare_generation(text, audio_prompt, max_tokens=max_tokens)\n dec_step = min(dec_output.prefill_steps) - 1\n current_idx = torch.tensor([dec_step], device=self.device)\n\n eos_detected_Bx = torch.zeros((batch_size,), dtype=torch.bool, device=self.device)\n eos_countdown_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n finished_step_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n\n bos_over = False\n\n if verbose:\n print(\"generate: starting generation loop\")\n if use_torch_compile:\n print(\"generate: using use_torch_compile=True, the first step may be slow\")\n start_time = time.time()\n\n # --- Generation Loop ---\n while dec_step < max_tokens:\n if (eos_countdown_Bx == 0).all():\n break\n\n current_step_idx = dec_step + 1\n 
torch.compiler.cudagraph_mark_step_begin()\n dec_state.prepare_step(dec_step)\n tokens_Bx1xC = dec_output.get_tokens_at(dec_step).repeat_interleave(2, dim=0) # Repeat for CFG\n\n pred_BxC = self._decoder_step(\n tokens_Bx1xC,\n dec_state,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n current_idx,\n )\n\n current_idx += 1\n\n active_mask_Bx = eos_countdown_Bx != 0\n eos_trigger_Bx = torch.zeros_like(active_mask_Bx)\n if active_mask_Bx.any():\n is_eos_token = (~eos_detected_Bx[active_mask_Bx]) & (pred_BxC[active_mask_Bx, 0] == audio_eos_value)\n is_max_len = current_step_idx >= max_tokens - max_delay_pattern\n eos_trigger_Bx[active_mask_Bx] = is_eos_token | is_max_len\n eos_detected_Bx |= eos_trigger_Bx\n start_countdown_mask_Bx = eos_trigger_Bx & (eos_countdown_Bx < 0)\n if start_countdown_mask_Bx.any():\n eos_countdown_Bx[start_countdown_mask_Bx] = max_delay_pattern\n finished_step_Bx[start_countdown_mask_Bx] = current_step_idx\n\n padding_mask_Bx = eos_countdown_Bx > 0\n if padding_mask_Bx.any():\n pred_active_BxC = pred_BxC[padding_mask_Bx].clone()\n countdown_active_Bx = eos_countdown_Bx[padding_mask_Bx]\n step_after_eos_Bx = max_delay_pattern - countdown_active_Bx\n step_after_eos_Bx_ = step_after_eos_Bx.unsqueeze(1)\n delay_pattern_Cx_ = delay_pattern_Cx.unsqueeze(0)\n eos_mask_NxC = step_after_eos_Bx_ == delay_pattern_Cx_\n pad_mask_NxC = step_after_eos_Bx_ > delay_pattern_Cx_\n pred_active_BxC[eos_mask_NxC] = audio_eos_value\n pred_active_BxC[pad_mask_NxC] = audio_pad_value\n pred_BxC[padding_mask_Bx] = pred_active_BxC\n eos_countdown_Bx[padding_mask_Bx] -= 1\n\n # --- Update BOS flag (Original) ---\n if not bos_over:\n bos_over = all(\n dec_step - prefill_step > max_delay_pattern for prefill_step in dec_output.prefill_steps\n )\n\n dec_output.update_one(pred_BxC, current_step_idx, not bos_over)\n\n dec_step += 1\n\n if verbose and dec_step % 86 == 0:\n duration = time.time() - start_time\n if duration > 0:\n print(\n f\"generate step 
{dec_step}: speed={86 * batch_size / duration:.3f} tokens/s, realtime factor={batch_size / duration:.3f}x\"\n )\n start_time = time.time()\n\n # --- Finalize and Extract Output ---\n final_step = dec_step + 1\n\n finished_step_Bx[finished_step_Bx == -1] = final_step - max_delay_pattern\n\n prefill_steps_tensor = torch.tensor(dec_output.prefill_steps, device=self.device)\n lengths_Bx = finished_step_Bx - prefill_steps_tensor\n lengths_Bx = torch.clamp(lengths_Bx, min=0)\n\n max_len = lengths_Bx.max().item() + max_delay_pattern\n outputs = []\n\n if max_len > 0:\n num_channels = self.config.decoder_config.num_channels\n audio_pad_value = self.config.pad_token_id\n generated_codes = torch.full(\n (batch_size, max_len, num_channels),\n fill_value=audio_pad_value,\n dtype=torch.long,\n device=self.device,\n )\n\n for i in range(batch_size):\n start_step = dec_output.prefill_steps[i]\n actual_len = lengths_Bx[i].item() + max_delay_pattern\n if actual_len > 0:\n tokens_to_copy = dec_output.generated_tokens[i, start_step : start_step + actual_len, :]\n generated_codes[i, :actual_len, :] = tokens_to_copy\n\n if verbose:\n avg_steps = lengths_Bx.float().mean().item()\n total_duration = time.time() - total_start_time\n print(f\"generate: avg steps={avg_steps:.1f}, total duration={total_duration:.3f}s\")\n\n del dec_state\n\n outputs = self._generate_output(generated_codes, lengths_Bx)\n else:\n print(\"Warning: Nothing generated for any sequence in the batch.\")\n outputs = [None] * batch_size\n\n return outputs if batch_size > 1 else outputs[0]\n"], ["/dia/cli.py", "import argparse\nimport os\nimport random\n\nimport numpy as np\nimport soundfile as sf\nimport torch\n\nfrom dia.model import Dia\n\n\ndef set_seed(seed: int):\n \"\"\"Sets the random seed for reproducibility.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # Ensure deterministic 
behavior for cuDNN (if used)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Generate audio using the Dia model.\")\n\n parser.add_argument(\"text\", type=str, help=\"Input text for speech generation.\")\n parser.add_argument(\n \"--output\", type=str, required=True, help=\"Path to save the generated audio file (e.g., output.wav).\"\n )\n\n parser.add_argument(\n \"--repo-id\",\n type=str,\n default=\"nari-labs/Dia-1.6B-0626\",\n help=\"Hugging Face repository ID (e.g., nari-labs/Dia-1.6B-0626).\",\n )\n parser.add_argument(\n \"--local-paths\", action=\"store_true\", help=\"Load model from local config and checkpoint files.\"\n )\n\n parser.add_argument(\n \"--config\", type=str, help=\"Path to local config.json file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--checkpoint\", type=str, help=\"Path to local model checkpoint .pth file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--audio-prompt\", type=str, default=None, help=\"Path to an optional audio prompt WAV file for voice cloning.\"\n )\n\n gen_group = parser.add_argument_group(\"Generation Parameters\")\n gen_group.add_argument(\n \"--max-tokens\",\n type=int,\n default=None,\n help=\"Maximum number of audio tokens to generate (defaults to config value).\",\n )\n gen_group.add_argument(\n \"--cfg-scale\", type=float, default=3.0, help=\"Classifier-Free Guidance scale (default: 3.0).\"\n )\n gen_group.add_argument(\n \"--temperature\", type=float, default=1.3, help=\"Sampling temperature (higher is more random, default: 0.7).\"\n )\n gen_group.add_argument(\"--top-p\", type=float, default=0.95, help=\"Nucleus sampling probability (default: 0.95).\")\n\n infra_group = parser.add_argument_group(\"Infrastructure\")\n infra_group.add_argument(\"--seed\", type=int, default=None, help=\"Random seed for reproducibility.\")\n infra_group.add_argument(\n \"--device\",\n 
type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device to run inference on (e.g., 'cuda', 'cpu', default: auto).\",\n )\n\n args = parser.parse_args()\n\n # Validation for local paths\n if args.local_paths:\n if not args.config:\n parser.error(\"--config is required when --local-paths is set.\")\n if not args.checkpoint:\n parser.error(\"--checkpoint is required when --local-paths is set.\")\n if not os.path.exists(args.config):\n parser.error(f\"Config file not found: {args.config}\")\n if not os.path.exists(args.checkpoint):\n parser.error(f\"Checkpoint file not found: {args.checkpoint}\")\n\n # Set seed if provided\n if args.seed is not None:\n set_seed(args.seed)\n print(f\"Using user-selected seed: {args.seed}\")\n\n # Determine device\n device = torch.device(args.device)\n print(f\"Using device: {device}\")\n\n # Load model\n print(\"Loading model...\")\n if args.local_paths:\n print(f\"Loading from local paths: config='{args.config}', checkpoint='{args.checkpoint}'\")\n try:\n model = Dia.from_local(args.config, args.checkpoint, device=device)\n except Exception as e:\n print(f\"Error loading local model: {e}\")\n exit(1)\n else:\n print(f\"Loading from Hugging Face Hub: repo_id='{args.repo_id}'\")\n try:\n model = Dia.from_pretrained(args.repo_id, device=device)\n except Exception as e:\n print(f\"Error loading model from Hub: {e}\")\n exit(1)\n print(\"Model loaded.\")\n\n # Generate audio\n print(\"Generating audio...\")\n try:\n sample_rate = 44100 # Default assumption\n\n output_audio = model.generate(\n text=args.text,\n audio_prompt=args.audio_prompt,\n max_tokens=args.max_tokens,\n cfg_scale=args.cfg_scale,\n temperature=args.temperature,\n top_p=args.top_p,\n )\n print(\"Audio generation complete.\")\n\n print(f\"Saving audio to {args.output}...\")\n os.makedirs(os.path.dirname(args.output) or \".\", exist_ok=True)\n\n sf.write(args.output, output_audio, sample_rate)\n print(f\"Audio successfully saved to 
{args.output}\")\n\n except Exception as e:\n print(f\"Error during audio generation or saving: {e}\")\n exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n"], ["/dia/dia/layers.py", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torch import Tensor\nfrom torch.nn import RMSNorm\n\nfrom .config import DecoderConfig, DiaConfig, EncoderConfig\nfrom .state import DecoderInferenceState, EncoderInferenceState, KVCache\n\n\ndef _normalize_axes(axes: tuple[int, ...], ndim: int) -> tuple[int, ...]:\n return tuple(ax if ax >= 0 else ndim + ax for ax in axes)\n\n\nclass DenseGeneral(nn.Module):\n \"\"\"\n PyTorch equivalent of flax.linen.DenseGeneral with shapes defined at init.\n Stores weights (`kernel`) in the same layout as Jax and uses torch.tensordot\n for the generalized matrix multiplication. Weight/bias shapes are calculated\n and parameters created during initialization based on config.\n `load_weights` validates shapes and copies data.\n Attributes:\n axis (Tuple[int, ...]): Input axis or axes to contract.\n in_shapes (Tuple[int, ...]): Sizes of the input dimensions specified by `axis`.\n out_features (Tuple[int, ...]): Shape of the output features (non-contracted dims).\n use_bias (bool): Whether to add a bias term.\n weight (nn.Parameter): The kernel parameter.\n bias (Optional[nn.Parameter]): The bias parameter (if use_bias=True).\n \"\"\"\n\n def __init__(\n self,\n in_shapes: tuple[int, ...],\n out_features: tuple[int, ...],\n axis: tuple[int, ...] 
= (-1,),\n weight_dtype: torch.dtype | None = None,\n device: torch.device | None = None,\n ):\n super().__init__()\n self.in_shapes = in_shapes\n self.out_features = out_features\n self.axis = axis\n self.kernel_shape = self.in_shapes + self.out_features\n\n factory_kwargs = {\"device\": device, \"dtype\": weight_dtype}\n self.weight = nn.Parameter(torch.empty(self.kernel_shape, **factory_kwargs))\n\n def forward(self, inputs: Tensor) -> Tensor:\n norm_axis = _normalize_axes(self.axis, inputs.ndim)\n kernel_contract_axes = tuple(range(len(norm_axis)))\n\n output = torch.tensordot(\n inputs.to(self.weight.dtype),\n self.weight,\n dims=(norm_axis, kernel_contract_axes),\n ).to(inputs.dtype)\n return output\n\n\nclass MlpBlock(nn.Module):\n \"\"\"MLP block using DenseGeneral.\"\"\"\n\n def __init__(self, embed_dim: int, intermediate_dim: int, compute_dtype: torch.dtype):\n super().__init__()\n self.dtype = compute_dtype\n\n self.wi_fused = DenseGeneral(\n in_shapes=(embed_dim,),\n out_features=(2, intermediate_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n self.wo = DenseGeneral(\n in_shapes=(intermediate_dim,),\n out_features=(embed_dim,),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass.\"\"\"\n fused_x = self.wi_fused(x)\n\n gate = fused_x[..., 0, :]\n up = fused_x[..., 1, :]\n\n hidden = torch.mul(F.silu(gate), up).to(self.dtype)\n\n output = self.wo(hidden)\n return output\n\n\nclass RotaryEmbedding(nn.Module):\n \"\"\"Rotary Position Embedding (RoPE) implementation in PyTorch.\"\"\"\n\n def __init__(\n self,\n embedding_dims: int,\n min_timescale: float = 1.0,\n max_timescale: float = 10000.0,\n dtype: torch.dtype = torch.float32,\n ):\n super().__init__()\n if embedding_dims % 2 != 0:\n raise ValueError(\"Embedding dim must be even for RoPE.\")\n self.embedding_dims = embedding_dims\n self.min_timescale = min_timescale\n self.max_timescale = max_timescale\n 
self.compute_dtype = dtype\n\n half_embedding_dim = embedding_dims // 2\n fraction = (2.0 * torch.arange(0, half_embedding_dim)) / embedding_dims\n timescale = (self.min_timescale * (self.max_timescale / self.min_timescale) ** fraction).to(torch.float32)\n self.register_buffer(\"timescale\", timescale, persistent=False)\n\n def forward(self, inputs: torch.Tensor, position: torch.Tensor):\n \"\"\"Applies RoPE.\"\"\"\n position = position.unsqueeze(-1).unsqueeze(-1)\n sinusoid_inp = position / self.timescale\n sin = torch.sin(sinusoid_inp)\n cos = torch.cos(sinusoid_inp)\n first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)\n first_part = first_half * cos - second_half * sin\n second_part = second_half * cos + first_half * sin\n return torch.cat(\n (first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)),\n dim=-1,\n )\n\n def apply_rope(self, inputs: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor):\n first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)\n first_part = first_half * cos - second_half * sin\n second_part = second_half * cos + first_half * sin\n return torch.cat((first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)), dim=-1)\n\n\ndef custom_scaled_dot_product_attention(\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n attn_mask: torch.Tensor | None = None,\n scale: float = 1.0,\n is_causal: bool = False,\n num_gqa_groups: int = 1,\n) -> torch.Tensor:\n \"\"\"\n Custom scaled dot-product attention with GQA support for MPS compatibility.\n\n Args:\n query: (B, N_q, T, H) - Query tensor, N_q = num_query_heads\n key: (B, N_kv, S, H) - Key tensor, N_kv = num_kv_heads\n value: (B, N_kv, S, H) - Value tensor\n attn_mask: (B, 1, T, S) - Attention mask, optional\n scale: Scaling factor for attention scores\n is_causal: If True, apply causal masking\n num_gqa_groups: Number of query groups per KV head (N_q / N_kv)\n\n Returns:\n output: (B, N_q, T, H) - Attention 
output\n \"\"\"\n B, N_q, T, H = query.shape\n _, N_kv, S, _ = key.shape\n\n # For GQA, repeat key and value tensors to match query heads\n if num_gqa_groups > 1:\n key = key.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)\n value = value.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)\n\n # Compute attention scores: (B, N_q, T, H) @ (B, N_q, H, S) -> (B, N_q, T, S)\n scores = torch.matmul(query, key.transpose(-1, -2)) * scale\n\n # Apply causal mask if needed\n if is_causal:\n causal_mask = torch.tril(torch.ones(T, S, dtype=torch.bool, device=query.device))\n scores = scores.masked_fill(~causal_mask, float(\"-inf\"))\n\n # Apply attention mask if provided\n if attn_mask is not None:\n scores = scores.masked_fill(~attn_mask, float(\"-inf\"))\n\n # Softmax over the last dimension (S)\n attn_weights = F.softmax(scores, dim=-1)\n\n # Compute output: (B, N_q, T, S) @ (B, N_q, S, H) -> (B, N_q, T, H)\n output = torch.matmul(attn_weights, value)\n\n return output\n\n\nclass CrossAttention(nn.Module):\n \"\"\"Cross-Attention using DenseGeneral.\"\"\"\n\n def __init__(\n self,\n config: EncoderConfig | DecoderConfig,\n q_embed_dim: int,\n kv_embed_dim: int,\n num_query_heads: int,\n num_kv_heads: int,\n head_dim: int,\n compute_dtype: torch.dtype,\n out_embed_dim: int | None = None,\n ):\n super().__init__()\n self.num_query_heads = num_query_heads\n self.num_kv_heads = num_kv_heads\n self.head_dim = head_dim\n self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim\n self.projected_query_dim = num_query_heads * head_dim\n if num_query_heads % num_kv_heads != 0:\n raise ValueError(f\"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})\")\n self.num_gqa_groups = num_query_heads // num_kv_heads\n\n # --- Projection Layers using DenseGeneral ---\n self.q_proj = DenseGeneral(\n in_shapes=(q_embed_dim,),\n out_features=(num_query_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n 
self.k_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.v_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.o_proj = DenseGeneral(\n in_shapes=(num_query_heads, head_dim),\n out_features=(self.output_dim,),\n axis=(-2, -1),\n weight_dtype=compute_dtype,\n )\n\n # --- Rotary Embedding ---\n self.rotary_emb = RotaryEmbedding(\n embedding_dims=self.head_dim,\n max_timescale=config.rope_theta,\n dtype=compute_dtype,\n )\n\n def forward(\n self,\n Xq: torch.Tensor, # (B, T, D) T = 1 in AR generation\n q_positions: torch.Tensor, # (B, T)\n kv_positions: torch.Tensor | None = None, # (B, S)\n attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others\n cache: KVCache | None = None, # None in Encoder, KVCache in Decoder\n is_causal: bool = False,\n ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] | None]:\n \"\"\"\n Performs attention calculation with optional KV caching.\n\n Args:\n Xq: Query tensor (B, T, D). T=1 during single-step decoding.\n Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.\n q_positions: Positions for queries (B, T).\n kv_positions: Positions for keys/values (B, S). If None, uses q_positions.\n attn_mask: Attention mask.\n cache: KVCache.\n\n Returns:\n A tuple containing:\n - output: The attention output tensor (B, T, output_dim).\n - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. 
For cross-attn, S_new = S_kv.\n \"\"\"\n if kv_positions is None:\n kv_positions = q_positions\n original_dtype = Xq.dtype\n\n Xq_BxTxNxH = self.q_proj(Xq)\n Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)\n\n attn_k: torch.Tensor | None = cache.k if cache is not None else None\n attn_v: torch.Tensor | None = cache.v if cache is not None else None\n\n # Use custom attention for MPS backend, otherwise use optimized PyTorch function\n is_mps = Xq.device.type == \"mps\" and torch.backends.mps.is_available()\n if is_mps:\n attn_output = custom_scaled_dot_product_attention(\n query=Xq_BxNxTxH,\n key=attn_k,\n value=attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n is_causal=is_causal,\n num_gqa_groups=self.num_gqa_groups,\n )\n else:\n attn_output = F.scaled_dot_product_attention(\n Xq_BxNxTxH,\n attn_k,\n attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n enable_gqa=self.num_gqa_groups > 1,\n is_causal=is_causal,\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)\n output = self.o_proj(attn_output)\n\n return output.to(original_dtype)\n\n\nclass FusedQKV(nn.Module):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = False,\n num_q_heads: int = 1,\n q_head_dim: int = 1,\n num_kv_heads: int = 1,\n kv_head_dim: int = 1,\n ):\n super().__init__()\n self.num_q_heads = num_q_heads\n self.q_head_dim = q_head_dim\n self.num_kv_heads = num_kv_heads\n self.kv_head_dim = kv_head_dim\n self.q_output_dim = num_q_heads * q_head_dim\n self.kv_output_dim = num_kv_heads * kv_head_dim\n self.linear = nn.Linear(in_features, out_features, bias=bias)\n\n def forward(self, inputs: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n x = self.linear(inputs)\n\n q, k, v = x.split([self.q_output_dim, self.kv_output_dim, self.kv_output_dim], dim=-1)\n\n q = q.reshape(q.shape[:-1] + (self.num_q_heads, self.q_head_dim))\n k = k.reshape(k.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))\n 
v = v.reshape(v.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))\n\n return q, k, v\n\n\nclass SelfAttention(nn.Module):\n \"\"\"Attention using DenseGeneral.\"\"\"\n\n def __init__(\n self,\n config: EncoderConfig | DecoderConfig,\n q_embed_dim: int,\n kv_embed_dim: int,\n num_query_heads: int,\n num_kv_heads: int,\n head_dim: int,\n compute_dtype: torch.dtype,\n out_embed_dim: int | None = None,\n ):\n super().__init__()\n self.num_query_heads = num_query_heads\n self.num_kv_heads = num_kv_heads\n self.head_dim = head_dim\n self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim\n self.projected_query_dim = num_query_heads * head_dim\n if num_query_heads % num_kv_heads != 0:\n raise ValueError(f\"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})\")\n self.num_gqa_groups = num_query_heads // num_kv_heads\n self.kv_embed_dim = kv_embed_dim\n self.q_embed_dim = q_embed_dim\n\n # --- Projection Layers using DenseGeneral ---\n self.q_proj = DenseGeneral(\n in_shapes=(q_embed_dim,),\n out_features=(num_query_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.k_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.v_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.o_proj = DenseGeneral(\n in_shapes=(num_query_heads, head_dim),\n out_features=(self.output_dim,),\n axis=(-2, -1),\n weight_dtype=compute_dtype,\n )\n\n # --- Rotary Embedding ---\n self.rotary_emb = RotaryEmbedding(\n embedding_dims=self.head_dim,\n max_timescale=config.rope_theta,\n dtype=compute_dtype,\n )\n\n self.is_fused_qkv = False\n\n def get_linear_weight(self, dense: DenseGeneral):\n W_dg = dense.weight.data\n\n out_features = 1\n input_features = 1\n for dim in dense.out_features:\n out_features *= dim\n for dim in 
dense.in_shapes:\n input_features *= dim\n\n W_dg_reshaped_for_linear_T = W_dg.reshape(input_features, out_features)\n linear_weight = W_dg_reshaped_for_linear_T.transpose(0, 1).contiguous()\n return linear_weight\n\n def patch_fused_qkv(self):\n q_proj_weight = self.get_linear_weight(self.q_proj)\n k_proj_weight = self.get_linear_weight(self.k_proj)\n v_proj_weight = self.get_linear_weight(self.v_proj)\n\n self.qkv = FusedQKV(\n self.kv_embed_dim,\n (self.num_query_heads * self.head_dim + 2 * (self.num_kv_heads * self.head_dim)),\n bias=False,\n num_q_heads=self.num_query_heads,\n q_head_dim=self.head_dim,\n num_kv_heads=self.num_kv_heads,\n kv_head_dim=self.head_dim,\n )\n self.qkv.linear.weight.data = torch.cat([q_proj_weight, k_proj_weight, v_proj_weight], dim=0)\n\n # print(f\"qkv.weight.shape: {self.qkv.linear.weight.shape}\")\n self.is_fused_qkv = True\n\n def forward(\n self,\n X: torch.Tensor, # (B, T, D) T = 1 in AR generation\n q_positions: torch.Tensor, # (B, T)\n kv_positions: torch.Tensor | None = None, # (B, S)\n attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others\n cache: KVCache | None = None, # None in Encoder, KVCache in Decoder\n prefill: bool = False,\n is_causal: bool = False,\n current_idx: torch.Tensor | None = None,\n ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] | None]:\n \"\"\"\n Performs attention calculation with optional KV caching.\n Args:\n Xq: Query tensor (B, T, D). T=1 during single-step decoding.\n Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.\n q_positions: Positions for queries (B, T).\n kv_positions: Positions for keys/values (B, S). If None, uses q_positions.\n attn_mask: Attention mask.\n cache: KVCache.\n prefill: If True, use prefill mode.\n Returns:\n A tuple containing:\n - output: The attention output tensor (B, T, output_dim).\n - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). 
For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.\n \"\"\"\n if kv_positions is None:\n kv_positions = q_positions\n\n original_dtype = X.dtype\n\n if self.is_fused_qkv:\n Xq_BxTxNxH, Xk_BxSxKxH, Xv_BxSxKxH = self.qkv(X)\n else:\n Xq_BxTxNxH = self.q_proj(X)\n Xk_BxSxKxH = self.k_proj(X)\n Xv_BxSxKxH = self.v_proj(X)\n\n position = q_positions.unsqueeze(-1).unsqueeze(-1)\n sinusoid_inp = position / self.rotary_emb.timescale\n sin = torch.sin(sinusoid_inp)\n cos = torch.cos(sinusoid_inp)\n\n Xq_BxTxNxH = self.rotary_emb.apply_rope(Xq_BxTxNxH, sin, cos)\n Xk_BxSxKxH = self.rotary_emb.apply_rope(Xk_BxSxKxH, sin, cos)\n\n Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)\n\n attn_k: torch.Tensor | None = cache.k if cache is not None else None\n attn_v: torch.Tensor | None = cache.v if cache is not None else None\n\n Xk_BxKxSxH = Xk_BxSxKxH.transpose(1, 2) # (B, K, S, H)\n Xv_BxKxSxH = Xv_BxSxKxH.transpose(1, 2) # (B, K, S, H)\n\n if cache is None:\n attn_k = Xk_BxKxSxH\n attn_v = Xv_BxKxSxH\n elif prefill:\n attn_k, attn_v = Xk_BxKxSxH, Xv_BxKxSxH\n cache.prefill(attn_k, attn_v)\n else:\n attn_k, attn_v = cache.update(Xk_BxKxSxH, Xv_BxKxSxH, current_idx)\n\n # Use custom attention for MPS backend, otherwise use optimized PyTorch function\n is_mps = Xv_BxSxKxH.device.type == \"mps\" and torch.backends.mps.is_available()\n if is_mps:\n attn_output = custom_scaled_dot_product_attention(\n query=Xq_BxNxTxH,\n key=attn_k,\n value=attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n is_causal=is_causal,\n num_gqa_groups=self.num_gqa_groups,\n )\n else:\n attn_output = F.scaled_dot_product_attention(\n Xq_BxNxTxH,\n attn_k,\n attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n enable_gqa=self.num_gqa_groups > 1,\n is_causal=is_causal,\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)\n output = self.o_proj(attn_output)\n\n return output.to(original_dtype)\n\n\nclass EncoderLayer(nn.Module):\n 
\"\"\"Transformer Encoder Layer using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n enc_config = config.encoder_config\n embed_dim = enc_config.hidden_size\n self.compute_dtype = compute_dtype\n\n self.pre_sa_norm = RMSNorm(\n embed_dim,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n self.self_attention = SelfAttention(\n enc_config,\n q_embed_dim=embed_dim,\n kv_embed_dim=embed_dim,\n num_query_heads=enc_config.num_attention_heads,\n num_kv_heads=enc_config.num_key_value_heads,\n head_dim=enc_config.head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=embed_dim,\n )\n self.post_sa_norm = RMSNorm(\n embed_dim,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n self.mlp = MlpBlock(\n embed_dim=embed_dim,\n intermediate_dim=enc_config.intermediate_size,\n compute_dtype=compute_dtype,\n )\n\n def forward(\n self,\n x: torch.Tensor,\n state: EncoderInferenceState,\n ) -> torch.Tensor:\n residual = x\n x_norm = self.pre_sa_norm(x).to(self.compute_dtype)\n\n sa_out = self.self_attention(\n X=x_norm,\n q_positions=state.positions,\n kv_positions=state.positions,\n attn_mask=state.attn_mask,\n )\n x = residual + sa_out\n\n residual = x\n x_norm = self.post_sa_norm(x).to(self.compute_dtype)\n mlp_out = self.mlp(x_norm)\n x = residual + mlp_out\n\n return x\n\n\nclass Encoder(nn.Module):\n \"\"\"Transformer Encoder Stack using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n enc_config = config.encoder_config\n self.compute_dtype = compute_dtype\n\n self.embedding = nn.Embedding(\n enc_config.vocab_size,\n enc_config.hidden_size,\n dtype=compute_dtype,\n )\n self.layers = nn.ModuleList([EncoderLayer(config, compute_dtype) for _ in range(enc_config.num_hidden_layers)])\n self.norm = RMSNorm(\n enc_config.hidden_size,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n\n def 
forward(\n self,\n x_ids: torch.Tensor,\n state: EncoderInferenceState,\n ) -> torch.Tensor:\n x = self.embedding(x_ids)\n\n for layer in self.layers:\n x = layer(x, state)\n\n x = self.norm(x).to(self.compute_dtype)\n return x\n\n\nclass DecoderLayer(nn.Module):\n \"\"\"Transformer Decoder Layer using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n dec_config = config.decoder_config\n enc_config = config.encoder_config\n dec_embed_dim = dec_config.hidden_size\n enc_embed_dim = enc_config.hidden_size\n self.compute_dtype = compute_dtype\n\n # Norms\n self.pre_sa_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n self.pre_ca_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n self.pre_mlp_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n\n # Self-Attention (GQA) with Causal Masking\n self.self_attention = SelfAttention(\n dec_config,\n q_embed_dim=dec_embed_dim,\n kv_embed_dim=dec_embed_dim,\n num_query_heads=dec_config.num_attention_heads,\n num_kv_heads=dec_config.num_key_value_heads,\n head_dim=dec_config.head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=dec_embed_dim,\n )\n # Cross-Attention (MHA)\n self.cross_attention = CrossAttention(\n dec_config,\n q_embed_dim=dec_embed_dim,\n kv_embed_dim=enc_embed_dim, # Note kv_embed_dim\n num_query_heads=dec_config.cross_num_attention_heads,\n num_kv_heads=dec_config.cross_num_key_value_heads,\n head_dim=dec_config.cross_head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=dec_embed_dim,\n )\n # MLP\n self.mlp = MlpBlock(\n embed_dim=dec_embed_dim,\n intermediate_dim=dec_config.intermediate_size,\n compute_dtype=compute_dtype,\n )\n\n def forward(\n self,\n x: torch.Tensor,\n state: DecoderInferenceState,\n self_attn_cache: KVCache | None = None,\n cross_attn_cache: KVCache | None = None,\n prefill: bool = 
False,\n current_idx: int = 0,\n ) -> torch.Tensor:\n residual = x\n x_norm = self.pre_sa_norm(x).to(self.compute_dtype)\n\n self_attn_mask = state.casual_attn_mask[None, None, current_idx]\n\n sa_out = self.self_attention(\n X=x_norm, # (2, 1, D)\n q_positions=state.dec_positions, # (2, 1)\n kv_positions=state.dec_positions, # (2, 1)\n attn_mask=self_attn_mask,\n cache=self_attn_cache,\n prefill=prefill,\n is_causal=prefill,\n current_idx=current_idx,\n )\n\n x = residual + sa_out\n\n residual = x\n x_norm = self.pre_ca_norm(x).to(self.compute_dtype)\n ca_out = self.cross_attention(\n Xq=x_norm,\n q_positions=state.dec_positions,\n kv_positions=state.enc_positions,\n attn_mask=state.cross_attn_mask,\n cache=cross_attn_cache,\n )\n x = residual + ca_out\n\n residual = x\n x_norm = self.pre_mlp_norm(x).to(self.compute_dtype)\n mlp_out = self.mlp(x_norm)\n x = residual + mlp_out\n\n return x\n\n\nclass Decoder(nn.Module):\n \"\"\"Transformer Decoder Stack using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n dec_config = config.decoder_config\n self.num_channels = dec_config.num_channels\n self.num_layers = dec_config.num_hidden_layers\n\n self.embeddings = nn.ModuleList(\n [\n nn.Embedding(dec_config.vocab_size, dec_config.hidden_size, dtype=compute_dtype)\n for _ in range(self.num_channels)\n ]\n )\n self.layers = nn.ModuleList(\n [DecoderLayer(config=config, compute_dtype=compute_dtype) for _ in range(self.num_layers)]\n )\n\n self.norm = RMSNorm(\n dec_config.hidden_size,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n\n self.logits_dense = DenseGeneral(\n in_shapes=(dec_config.hidden_size,),\n out_features=(self.num_channels, dec_config.vocab_size),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n def precompute_cross_attn_cache(\n self,\n enc_out: torch.Tensor, # (B, S, E)\n ) -> list[KVCache]:\n \"\"\"\n Computes the Key and Value tensors for cross-attention for 
each layer from the encoder output.\n \"\"\"\n per_layer_kv_cache: list[KVCache] = []\n\n for layer in self.layers:\n cross_attn_module = layer.cross_attention\n k_proj = cross_attn_module.k_proj(enc_out)\n v_proj = cross_attn_module.v_proj(enc_out)\n\n k = k_proj.transpose(1, 2)\n v = v_proj.transpose(1, 2)\n\n per_layer_kv_cache.append(KVCache.from_kv(k, v))\n\n return per_layer_kv_cache\n\n def decode_step(\n self,\n tgt_ids_Bx1xC: torch.Tensor, # [B, 1, C]\n state: DecoderInferenceState,\n current_idx: int,\n ) -> torch.Tensor:\n \"\"\"\n Performs a single decoding step, managing KV caches layer by layer.\n Returns:\n A tuple containing:\n - logits_Bx1xCV: The final output logits for the current step (B, 1, C*V), cast to float32.\n \"\"\"\n\n x = None\n for i in range(self.num_channels):\n channel_tokens = tgt_ids_Bx1xC[..., i]\n channel_embed = self.embeddings[i](channel_tokens)\n x = channel_embed if x is None else x + channel_embed\n\n for i, layer in enumerate(self.layers):\n self_cache = state.self_attn_cache[i]\n cross_cache = state.cross_attn_cache[i]\n x = layer(\n x, # (2, 1, D)\n state,\n self_attn_cache=self_cache,\n cross_attn_cache=cross_cache,\n current_idx=current_idx,\n )\n\n x = self.norm(x)\n logits_Bx1xCxV = self.logits_dense(x)\n\n return logits_Bx1xCxV.to(torch.float32)\n\n def forward(self, tgt_ids_BxTxC: torch.Tensor, state: DecoderInferenceState) -> torch.Tensor:\n \"\"\"\n Forward pass for the Decoder stack, managing KV caches.\n Args:\n tgt_ids_BxTxC: Target token IDs (B, T, C).\n encoder_out: Output from the encoder (B, S, E).\n tgt_positions: Positions for target sequence (B, T).\n src_positions: Positions for source sequence (B, S).\n self_attn_mask: Mask for self-attention.\n cross_attn_mask: Mask for cross-attention.\n past_key_values: List containing the self-attention KV cache for each layer\n from the previous decoding step. 
`len(past_key_values)` should\n equal `num_layers`.\n precomputed_cross_attn_kv: A single tuple containing the pre-computed K/V cache\n derived from `encoder_out`. This is passed identically\n to all layers.\n Returns:\n A tuple containing:\n - logits: The final output logits (B, T, C * V), cast to float32.\n - present_key_values: A list containing the updated self-attention KV cache\n for each layer for the *current* decoding step.\n \"\"\"\n _, _, num_channels_in = tgt_ids_BxTxC.shape\n assert num_channels_in == self.num_channels, \"Input channels mismatch\"\n\n # Embeddings\n x = None\n for i in range(self.num_channels):\n channel_tokens = tgt_ids_BxTxC[..., i]\n channel_embed = self.embeddings[i](channel_tokens)\n x = channel_embed if x is None else x + channel_embed\n\n for i, layer in enumerate(self.layers):\n self_cache = state.self_attn_cache[i]\n cross_cache = state.cross_attn_cache[i]\n x = layer(\n x,\n state,\n self_attn_cache=self_cache,\n cross_attn_cache=cross_cache,\n prefill=True,\n )\n\n # Final Norm\n x = self.norm(x)\n logits_BxTxCxV = self.logits_dense(x)\n\n return logits_BxTxCxV.to(torch.float32)\n\n\nclass DiaModel(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/nari-labs/dia\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n coders={\n DiaConfig: (\n lambda x: x.model_dump(),\n lambda data: DiaConfig.model_validate(data),\n ),\n },\n):\n \"\"\"PyTorch Dia Model using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n self.encoder = Encoder(config, compute_dtype)\n self.decoder = Decoder(config, compute_dtype)\n"], ["/dia/dia/state.py", "from dataclasses import dataclass\nfrom typing import Optional\n\nimport torch\n\nfrom .config import DiaConfig\n\n\ndef create_attn_mask(\n q_padding_mask_1d: torch.Tensor,\n k_padding_mask_1d: torch.Tensor,\n device: torch.device,\n is_causal: bool = False,\n) -> torch.Tensor:\n 
\"\"\"\n Creates the attention mask (self or cross) mimicking JAX segment ID logic.\n \"\"\"\n # B1, Tq = q_padding_mask_1d.shape\n # B2, Tk = k_padding_mask_1d.shape\n\n p_mask_q = q_padding_mask_1d.unsqueeze(2) # Shape [B, Tq, 1]\n p_mask_k = k_padding_mask_1d.unsqueeze(1) # Shape [B, 1, Tk]\n\n # Condition A: Non-padding query attends to non-padding key\n non_pad_attends_non_pad = p_mask_q & p_mask_k # Shape [B, Tq, Tk]\n\n # Condition B: Padding query attends to padding key\n pad_attends_pad = (~p_mask_q) & (~p_mask_k) # Shape [B, Tq, Tk]\n\n # Combine: True if padding status is compatible (both non-pad OR both pad)\n mask = non_pad_attends_non_pad | pad_attends_pad # Shape [B, Tq, Tk]\n\n if is_causal:\n # assert Tq == Tk, \"Causal mask requires query and key sequence lengths to be equal\"\n causal_mask_2d = torch.tril(torch.ones_like(mask[0], dtype=torch.bool, device=device)) # Shape [B, Tq, Tk]\n causal_mask = mask & causal_mask_2d # Shape [B, Tq, Tk]\n return causal_mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]\n else:\n return mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]\n\n\n@dataclass\nclass EncoderInferenceState:\n \"\"\"Parameters specifically for encoder inference.\"\"\"\n\n max_seq_len: int\n device: torch.device\n positions: torch.Tensor\n padding_mask: torch.Tensor\n attn_mask: torch.Tensor\n\n @classmethod\n def new(cls, config: DiaConfig, cond_src: torch.Tensor) -> \"EncoderInferenceState\":\n \"\"\"Creates EtorchrInferenceParams from DiaConfig and a device.\"\"\"\n device = cond_src.device\n\n positions = torch.arange(\n config.encoder_config.max_position_embeddings, dtype=torch.float32, device=device\n ).unsqueeze(0)\n padding_mask = (cond_src.squeeze(1) != 0).to(device).repeat_interleave(2, dim=0)\n attn_mask = create_attn_mask(padding_mask, padding_mask, device, is_causal=False)\n\n return cls(\n max_seq_len=config.encoder_config.max_position_embeddings,\n device=device,\n positions=positions,\n padding_mask=padding_mask,\n attn_mask=attn_mask,\n 
)\n\n\nclass KVCache(torch.nn.Module):\n k: torch.Tensor\n v: torch.Tensor\n\n def __init__(\n self,\n batch_size: int,\n num_heads: int,\n max_len: int,\n head_dim: int,\n dtype: torch.dtype,\n device: torch.device,\n k: torch.Tensor | None = None,\n v: torch.Tensor | None = None,\n ):\n k = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if k is None else k\n v = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if v is None else v\n super().__init__()\n\n self.register_buffer(\"k\", k)\n self.register_buffer(\"v\", v)\n\n @classmethod\n def from_kv(cls, k: torch.Tensor, v: torch.Tensor) -> \"KVCache\":\n return cls(\n batch_size=k.shape[0] // 2,\n num_heads=k.shape[1],\n max_len=k.shape[2],\n head_dim=k.shape[3],\n dtype=k.dtype,\n device=k.device,\n k=k,\n v=v,\n )\n\n def update(self, k: torch.Tensor, v: torch.Tensor, current_idx: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n k_out, v_out = self.k, self.v\n k_out[:, :, current_idx, :] = k\n v_out[:, :, current_idx, :] = v\n return self.k, self.v\n\n def prefill(self, k: torch.Tensor, v: torch.Tensor):\n prefill_len = k.shape[2]\n self.k[:, :, :prefill_len, :] = k\n self.v[:, :, :prefill_len, :] = v\n\n\n@dataclass\nclass DecoderInferenceState:\n \"\"\"Parameters specifically for decoder inference.\"\"\"\n\n device: torch.device\n dtype: torch.dtype\n enc_out: torch.Tensor\n enc_positions: torch.Tensor\n dec_positions: torch.Tensor\n self_attn_cache: list[KVCache]\n cross_attn_cache: list[KVCache]\n casual_attn_mask: torch.Tensor\n cross_attn_mask: torch.Tensor\n\n @classmethod\n def new(\n cls,\n config: DiaConfig,\n enc_state: EncoderInferenceState,\n enc_out: torch.Tensor,\n dec_cross_attn_cache: list[KVCache],\n compute_dtype: torch.dtype,\n max_generation_length: Optional[int] = None,\n ) -> \"DecoderInferenceState\":\n \"\"\"Creates DecoderInferenceParams from DiaConfig and a device.\"\"\"\n device = enc_out.device\n 
max_audio_len = max_generation_length or config.decoder_config.max_position_embeddings\n batch_size = enc_out.shape[0] // 2\n\n dec_positions = torch.full((2 * batch_size, 1), fill_value=0, dtype=torch.int32, device=device)\n causal_mask = torch.tril(torch.ones(max_audio_len, max_audio_len, dtype=torch.bool, device=device))\n dec_mask = torch.ones((2 * batch_size, 1), dtype=torch.bool, device=device)\n cross_attn_mask = create_attn_mask(dec_mask, enc_state.padding_mask, device, is_causal=False)\n\n self_attn_cache = [\n KVCache(\n batch_size,\n config.decoder_config.num_key_value_heads,\n max_audio_len,\n config.decoder_config.head_dim,\n compute_dtype,\n device,\n )\n for _ in range(config.decoder_config.num_hidden_layers)\n ]\n\n return cls(\n device=device,\n dtype=compute_dtype,\n enc_out=enc_out,\n enc_positions=enc_state.positions,\n dec_positions=dec_positions,\n self_attn_cache=self_attn_cache,\n cross_attn_cache=dec_cross_attn_cache,\n casual_attn_mask=causal_mask,\n cross_attn_mask=cross_attn_mask,\n )\n\n def prepare_step(self, step_from: int, step_to: int | None = None) -> None:\n if step_to is None:\n step_to = step_from + 1\n self.dec_positions = torch.arange(step_from, step_to, dtype=torch.int32, device=self.device).unsqueeze(0)\n\n\n@dataclass\nclass DecoderOutput:\n generated_tokens: torch.Tensor\n prefill_steps: list[int]\n\n @classmethod\n def new(cls, batch_size: int, config: DiaConfig, device: torch.device) -> \"DecoderOutput\":\n max_audio_len = config.decoder_config.max_position_embeddings\n return cls(\n generated_tokens=torch.full(\n (batch_size, max_audio_len, config.decoder_config.num_channels),\n fill_value=-1,\n dtype=torch.int,\n device=device,\n ),\n prefill_steps=[],\n )\n\n def get_tokens_at(self, step_from: int, step_to: int | None = None) -> torch.Tensor:\n if step_to is None:\n step_to = step_from + 1\n return self.generated_tokens[:, step_from:step_to, :]\n\n def update_one(self, dec_out: torch.Tensor, step: int, apply_mask: 
bool = False):\n dec_out = dec_out.to(self.generated_tokens.dtype)\n if apply_mask:\n mask = self.generated_tokens[:, step, :] == -1\n self.generated_tokens[:, step, :] = torch.where(mask, dec_out, self.generated_tokens[:, step, :])\n else:\n self.generated_tokens[:, step, :] = dec_out\n\n def prefill(self, dec_out: torch.Tensor, prefill_steps: list[int]):\n length = dec_out.shape[1]\n self.generated_tokens[:, :length, :] = dec_out\n self.prefill_steps = prefill_steps\n"], ["/dia/dia/config.py", "\"\"\"Configuration management module for the Dia model.\n\nThis module provides comprehensive configuration management for the Dia model,\nutilizing Pydantic for validation. It defines configurations for data processing,\nmodel architecture (encoder and decoder), and training settings.\n\nKey components:\n- DataConfig: Parameters for data loading and preprocessing.\n- EncoderConfig: Architecture details for the encoder module.\n- DecoderConfig: Architecture details for the decoder module.\n- ModelConfig: Combined model architecture settings.\n- TrainingConfig: Training hyperparameters and settings.\n- DiaConfig: Master configuration combining all components.\n\"\"\"\n\nimport os\n\nfrom pydantic import BaseModel, Field\n\n\nclass EncoderConfig(BaseModel, frozen=True):\n \"\"\"Configuration for the encoder component of the Dia model.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia_encoder\".\n hidden_size: Size of the encoder layers, defaults to 1024.\n intermediate_size: Size of the \"intermediate\" (i.e., feed-forward) layer in the encoder, defaults to 4096.\n num_hidden_layers: Number of hidden layers in the encoder, defaults to 12.\n num_attention_heads: Number of attention heads in the encoder, defaults to 16.\n num_key_value_heads: Number of key-value heads in the encoder, defaults to 16.\n head_dim: Dimension of each attention head, defaults to 128.\n hidden_act: Activation function in the encoder, defaults to \"silu\".\n max_position_embeddings: 
Maximum number of position embeddings, defaults to 1024.\n initializer_range: Range for initializing weights, defaults to 0.02.\n norm_eps: Epsilon value for normalization layers, defaults to 1e-5.\n rope_theta: Theta value for RoPE, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE.\n vocab_size: Vocabulary size, defaults to 256.\n \"\"\"\n\n head_dim: int = Field(default=128, gt=0)\n hidden_act: str = Field(default=\"silu\")\n hidden_size: int = Field(default=1024, gt=0)\n initializer_range: float = Field(default=0.02)\n intermediate_size: int = Field(default=4096, gt=0)\n max_position_embeddings: int = Field(default=1024, gt=0)\n model_type: str = Field(default=\"dia_encoder\")\n norm_eps: float = Field(default=1e-5)\n num_attention_heads: int = Field(default=16, gt=0)\n num_hidden_layers: int = Field(default=12, gt=0)\n num_key_value_heads: int = Field(default=16, gt=0)\n rope_scaling: float | None = Field(default=None)\n rope_theta: float = Field(default=10000.0)\n vocab_size: int = Field(default=256, gt=0)\n\n\nclass DecoderConfig(BaseModel, frozen=True):\n \"\"\"Configuration for the decoder component of the Dia model.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia_decoder\".\n hidden_size: Size of the decoder layers, defaults to 2048.\n intermediate_size: Size of the \"intermediate\" (i.e., feed-forward) layer in the decoder, defaults to 8192.\n num_hidden_layers: Number of hidden layers in the decoder, defaults to 18.\n num_attention_heads: Number of attention heads in the decoder, defaults to 16.\n num_key_value_heads: Number of key-value heads in the decoder, defaults to 4.\n head_dim: Dimension of each attention head, defaults to 128.\n cross_hidden_size: Size of the cross-attention layers, defaults to 1024.\n cross_num_attention_heads: Number of attention heads in the cross-attention mechanism, defaults to 16.\n cross_num_key_value_heads: Number of key-value heads in the cross-attention mechanism, defaults to 
16.\n cross_head_dim: Dimension of each cross-attention head, defaults to 128.\n hidden_act: Activation function in the decoder, defaults to \"silu\".\n max_position_embeddings: Maximum number of position embeddings in the decoder, defaults to 3072.\n initializer_range: Range for initializing weights in the decoder, defaults to 0.02.\n norm_eps: Epsilon value for normalization layers in the decoder, defaults to 1e-5.\n rope_theta: Theta value for RoPE in the decoder, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE in the decoder.\n vocab_size: Vocabulary size for the decoder, defaults to 1028.\n num_channels: Number of channels in the decoder, defaults to 9.\n \"\"\"\n\n cross_head_dim: int = Field(default=128, gt=0)\n cross_hidden_size: int = Field(default=1024, gt=0)\n cross_num_attention_heads: int = Field(default=16, gt=0)\n cross_num_key_value_heads: int = Field(default=16, gt=0)\n head_dim: int = Field(default=128, gt=0)\n hidden_act: str = Field(default=\"silu\")\n hidden_size: int = Field(default=2048, gt=0)\n initializer_range: float = Field(default=0.02)\n intermediate_size: int = Field(default=8192, gt=0)\n max_position_embeddings: int = Field(default=3072, gt=0)\n model_type: str = Field(default=\"dia_decoder\")\n norm_eps: float = Field(default=1e-5)\n num_attention_heads: int = Field(default=16, gt=0)\n num_channels: int = Field(default=9, gt=0)\n num_hidden_layers: int = Field(default=18, gt=0)\n num_key_value_heads: int = Field(default=4, gt=0)\n rope_scaling: float | None = Field(default=None)\n rope_theta: float = Field(default=10000.0)\n vocab_size: int = Field(default=1028, gt=0)\n\n\nclass DiaConfig(BaseModel, frozen=True):\n \"\"\"Main configuration container for the Dia model architecture.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia\".\n is_encoder_decoder: Flag indicating if the model is an encoder-decoder type, defaults to True.\n encoder: Configuration for the encoder component.\n decoder: 
Configuration for the decoder component.\n src_vocab_size: Size of the source (text) vocabulary.\n tgt_vocab_size: Size of the target (audio code) vocabulary.\n initializer_range: Range for initializing weights, defaults to 0.02.\n norm_eps: Epsilon value for normalization layers, defaults to 1e-5.\n torch_dtype: Data type for model weights in PyTorch, defaults to \"float32\".\n bos_token_id: Beginning-of-sequence token ID, defaults to 1026.\n eos_token_id: End-of-sequence token ID, defaults to 1024.\n pad_token_id: Padding token ID, defaults to 1025.\n rope_theta: Theta value for RoPE, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE.\n transformers_version: Version of the transformers library, defaults to \"4.53.0.dev0\".\n architectures: List of model architectures, defaults to [\"DiaForConditionalGeneration\"].\n delay_pattern: List of delay values for each audio channel, defaults to [0,8,9,10,11,12,13,14,15].\n \"\"\"\n\n architectures: list[str] = Field(default_factory=lambda: [\"DiaForConditionalGeneration\"])\n bos_token_id: int = Field(default=1026)\n decoder_config: DecoderConfig\n delay_pattern: list[int] = Field(default_factory=lambda: [0, 8, 9, 10, 11, 12, 13, 14, 15])\n encoder_config: EncoderConfig\n eos_token_id: int = Field(default=1024)\n initializer_range: float = Field(default=0.02)\n is_encoder_decoder: bool = Field(default=True)\n model_type: str = Field(default=\"dia\")\n norm_eps: float = Field(default=1e-5)\n pad_token_id: int = Field(default=1025)\n torch_dtype: str = Field(default=\"float32\")\n transformers_version: str = Field(default=\"4.53.0.dev0\")\n\n def save(self, path: str) -> None:\n \"\"\"Save the current configuration instance to a JSON file.\n\n Ensures the parent directory exists and the file has a .json extension.\n\n Args:\n path: The target file path to save the configuration.\n\n Raises:\n ValueError: If the path is not a file with a .json extension.\n \"\"\"\n os.makedirs(os.path.dirname(path), 
exist_ok=True)\n config_json = self.model_dump_json(indent=2)\n with open(path, \"w\") as f:\n f.write(config_json)\n\n @classmethod\n def load(cls, path: str) -> \"DiaConfig | None\":\n \"\"\"Load and validate a Dia configuration from a JSON file.\n\n Args:\n path: The path to the configuration file.\n\n Returns:\n A validated DiaConfig instance if the file exists and is valid,\n otherwise None if the file is not found.\n\n Raises:\n ValueError: If the path does not point to an existing .json file.\n pydantic.ValidationError: If the JSON content fails validation against the DiaConfig schema.\n \"\"\"\n try:\n with open(path, \"r\") as f:\n content = f.read()\n return cls.model_validate_json(content)\n except FileNotFoundError:\n return None\n"], ["/dia/example/voice_clone.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\n# You should put the transcript of the voice you want to clone\n# We will use the audio created by running simple.py as an example.\n# Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.\nclone_from_text = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\nclone_from_audio = \"simple.mp3\"\n\n# For your custom needs, replace above with below and add your audio file to this directory:\n# clone_from_text = \"[S1] ... [S2] ... [S1] ... corresponding to your_audio_name.mp3\"\n# clone_from_audio = \"your_audio_name.mp3\"\n\n# Text to generate\ntext_to_generate = \"[S1] Hello, how are you? [S2] I'm good, thank you. [S1] What's your name? [S2] My name is Dia. [S1] Nice to meet you. 
[S2] Nice to meet you too.\"\n\n# It will only return the audio from the text_to_generate\noutput = model.generate(\n clone_from_text + text_to_generate,\n audio_prompt=clone_from_audio,\n use_torch_compile=False,\n verbose=True,\n cfg_scale=4.0,\n temperature=1.8,\n top_p=0.90,\n cfg_filter_top_k=50,\n)\n\nmodel.save_audio(\"voice_clone.mp3\", output)\n"], ["/dia/example/voice_clone_batch.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\n# You should put the transcript of the voice you want to clone\n# We will use the audio created by running simple.py as an example.\n# Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.\nclone_from_text = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\n# For your custom needs, replace above with below and add your audio file to this directory:\n# clone_from_text = \"[S1] ... [S2] ... [S1] ... corresponding to your_audio_name.mp3\"\n# clone_from_audio = \"your_audio_name.mp3\"\n\n# Text to generate\ntext_to_generate = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\nclone_from_audios = [f\"simple_{i}.mp3\" for i in range(10)]\n\ntexts = [clone_from_text + text_to_generate for _ in range(10)]\n\n# It will only return the audio from the text_to_generate\noutput = model.generate(texts, audio_prompt=clone_from_audios, use_torch_compile=True, verbose=True, max_tokens=2000)\n\nfor i, o in enumerate(output):\n model.save_audio(f\"voice_clone_{i}.mp3\", o)\n"], ["/dia/example/simple-cpu.py", "import torch\n\nfrom dia.model import Dia\n\n\n# Select device: CPU\ndevice = torch.device(\"cpu\")\nprint(f\"Using device: {device}\")\n\n# Load model\nmodel = Dia.from_pretrained(\n \"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float32\", device=device\n) # Float32 works better than float16 on CPU - you can also test with float16\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\noutput = model.generate(text, use_torch_compile=False, verbose=True)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/example/benchmark.py", "from random import choice\n\nimport torch\n\nfrom dia.model import Dia\n\n\ntorch._inductor.config.coordinate_descent_tuning = True\ntorch._inductor.config.triton.unique_kernel_names = True\ntorch._inductor.config.fx_graph_cache = True\n\n# debugging\ntorch._logging.set_logs(graph_breaks=True, recompiles=True)\n\nmodel_name = \"nari-labs/Dia-1.6B-0626\"\ncompute_dtype = \"float16\"\n\nmodel = Dia.from_pretrained(model_name, compute_dtype=compute_dtype)\n\n\ntest_cases = [\n \"[S1] Dia is an open weights text to dialogue model.\",\n \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\",\n \"[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code.\",\n \"[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code. [S2] It is a new feature in PyTorch that allows you to compile your model with a single line of code.\",\n]\n\n\n# Wram up\nfor _ in range(2):\n text = choice(test_cases)\n output = model.generate(text, audio_prompt=\"./example_prompt.mp3\", use_torch_compile=True, verbose=True)\n output = model.generate(text, use_torch_compile=True, verbose=True)\n\n# Benchmark\nfor _ in range(10):\n text = choice(test_cases)\n output = model.generate(text, use_torch_compile=True, verbose=True)\n output = model.generate(text, audio_prompt=\"./example_prompt.mp3\", use_torch_compile=True, verbose=True)\n"], ["/dia/example/simple.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\noutput = model.generate(\n text,\n use_torch_compile=False,\n verbose=True,\n cfg_scale=3.0,\n temperature=1.8,\n top_p=0.90,\n cfg_filter_top_k=50,\n)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/hf.py", "from transformers import AutoProcessor, DiaForConditionalGeneration\n\n\ntorch_device = \"cuda\"\nmodel_checkpoint = \"nari-labs/Dia-1.6B-0626\"\n\ntext = [\n \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n]\nprocessor = AutoProcessor.from_pretrained(model_checkpoint)\ninputs = processor(text=text, padding=True, return_tensors=\"pt\").to(torch_device)\n\nmodel = DiaForConditionalGeneration.from_pretrained(model_checkpoint).to(torch_device)\noutputs = model.generate(**inputs, max_new_tokens=3072, guidance_scale=3.0, temperature=1.8, top_p=0.90, top_k=45)\n\noutputs = processor.batch_decode(outputs)\nprocessor.save_audio(outputs, \"example.mp3\")\n"], ["/dia/example/simple_batch.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\ntexts = [text for _ in range(10)]\n\noutput = model.generate(texts, use_torch_compile=True, verbose=True, max_tokens=1500)\n\nfor i, o in enumerate(output):\n model.save_audio(f\"simple_{i}.mp3\", o)\n"], ["/dia/example/simple-mac.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\n# It is important to set the `use_torch_compile` argument to `False` when using Dia on MacOS.\n# This is because the `torch.compile` function is not supported on MacOS.\noutput = model.generate(text, use_torch_compile=False, verbose=True)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/dia/__init__.py", "from .model import Dia\n\n\n__all__ = [\n \"Dia\",\n]\n"]], "task_instance_info": {"created_time": "2025-08-20 15:28:05", "created_task_model": "DeepSeek-R1", "code_description": "This code segment functions to: Process user-provided text and audio inputs to generate synthesized speech output through a machine learning model, including input validation, audio data preparation, model parameter configuration, execution of the generation process with performance timing, audio speed adjustment through signal resampling, and cleanup of temporary resources."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\ndef run_inference(\n text: str,\n audio_prompt_text: Optional[str],\n audio_prompt: Optional[Tuple[int, np.ndarray]],\n max_new_tokens: int,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n cfg_filter_top_k: int,\n speed_factor: float,\n seed: int,\n) -> Tuple[Optional[np.ndarray], Optional[int], str]:\n \"\"\"Main inference function that handles audio generation with error handling and logging.\"\"\"\n console_buffer = io.StringIO()\n audio_prompt_path = None\n output_audio = None\n used_seed = seed if seed not in (-1, None) else random.randint(0, 2**32 - 1)\n\n try:\n # Handle audio prompt\n if audio_prompt is not None:\n if not audio_prompt_text.strip():\n raise ValueError(\"Transcript required when using audio prompt\")\n \n with tempfile.NamedTemporaryFile(suffix=\".wav\", delete=False) as tmp_file:\n sample_rate, audio_data = audio_prompt\n sf.write(tmp_file.name, audio_data.T, sample_rate)\n audio_prompt_path = tmp_file.name\n\n # 
Seed management\n set_seed(used_seed)\n\n # Redirect console output\n with contextlib.redirect_stdout(console_buffer), contextlib.redirect_stderr(console_buffer):\n start_time = time.time()\n \n # Generate audio\n generated_audio = model.generate(\n text=text,\n audio_prompt_path=audio_prompt_path,\n transcript_prompt=audio_prompt_text if audio_prompt_path else None,\n max_new_tokens=max_new_tokens,\n cfg_scale=cfg_scale,\n temperature=temperature,\n top_p=top_p,\n cfg_filter_top_k=cfg_filter_top_k\n )\n\n # Process output\n sample_rate = model.config.sample_rate\n audio_array = generated_audio.cpu().numpy().T\n adjusted_sr = int(sample_rate * speed_factor)\n\n # Save to buffer\n with io.BytesIO() as buf:\n sf.write(buf, audio_array, adjusted_sr, format=\"WAV\")\n buf.seek(0)\n output_audio, _ = sf.read(buf, dtype=\"float32\")\n\n console_buffer.write(f\"\\nGeneration completed in {time.time()-start_time:.2f}s\")\n\n except Exception as e:\n console_buffer.write(f\"\\nERROR: {str(e)}\")\n return None, None, console_buffer.getvalue()\n finally:\n if audio_prompt_path is not None:\n Path(audio_prompt_path).unlink(missing_ok=True)\n\n return (output_audio, used_seed, console_buffer.getvalue())\n[TASK_END]\n```", "inference_time": "2025-08-20 15-29-22"}, "editdistance_info": {"edit_distance": 14.5205, "calculate_time": "2025-08-20 15:29:22", "true_code_clean": "def run_inference(\n text_input: str,\n audio_prompt_text_input: str,\n audio_prompt_input: Optional[Tuple[int, np.ndarray]],\n max_new_tokens: int,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n cfg_filter_top_k: int,\n speed_factor: float,\n seed: Optional[int] = None,\n):\n global model, device \n console_output_buffer = io.StringIO()\n with contextlib.redirect_stdout(console_output_buffer):\n if audio_prompt_input and audio_prompt_text_input and not audio_prompt_text_input.isspace():\n text_input = audio_prompt_text_input + \"\\n\" + text_input\n text_input = text_input.strip()\n if 
audio_prompt_input and (not audio_prompt_text_input or audio_prompt_text_input.isspace()):\n raise gr.Error(\"Audio Prompt Text input cannot be empty.\")\n if not text_input or text_input.isspace():\n raise gr.Error(\"Text input cannot be empty.\")\n temp_txt_file_path = None\n temp_audio_prompt_path = None\n output_audio = (44100, np.zeros(1, dtype=np.float32))\n try:\n prompt_path_for_generate = None\n if audio_prompt_input is not None:\n sr, audio_data = audio_prompt_input\n if audio_data is None or audio_data.size == 0 or audio_data.max() == 0: \n gr.Warning(\"Audio prompt seems empty or silent, ignoring prompt.\")\n else:\n with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".wav\", delete=False) as f_audio:\n temp_audio_prompt_path = f_audio.name \n if np.issubdtype(audio_data.dtype, np.integer):\n max_val = np.iinfo(audio_data.dtype).max\n audio_data = audio_data.astype(np.float32) / max_val\n elif not np.issubdtype(audio_data.dtype, np.floating):\n gr.Warning(f\"Unsupported audio prompt dtype {audio_data.dtype}, attempting conversion.\")\n try:\n audio_data = audio_data.astype(np.float32)\n except Exception as conv_e:\n raise gr.Error(f\"Failed to convert audio prompt to float32: {conv_e}\")\n if audio_data.ndim > 1:\n if audio_data.shape[0] == 2: \n audio_data = np.mean(audio_data, axis=0)\n elif audio_data.shape[1] == 2: \n audio_data = np.mean(audio_data, axis=1)\n else:\n gr.Warning(\n f\"Audio prompt has unexpected shape {audio_data.shape}, taking first channel/axis.\"\n )\n audio_data = (\n audio_data[0] if audio_data.shape[0] < audio_data.shape[1] else audio_data[:, 0]\n )\n audio_data = np.ascontiguousarray(audio_data) \n try:\n sf.write(\n temp_audio_prompt_path, audio_data, sr, subtype=\"FLOAT\"\n ) \n prompt_path_for_generate = temp_audio_prompt_path\n print(f\"Created temporary audio prompt file: {temp_audio_prompt_path} (orig sr: {sr})\")\n except Exception as write_e:\n print(f\"Error writing temporary audio file: {write_e}\")\n raise 
gr.Error(f\"Failed to save audio prompt: {write_e}\")\n if seed is None or seed < 0:\n seed = random.randint(0, 2**32 - 1)\n print(f\"\\nNo seed provided, generated random seed: {seed}\\n\")\n else:\n print(f\"\\nUsing user-selected seed: {seed}\\n\")\n set_seed(seed)\n print(f'Generating speech: \\n\"{text_input}\"\\n')\n start_time = time.time()\n with torch.inference_mode():\n output_audio_np = model.generate(\n text_input,\n max_tokens=max_new_tokens,\n cfg_scale=cfg_scale,\n temperature=temperature,\n top_p=top_p,\n cfg_filter_top_k=cfg_filter_top_k, \n use_torch_compile=False, \n audio_prompt=prompt_path_for_generate,\n verbose=True,\n )\n end_time = time.time()\n print(f\"Generation finished in {end_time - start_time:.2f} seconds.\\n\")\n if output_audio_np is not None:\n output_sr = 44100\n original_len = len(output_audio_np)\n speed_factor = max(0.1, min(speed_factor, 5.0))\n target_len = int(original_len / speed_factor) \n if target_len != original_len and target_len > 0: \n x_original = np.arange(original_len)\n x_resampled = np.linspace(0, original_len - 1, target_len)\n resampled_audio_np = np.interp(x_resampled, x_original, output_audio_np)\n output_audio = (\n output_sr,\n resampled_audio_np.astype(np.float32),\n ) \n print(\n f\"Resampled audio from {original_len} to {target_len} samples for {speed_factor:.2f}x speed.\"\n )\n else:\n output_audio = (\n output_sr,\n output_audio_np,\n ) \n print(f\"Skipping audio speed adjustment (factor: {speed_factor:.2f}).\")\n print(f\"Audio conversion successful. 
Final shape: {output_audio[1].shape}, Sample Rate: {output_sr}\")\n if output_audio[1].dtype == np.float32 or output_audio[1].dtype == np.float64:\n audio_for_gradio = np.clip(output_audio[1], -1.0, 1.0)\n audio_for_gradio = (audio_for_gradio * 32767).astype(np.int16)\n output_audio = (output_sr, audio_for_gradio)\n print(\"Converted audio to int16 for Gradio output.\")\n else:\n print(\"\\nGeneration finished, but no valid tokens were produced.\")\n gr.Warning(\"Generation produced no output.\")\n except Exception as e:\n print(f\"Error during inference: {e}\")\n import traceback\n traceback.print_exc()\n raise gr.Error(f\"Inference failed: {e}\")\n finally:\n if temp_txt_file_path and Path(temp_txt_file_path).exists():\n try:\n Path(temp_txt_file_path).unlink()\n print(f\"Deleted temporary text file: {temp_txt_file_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary text file {temp_txt_file_path}: {e}\")\n if temp_audio_prompt_path and Path(temp_audio_prompt_path).exists():\n try:\n Path(temp_audio_prompt_path).unlink()\n print(f\"Deleted temporary audio prompt file: {temp_audio_prompt_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary audio prompt file {temp_audio_prompt_path}: {e}\")\n console_output = console_output_buffer.getvalue()\n return output_audio, seed, console_output", "predict_code_clean": "def run_inference(\n text: str,\n audio_prompt_text: Optional[str],\n audio_prompt: Optional[Tuple[int, np.ndarray]],\n max_new_tokens: int,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n cfg_filter_top_k: int,\n speed_factor: float,\n seed: int,\n) -> Tuple[Optional[np.ndarray], Optional[int], str]:\n console_buffer = io.StringIO()\n audio_prompt_path = None\n output_audio = None\n used_seed = seed if seed not in (-1, None) else random.randint(0, 2**32 - 1)\n try:\n if audio_prompt is not None:\n if not audio_prompt_text.strip():\n raise ValueError(\"Transcript required when using audio prompt\")\n 
with tempfile.NamedTemporaryFile(suffix=\".wav\", delete=False) as tmp_file:\n sample_rate, audio_data = audio_prompt\n sf.write(tmp_file.name, audio_data.T, sample_rate)\n audio_prompt_path = tmp_file.name\n set_seed(used_seed)\n with contextlib.redirect_stdout(console_buffer), contextlib.redirect_stderr(console_buffer):\n start_time = time.time()\n generated_audio = model.generate(\n text=text,\n audio_prompt_path=audio_prompt_path,\n transcript_prompt=audio_prompt_text if audio_prompt_path else None,\n max_new_tokens=max_new_tokens,\n cfg_scale=cfg_scale,\n temperature=temperature,\n top_p=top_p,\n cfg_filter_top_k=cfg_filter_top_k\n )\n sample_rate = model.config.sample_rate\n audio_array = generated_audio.cpu().numpy().T\n adjusted_sr = int(sample_rate * speed_factor)\n with io.BytesIO() as buf:\n sf.write(buf, audio_array, adjusted_sr, format=\"WAV\")\n buf.seek(0)\n output_audio, _ = sf.read(buf, dtype=\"float32\")\n console_buffer.write(f\"\\nGeneration completed in {time.time()-start_time:.2f}s\")\n except Exception as e:\n console_buffer.write(f\"\\nERROR: {str(e)}\")\n return None, None, console_buffer.getvalue()\n finally:\n if audio_prompt_path is not None:\n Path(audio_prompt_path).unlink(missing_ok=True)\n return (output_audio, used_seed, console_buffer.getvalue())"}} {"repo_name": "dia", "file_name": "/dia/dia/layers.py", "inference_info": {"prefix_code": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torch import Tensor\nfrom torch.nn import RMSNorm\n\nfrom .config import DecoderConfig, DiaConfig, EncoderConfig\nfrom .state import DecoderInferenceState, EncoderInferenceState, KVCache\n\n\ndef _normalize_axes(axes: tuple[int, ...], ndim: int) -> tuple[int, ...]:\n return tuple(ax if ax >= 0 else ndim + ax for ax in axes)\n\n\nclass DenseGeneral(nn.Module):\n \"\"\"\n PyTorch equivalent of flax.linen.DenseGeneral with shapes defined at init.\n Stores weights (`kernel`) in 
the same layout as Jax and uses torch.tensordot\n for the generalized matrix multiplication. Weight/bias shapes are calculated\n and parameters created during initialization based on config.\n `load_weights` validates shapes and copies data.\n Attributes:\n axis (Tuple[int, ...]): Input axis or axes to contract.\n in_shapes (Tuple[int, ...]): Sizes of the input dimensions specified by `axis`.\n out_features (Tuple[int, ...]): Shape of the output features (non-contracted dims).\n use_bias (bool): Whether to add a bias term.\n weight (nn.Parameter): The kernel parameter.\n bias (Optional[nn.Parameter]): The bias parameter (if use_bias=True).\n \"\"\"\n\n def __init__(\n self,\n in_shapes: tuple[int, ...],\n out_features: tuple[int, ...],\n axis: tuple[int, ...] = (-1,),\n weight_dtype: torch.dtype | None = None,\n device: torch.device | None = None,\n ):\n super().__init__()\n self.in_shapes = in_shapes\n self.out_features = out_features\n self.axis = axis\n self.kernel_shape = self.in_shapes + self.out_features\n\n factory_kwargs = {\"device\": device, \"dtype\": weight_dtype}\n self.weight = nn.Parameter(torch.empty(self.kernel_shape, **factory_kwargs))\n\n def forward(self, inputs: Tensor) -> Tensor:\n norm_axis = _normalize_axes(self.axis, inputs.ndim)\n kernel_contract_axes = tuple(range(len(norm_axis)))\n\n output = torch.tensordot(\n inputs.to(self.weight.dtype),\n self.weight,\n dims=(norm_axis, kernel_contract_axes),\n ).to(inputs.dtype)\n return output\n\n\nclass MlpBlock(nn.Module):\n \"\"\"MLP block using DenseGeneral.\"\"\"\n\n def __init__(self, embed_dim: int, intermediate_dim: int, compute_dtype: torch.dtype):\n super().__init__()\n self.dtype = compute_dtype\n\n self.wi_fused = DenseGeneral(\n in_shapes=(embed_dim,),\n out_features=(2, intermediate_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n self.wo = DenseGeneral(\n in_shapes=(intermediate_dim,),\n out_features=(embed_dim,),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n def 
forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass.\"\"\"\n fused_x = self.wi_fused(x)\n\n gate = fused_x[..., 0, :]\n up = fused_x[..., 1, :]\n\n hidden = torch.mul(F.silu(gate), up).to(self.dtype)\n\n output = self.wo(hidden)\n return output\n\n\nclass RotaryEmbedding(nn.Module):\n \"\"\"Rotary Position Embedding (RoPE) implementation in PyTorch.\"\"\"\n\n def __init__(\n self,\n embedding_dims: int,\n min_timescale: float = 1.0,\n max_timescale: float = 10000.0,\n dtype: torch.dtype = torch.float32,\n ):\n super().__init__()\n if embedding_dims % 2 != 0:\n raise ValueError(\"Embedding dim must be even for RoPE.\")\n self.embedding_dims = embedding_dims\n self.min_timescale = min_timescale\n self.max_timescale = max_timescale\n self.compute_dtype = dtype\n\n half_embedding_dim = embedding_dims // 2\n fraction = (2.0 * torch.arange(0, half_embedding_dim)) / embedding_dims\n timescale = (self.min_timescale * (self.max_timescale / self.min_timescale) ** fraction).to(torch.float32)\n self.register_buffer(\"timescale\", timescale, persistent=False)\n\n def forward(self, inputs: torch.Tensor, position: torch.Tensor):\n \"\"\"Applies RoPE.\"\"\"\n position = position.unsqueeze(-1).unsqueeze(-1)\n sinusoid_inp = position / self.timescale\n sin = torch.sin(sinusoid_inp)\n cos = torch.cos(sinusoid_inp)\n first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)\n first_part = first_half * cos - second_half * sin\n second_part = second_half * cos + first_half * sin\n return torch.cat(\n (first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)),\n dim=-1,\n )\n\n def apply_rope(self, inputs: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor):\n first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)\n first_part = first_half * cos - second_half * sin\n second_part = second_half * cos + first_half * sin\n return torch.cat((first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)), 
dim=-1)\n\n\ndef custom_scaled_dot_product_attention(\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n attn_mask: torch.Tensor | None = None,\n scale: float = 1.0,\n is_causal: bool = False,\n num_gqa_groups: int = 1,\n) -> torch.Tensor:\n \"\"\"\n Custom scaled dot-product attention with GQA support for MPS compatibility.\n\n Args:\n query: (B, N_q, T, H) - Query tensor, N_q = num_query_heads\n key: (B, N_kv, S, H) - Key tensor, N_kv = num_kv_heads\n value: (B, N_kv, S, H) - Value tensor\n attn_mask: (B, 1, T, S) - Attention mask, optional\n scale: Scaling factor for attention scores\n is_causal: If True, apply causal masking\n num_gqa_groups: Number of query groups per KV head (N_q / N_kv)\n\n Returns:\n output: (B, N_q, T, H) - Attention output\n \"\"\"\n B, N_q, T, H = query.shape\n _, N_kv, S, _ = key.shape\n\n # For GQA, repeat key and value tensors to match query heads\n if num_gqa_groups > 1:\n key = key.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)\n value = value.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)\n\n # Compute attention scores: (B, N_q, T, H) @ (B, N_q, H, S) -> (B, N_q, T, S)\n scores = torch.matmul(query, key.transpose(-1, -2)) * scale\n\n # Apply causal mask if needed\n if is_causal:\n causal_mask = torch.tril(torch.ones(T, S, dtype=torch.bool, device=query.device))\n scores = scores.masked_fill(~causal_mask, float(\"-inf\"))\n\n # Apply attention mask if provided\n if attn_mask is not None:\n scores = scores.masked_fill(~attn_mask, float(\"-inf\"))\n\n # Softmax over the last dimension (S)\n attn_weights = F.softmax(scores, dim=-1)\n\n # Compute output: (B, N_q, T, S) @ (B, N_q, S, H) -> (B, N_q, T, H)\n output = torch.matmul(attn_weights, value)\n\n return output\n\n\nclass CrossAttention(nn.Module):\n \"\"\"Cross-Attention using DenseGeneral.\"\"\"\n\n def __init__(\n self,\n config: EncoderConfig | DecoderConfig,\n q_embed_dim: int,\n kv_embed_dim: int,\n num_query_heads: int,\n num_kv_heads: 
int,\n head_dim: int,\n compute_dtype: torch.dtype,\n out_embed_dim: int | None = None,\n ):\n super().__init__()\n self.num_query_heads = num_query_heads\n self.num_kv_heads = num_kv_heads\n self.head_dim = head_dim\n self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim\n self.projected_query_dim = num_query_heads * head_dim\n if num_query_heads % num_kv_heads != 0:\n raise ValueError(f\"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})\")\n self.num_gqa_groups = num_query_heads // num_kv_heads\n\n # --- Projection Layers using DenseGeneral ---\n self.q_proj = DenseGeneral(\n in_shapes=(q_embed_dim,),\n out_features=(num_query_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.k_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.v_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.o_proj = DenseGeneral(\n in_shapes=(num_query_heads, head_dim),\n out_features=(self.output_dim,),\n axis=(-2, -1),\n weight_dtype=compute_dtype,\n )\n\n # --- Rotary Embedding ---\n self.rotary_emb = RotaryEmbedding(\n embedding_dims=self.head_dim,\n max_timescale=config.rope_theta,\n dtype=compute_dtype,\n )\n\n def forward(\n self,\n Xq: torch.Tensor, # (B, T, D) T = 1 in AR generation\n q_positions: torch.Tensor, # (B, T)\n kv_positions: torch.Tensor | None = None, # (B, S)\n attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others\n cache: KVCache | None = None, # None in Encoder, KVCache in Decoder\n is_causal: bool = False,\n ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] | None]:\n \"\"\"\n Performs attention calculation with optional KV caching.\n\n Args:\n Xq: Query tensor (B, T, D). T=1 during single-step decoding.\n Xkv: Key/Value source tensor (B, S, E). 
S=1 during single-step decoding for self-attn.\n q_positions: Positions for queries (B, T).\n kv_positions: Positions for keys/values (B, S). If None, uses q_positions.\n attn_mask: Attention mask.\n cache: KVCache.\n\n Returns:\n A tuple containing:\n - output: The attention output tensor (B, T, output_dim).\n - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.\n \"\"\"\n if kv_positions is None:\n kv_positions = q_positions\n original_dtype = Xq.dtype\n\n Xq_BxTxNxH = self.q_proj(Xq)\n Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)\n\n attn_k: torch.Tensor | None = cache.k if cache is not None else None\n attn_v: torch.Tensor | None = cache.v if cache is not None else None\n\n # Use custom attention for MPS backend, otherwise use optimized PyTorch function\n is_mps = Xq.device.type == \"mps\" and torch.backends.mps.is_available()\n if is_mps:\n attn_output = custom_scaled_dot_product_attention(\n query=Xq_BxNxTxH,\n key=attn_k,\n value=attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n is_causal=is_causal,\n num_gqa_groups=self.num_gqa_groups,\n )\n else:\n attn_output = F.scaled_dot_product_attention(\n Xq_BxNxTxH,\n attn_k,\n attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n enable_gqa=self.num_gqa_groups > 1,\n is_causal=is_causal,\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)\n output = self.o_proj(attn_output)\n\n return output.to(original_dtype)\n\n\nclass FusedQKV(nn.Module):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = False,\n num_q_heads: int = 1,\n q_head_dim: int = 1,\n num_kv_heads: int = 1,\n kv_head_dim: int = 1,\n ):\n super().__init__()\n self.num_q_heads = num_q_heads\n self.q_head_dim = q_head_dim\n self.num_kv_heads = num_kv_heads\n self.kv_head_dim = kv_head_dim\n self.q_output_dim = num_q_heads * q_head_dim\n self.kv_output_dim = 
num_kv_heads * kv_head_dim\n self.linear = nn.Linear(in_features, out_features, bias=bias)\n\n def forward(self, inputs: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n x = self.linear(inputs)\n\n q, k, v = x.split([self.q_output_dim, self.kv_output_dim, self.kv_output_dim], dim=-1)\n\n q = q.reshape(q.shape[:-1] + (self.num_q_heads, self.q_head_dim))\n k = k.reshape(k.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))\n v = v.reshape(v.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))\n\n return q, k, v\n\n\nclass SelfAttention(nn.Module):\n \"\"\"Attention using DenseGeneral.\"\"\"\n\n def __init__(\n self,\n config: EncoderConfig | DecoderConfig,\n q_embed_dim: int,\n kv_embed_dim: int,\n num_query_heads: int,\n num_kv_heads: int,\n head_dim: int,\n compute_dtype: torch.dtype,\n out_embed_dim: int | None = None,\n ):\n super().__init__()\n self.num_query_heads = num_query_heads\n self.num_kv_heads = num_kv_heads\n self.head_dim = head_dim\n self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim\n self.projected_query_dim = num_query_heads * head_dim\n if num_query_heads % num_kv_heads != 0:\n raise ValueError(f\"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})\")\n self.num_gqa_groups = num_query_heads // num_kv_heads\n self.kv_embed_dim = kv_embed_dim\n self.q_embed_dim = q_embed_dim\n\n # --- Projection Layers using DenseGeneral ---\n self.q_proj = DenseGeneral(\n in_shapes=(q_embed_dim,),\n out_features=(num_query_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.k_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.v_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.o_proj = DenseGeneral(\n in_shapes=(num_query_heads, head_dim),\n out_features=(self.output_dim,),\n 
axis=(-2, -1),\n weight_dtype=compute_dtype,\n )\n\n # --- Rotary Embedding ---\n self.rotary_emb = RotaryEmbedding(\n embedding_dims=self.head_dim,\n max_timescale=config.rope_theta,\n dtype=compute_dtype,\n )\n\n self.is_fused_qkv = False\n\n ", "suffix_code": "\n\n def patch_fused_qkv(self):\n q_proj_weight = self.get_linear_weight(self.q_proj)\n k_proj_weight = self.get_linear_weight(self.k_proj)\n v_proj_weight = self.get_linear_weight(self.v_proj)\n\n self.qkv = FusedQKV(\n self.kv_embed_dim,\n (self.num_query_heads * self.head_dim + 2 * (self.num_kv_heads * self.head_dim)),\n bias=False,\n num_q_heads=self.num_query_heads,\n q_head_dim=self.head_dim,\n num_kv_heads=self.num_kv_heads,\n kv_head_dim=self.head_dim,\n )\n self.qkv.linear.weight.data = torch.cat([q_proj_weight, k_proj_weight, v_proj_weight], dim=0)\n\n # print(f\"qkv.weight.shape: {self.qkv.linear.weight.shape}\")\n self.is_fused_qkv = True\n\n def forward(\n self,\n X: torch.Tensor, # (B, T, D) T = 1 in AR generation\n q_positions: torch.Tensor, # (B, T)\n kv_positions: torch.Tensor | None = None, # (B, S)\n attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others\n cache: KVCache | None = None, # None in Encoder, KVCache in Decoder\n prefill: bool = False,\n is_causal: bool = False,\n current_idx: torch.Tensor | None = None,\n ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] | None]:\n \"\"\"\n Performs attention calculation with optional KV caching.\n Args:\n Xq: Query tensor (B, T, D). T=1 during single-step decoding.\n Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.\n q_positions: Positions for queries (B, T).\n kv_positions: Positions for keys/values (B, S). 
If None, uses q_positions.\n attn_mask: Attention mask.\n cache: KVCache.\n prefill: If True, use prefill mode.\n Returns:\n A tuple containing:\n - output: The attention output tensor (B, T, output_dim).\n - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.\n \"\"\"\n if kv_positions is None:\n kv_positions = q_positions\n\n original_dtype = X.dtype\n\n if self.is_fused_qkv:\n Xq_BxTxNxH, Xk_BxSxKxH, Xv_BxSxKxH = self.qkv(X)\n else:\n Xq_BxTxNxH = self.q_proj(X)\n Xk_BxSxKxH = self.k_proj(X)\n Xv_BxSxKxH = self.v_proj(X)\n\n position = q_positions.unsqueeze(-1).unsqueeze(-1)\n sinusoid_inp = position / self.rotary_emb.timescale\n sin = torch.sin(sinusoid_inp)\n cos = torch.cos(sinusoid_inp)\n\n Xq_BxTxNxH = self.rotary_emb.apply_rope(Xq_BxTxNxH, sin, cos)\n Xk_BxSxKxH = self.rotary_emb.apply_rope(Xk_BxSxKxH, sin, cos)\n\n Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)\n\n attn_k: torch.Tensor | None = cache.k if cache is not None else None\n attn_v: torch.Tensor | None = cache.v if cache is not None else None\n\n Xk_BxKxSxH = Xk_BxSxKxH.transpose(1, 2) # (B, K, S, H)\n Xv_BxKxSxH = Xv_BxSxKxH.transpose(1, 2) # (B, K, S, H)\n\n if cache is None:\n attn_k = Xk_BxKxSxH\n attn_v = Xv_BxKxSxH\n elif prefill:\n attn_k, attn_v = Xk_BxKxSxH, Xv_BxKxSxH\n cache.prefill(attn_k, attn_v)\n else:\n attn_k, attn_v = cache.update(Xk_BxKxSxH, Xv_BxKxSxH, current_idx)\n\n # Use custom attention for MPS backend, otherwise use optimized PyTorch function\n is_mps = Xv_BxSxKxH.device.type == \"mps\" and torch.backends.mps.is_available()\n if is_mps:\n attn_output = custom_scaled_dot_product_attention(\n query=Xq_BxNxTxH,\n key=attn_k,\n value=attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n is_causal=is_causal,\n num_gqa_groups=self.num_gqa_groups,\n )\n else:\n attn_output = F.scaled_dot_product_attention(\n Xq_BxNxTxH,\n attn_k,\n attn_v,\n 
attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n enable_gqa=self.num_gqa_groups > 1,\n is_causal=is_causal,\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)\n output = self.o_proj(attn_output)\n\n return output.to(original_dtype)\n\n\nclass EncoderLayer(nn.Module):\n \"\"\"Transformer Encoder Layer using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n enc_config = config.encoder_config\n embed_dim = enc_config.hidden_size\n self.compute_dtype = compute_dtype\n\n self.pre_sa_norm = RMSNorm(\n embed_dim,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n self.self_attention = SelfAttention(\n enc_config,\n q_embed_dim=embed_dim,\n kv_embed_dim=embed_dim,\n num_query_heads=enc_config.num_attention_heads,\n num_kv_heads=enc_config.num_key_value_heads,\n head_dim=enc_config.head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=embed_dim,\n )\n self.post_sa_norm = RMSNorm(\n embed_dim,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n self.mlp = MlpBlock(\n embed_dim=embed_dim,\n intermediate_dim=enc_config.intermediate_size,\n compute_dtype=compute_dtype,\n )\n\n def forward(\n self,\n x: torch.Tensor,\n state: EncoderInferenceState,\n ) -> torch.Tensor:\n residual = x\n x_norm = self.pre_sa_norm(x).to(self.compute_dtype)\n\n sa_out = self.self_attention(\n X=x_norm,\n q_positions=state.positions,\n kv_positions=state.positions,\n attn_mask=state.attn_mask,\n )\n x = residual + sa_out\n\n residual = x\n x_norm = self.post_sa_norm(x).to(self.compute_dtype)\n mlp_out = self.mlp(x_norm)\n x = residual + mlp_out\n\n return x\n\n\nclass Encoder(nn.Module):\n \"\"\"Transformer Encoder Stack using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n enc_config = config.encoder_config\n self.compute_dtype = compute_dtype\n\n self.embedding = 
nn.Embedding(\n enc_config.vocab_size,\n enc_config.hidden_size,\n dtype=compute_dtype,\n )\n self.layers = nn.ModuleList([EncoderLayer(config, compute_dtype) for _ in range(enc_config.num_hidden_layers)])\n self.norm = RMSNorm(\n enc_config.hidden_size,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n\n def forward(\n self,\n x_ids: torch.Tensor,\n state: EncoderInferenceState,\n ) -> torch.Tensor:\n x = self.embedding(x_ids)\n\n for layer in self.layers:\n x = layer(x, state)\n\n x = self.norm(x).to(self.compute_dtype)\n return x\n\n\nclass DecoderLayer(nn.Module):\n \"\"\"Transformer Decoder Layer using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n dec_config = config.decoder_config\n enc_config = config.encoder_config\n dec_embed_dim = dec_config.hidden_size\n enc_embed_dim = enc_config.hidden_size\n self.compute_dtype = compute_dtype\n\n # Norms\n self.pre_sa_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n self.pre_ca_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n self.pre_mlp_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n\n # Self-Attention (GQA) with Causal Masking\n self.self_attention = SelfAttention(\n dec_config,\n q_embed_dim=dec_embed_dim,\n kv_embed_dim=dec_embed_dim,\n num_query_heads=dec_config.num_attention_heads,\n num_kv_heads=dec_config.num_key_value_heads,\n head_dim=dec_config.head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=dec_embed_dim,\n )\n # Cross-Attention (MHA)\n self.cross_attention = CrossAttention(\n dec_config,\n q_embed_dim=dec_embed_dim,\n kv_embed_dim=enc_embed_dim, # Note kv_embed_dim\n num_query_heads=dec_config.cross_num_attention_heads,\n num_kv_heads=dec_config.cross_num_key_value_heads,\n head_dim=dec_config.cross_head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=dec_embed_dim,\n )\n # 
MLP\n self.mlp = MlpBlock(\n embed_dim=dec_embed_dim,\n intermediate_dim=dec_config.intermediate_size,\n compute_dtype=compute_dtype,\n )\n\n def forward(\n self,\n x: torch.Tensor,\n state: DecoderInferenceState,\n self_attn_cache: KVCache | None = None,\n cross_attn_cache: KVCache | None = None,\n prefill: bool = False,\n current_idx: int = 0,\n ) -> torch.Tensor:\n residual = x\n x_norm = self.pre_sa_norm(x).to(self.compute_dtype)\n\n self_attn_mask = state.casual_attn_mask[None, None, current_idx]\n\n sa_out = self.self_attention(\n X=x_norm, # (2, 1, D)\n q_positions=state.dec_positions, # (2, 1)\n kv_positions=state.dec_positions, # (2, 1)\n attn_mask=self_attn_mask,\n cache=self_attn_cache,\n prefill=prefill,\n is_causal=prefill,\n current_idx=current_idx,\n )\n\n x = residual + sa_out\n\n residual = x\n x_norm = self.pre_ca_norm(x).to(self.compute_dtype)\n ca_out = self.cross_attention(\n Xq=x_norm,\n q_positions=state.dec_positions,\n kv_positions=state.enc_positions,\n attn_mask=state.cross_attn_mask,\n cache=cross_attn_cache,\n )\n x = residual + ca_out\n\n residual = x\n x_norm = self.pre_mlp_norm(x).to(self.compute_dtype)\n mlp_out = self.mlp(x_norm)\n x = residual + mlp_out\n\n return x\n\n\nclass Decoder(nn.Module):\n \"\"\"Transformer Decoder Stack using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n dec_config = config.decoder_config\n self.num_channels = dec_config.num_channels\n self.num_layers = dec_config.num_hidden_layers\n\n self.embeddings = nn.ModuleList(\n [\n nn.Embedding(dec_config.vocab_size, dec_config.hidden_size, dtype=compute_dtype)\n for _ in range(self.num_channels)\n ]\n )\n self.layers = nn.ModuleList(\n [DecoderLayer(config=config, compute_dtype=compute_dtype) for _ in range(self.num_layers)]\n )\n\n self.norm = RMSNorm(\n dec_config.hidden_size,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n\n self.logits_dense = DenseGeneral(\n 
in_shapes=(dec_config.hidden_size,),\n out_features=(self.num_channels, dec_config.vocab_size),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n def precompute_cross_attn_cache(\n self,\n enc_out: torch.Tensor, # (B, S, E)\n ) -> list[KVCache]:\n \"\"\"\n Computes the Key and Value tensors for cross-attention for each layer from the encoder output.\n \"\"\"\n per_layer_kv_cache: list[KVCache] = []\n\n for layer in self.layers:\n cross_attn_module = layer.cross_attention\n k_proj = cross_attn_module.k_proj(enc_out)\n v_proj = cross_attn_module.v_proj(enc_out)\n\n k = k_proj.transpose(1, 2)\n v = v_proj.transpose(1, 2)\n\n per_layer_kv_cache.append(KVCache.from_kv(k, v))\n\n return per_layer_kv_cache\n\n def decode_step(\n self,\n tgt_ids_Bx1xC: torch.Tensor, # [B, 1, C]\n state: DecoderInferenceState,\n current_idx: int,\n ) -> torch.Tensor:\n \"\"\"\n Performs a single decoding step, managing KV caches layer by layer.\n Returns:\n A tuple containing:\n - logits_Bx1xCV: The final output logits for the current step (B, 1, C*V), cast to float32.\n \"\"\"\n\n x = None\n for i in range(self.num_channels):\n channel_tokens = tgt_ids_Bx1xC[..., i]\n channel_embed = self.embeddings[i](channel_tokens)\n x = channel_embed if x is None else x + channel_embed\n\n for i, layer in enumerate(self.layers):\n self_cache = state.self_attn_cache[i]\n cross_cache = state.cross_attn_cache[i]\n x = layer(\n x, # (2, 1, D)\n state,\n self_attn_cache=self_cache,\n cross_attn_cache=cross_cache,\n current_idx=current_idx,\n )\n\n x = self.norm(x)\n logits_Bx1xCxV = self.logits_dense(x)\n\n return logits_Bx1xCxV.to(torch.float32)\n\n def forward(self, tgt_ids_BxTxC: torch.Tensor, state: DecoderInferenceState) -> torch.Tensor:\n \"\"\"\n Forward pass for the Decoder stack, managing KV caches.\n Args:\n tgt_ids_BxTxC: Target token IDs (B, T, C).\n encoder_out: Output from the encoder (B, S, E).\n tgt_positions: Positions for target sequence (B, T).\n src_positions: Positions for source 
sequence (B, S).\n self_attn_mask: Mask for self-attention.\n cross_attn_mask: Mask for cross-attention.\n past_key_values: List containing the self-attention KV cache for each layer\n from the previous decoding step. `len(past_key_values)` should\n equal `num_layers`.\n precomputed_cross_attn_kv: A single tuple containing the pre-computed K/V cache\n derived from `encoder_out`. This is passed identically\n to all layers.\n Returns:\n A tuple containing:\n - logits: The final output logits (B, T, C * V), cast to float32.\n - present_key_values: A list containing the updated self-attention KV cache\n for each layer for the *current* decoding step.\n \"\"\"\n _, _, num_channels_in = tgt_ids_BxTxC.shape\n assert num_channels_in == self.num_channels, \"Input channels mismatch\"\n\n # Embeddings\n x = None\n for i in range(self.num_channels):\n channel_tokens = tgt_ids_BxTxC[..., i]\n channel_embed = self.embeddings[i](channel_tokens)\n x = channel_embed if x is None else x + channel_embed\n\n for i, layer in enumerate(self.layers):\n self_cache = state.self_attn_cache[i]\n cross_cache = state.cross_attn_cache[i]\n x = layer(\n x,\n state,\n self_attn_cache=self_cache,\n cross_attn_cache=cross_cache,\n prefill=True,\n )\n\n # Final Norm\n x = self.norm(x)\n logits_BxTxCxV = self.logits_dense(x)\n\n return logits_BxTxCxV.to(torch.float32)\n\n\nclass DiaModel(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/nari-labs/dia\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n coders={\n DiaConfig: (\n lambda x: x.model_dump(),\n lambda data: DiaConfig.model_validate(data),\n ),\n },\n):\n \"\"\"PyTorch Dia Model using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n self.encoder = Encoder(config, compute_dtype)\n self.decoder = Decoder(config, compute_dtype)\n", "middle_code": "def get_linear_weight(self, dense: DenseGeneral):\n W_dg = dense.weight.data\n 
out_features = 1\n input_features = 1\n for dim in dense.out_features:\n out_features *= dim\n for dim in dense.in_shapes:\n input_features *= dim\n W_dg_reshaped_for_linear_T = W_dg.reshape(input_features, out_features)\n linear_weight = W_dg_reshaped_for_linear_T.transpose(0, 1).contiguous()\n return linear_weight", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/dia/dia/state.py", "from dataclasses import dataclass\nfrom typing import Optional\n\nimport torch\n\nfrom .config import DiaConfig\n\n\ndef create_attn_mask(\n q_padding_mask_1d: torch.Tensor,\n k_padding_mask_1d: torch.Tensor,\n device: torch.device,\n is_causal: bool = False,\n) -> torch.Tensor:\n \"\"\"\n Creates the attention mask (self or cross) mimicking JAX segment ID logic.\n \"\"\"\n # B1, Tq = q_padding_mask_1d.shape\n # B2, Tk = k_padding_mask_1d.shape\n\n p_mask_q = q_padding_mask_1d.unsqueeze(2) # Shape [B, Tq, 1]\n p_mask_k = k_padding_mask_1d.unsqueeze(1) # Shape [B, 1, Tk]\n\n # Condition A: Non-padding query attends to non-padding key\n non_pad_attends_non_pad = p_mask_q & p_mask_k # Shape [B, Tq, Tk]\n\n # Condition B: Padding query attends to padding key\n pad_attends_pad = (~p_mask_q) & (~p_mask_k) # Shape [B, Tq, Tk]\n\n # Combine: True if padding status is compatible (both non-pad OR both pad)\n mask = non_pad_attends_non_pad | pad_attends_pad # Shape [B, Tq, Tk]\n\n if is_causal:\n # assert Tq == Tk, \"Causal mask requires query and key sequence lengths to be equal\"\n causal_mask_2d = torch.tril(torch.ones_like(mask[0], dtype=torch.bool, device=device)) # Shape [B, Tq, Tk]\n causal_mask = mask & causal_mask_2d # Shape [B, Tq, Tk]\n return causal_mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]\n else:\n return mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]\n\n\n@dataclass\nclass EncoderInferenceState:\n \"\"\"Parameters specifically for encoder inference.\"\"\"\n\n max_seq_len: int\n device: torch.device\n 
positions: torch.Tensor\n padding_mask: torch.Tensor\n attn_mask: torch.Tensor\n\n @classmethod\n def new(cls, config: DiaConfig, cond_src: torch.Tensor) -> \"EncoderInferenceState\":\n \"\"\"Creates EtorchrInferenceParams from DiaConfig and a device.\"\"\"\n device = cond_src.device\n\n positions = torch.arange(\n config.encoder_config.max_position_embeddings, dtype=torch.float32, device=device\n ).unsqueeze(0)\n padding_mask = (cond_src.squeeze(1) != 0).to(device).repeat_interleave(2, dim=0)\n attn_mask = create_attn_mask(padding_mask, padding_mask, device, is_causal=False)\n\n return cls(\n max_seq_len=config.encoder_config.max_position_embeddings,\n device=device,\n positions=positions,\n padding_mask=padding_mask,\n attn_mask=attn_mask,\n )\n\n\nclass KVCache(torch.nn.Module):\n k: torch.Tensor\n v: torch.Tensor\n\n def __init__(\n self,\n batch_size: int,\n num_heads: int,\n max_len: int,\n head_dim: int,\n dtype: torch.dtype,\n device: torch.device,\n k: torch.Tensor | None = None,\n v: torch.Tensor | None = None,\n ):\n k = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if k is None else k\n v = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if v is None else v\n super().__init__()\n\n self.register_buffer(\"k\", k)\n self.register_buffer(\"v\", v)\n\n @classmethod\n def from_kv(cls, k: torch.Tensor, v: torch.Tensor) -> \"KVCache\":\n return cls(\n batch_size=k.shape[0] // 2,\n num_heads=k.shape[1],\n max_len=k.shape[2],\n head_dim=k.shape[3],\n dtype=k.dtype,\n device=k.device,\n k=k,\n v=v,\n )\n\n def update(self, k: torch.Tensor, v: torch.Tensor, current_idx: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n k_out, v_out = self.k, self.v\n k_out[:, :, current_idx, :] = k\n v_out[:, :, current_idx, :] = v\n return self.k, self.v\n\n def prefill(self, k: torch.Tensor, v: torch.Tensor):\n prefill_len = k.shape[2]\n self.k[:, :, :prefill_len, :] = k\n self.v[:, :, 
:prefill_len, :] = v\n\n\n@dataclass\nclass DecoderInferenceState:\n \"\"\"Parameters specifically for decoder inference.\"\"\"\n\n device: torch.device\n dtype: torch.dtype\n enc_out: torch.Tensor\n enc_positions: torch.Tensor\n dec_positions: torch.Tensor\n self_attn_cache: list[KVCache]\n cross_attn_cache: list[KVCache]\n casual_attn_mask: torch.Tensor\n cross_attn_mask: torch.Tensor\n\n @classmethod\n def new(\n cls,\n config: DiaConfig,\n enc_state: EncoderInferenceState,\n enc_out: torch.Tensor,\n dec_cross_attn_cache: list[KVCache],\n compute_dtype: torch.dtype,\n max_generation_length: Optional[int] = None,\n ) -> \"DecoderInferenceState\":\n \"\"\"Creates DecoderInferenceParams from DiaConfig and a device.\"\"\"\n device = enc_out.device\n max_audio_len = max_generation_length or config.decoder_config.max_position_embeddings\n batch_size = enc_out.shape[0] // 2\n\n dec_positions = torch.full((2 * batch_size, 1), fill_value=0, dtype=torch.int32, device=device)\n causal_mask = torch.tril(torch.ones(max_audio_len, max_audio_len, dtype=torch.bool, device=device))\n dec_mask = torch.ones((2 * batch_size, 1), dtype=torch.bool, device=device)\n cross_attn_mask = create_attn_mask(dec_mask, enc_state.padding_mask, device, is_causal=False)\n\n self_attn_cache = [\n KVCache(\n batch_size,\n config.decoder_config.num_key_value_heads,\n max_audio_len,\n config.decoder_config.head_dim,\n compute_dtype,\n device,\n )\n for _ in range(config.decoder_config.num_hidden_layers)\n ]\n\n return cls(\n device=device,\n dtype=compute_dtype,\n enc_out=enc_out,\n enc_positions=enc_state.positions,\n dec_positions=dec_positions,\n self_attn_cache=self_attn_cache,\n cross_attn_cache=dec_cross_attn_cache,\n casual_attn_mask=causal_mask,\n cross_attn_mask=cross_attn_mask,\n )\n\n def prepare_step(self, step_from: int, step_to: int | None = None) -> None:\n if step_to is None:\n step_to = step_from + 1\n self.dec_positions = torch.arange(step_from, step_to, dtype=torch.int32, 
device=self.device).unsqueeze(0)\n\n\n@dataclass\nclass DecoderOutput:\n generated_tokens: torch.Tensor\n prefill_steps: list[int]\n\n @classmethod\n def new(cls, batch_size: int, config: DiaConfig, device: torch.device) -> \"DecoderOutput\":\n max_audio_len = config.decoder_config.max_position_embeddings\n return cls(\n generated_tokens=torch.full(\n (batch_size, max_audio_len, config.decoder_config.num_channels),\n fill_value=-1,\n dtype=torch.int,\n device=device,\n ),\n prefill_steps=[],\n )\n\n def get_tokens_at(self, step_from: int, step_to: int | None = None) -> torch.Tensor:\n if step_to is None:\n step_to = step_from + 1\n return self.generated_tokens[:, step_from:step_to, :]\n\n def update_one(self, dec_out: torch.Tensor, step: int, apply_mask: bool = False):\n dec_out = dec_out.to(self.generated_tokens.dtype)\n if apply_mask:\n mask = self.generated_tokens[:, step, :] == -1\n self.generated_tokens[:, step, :] = torch.where(mask, dec_out, self.generated_tokens[:, step, :])\n else:\n self.generated_tokens[:, step, :] = dec_out\n\n def prefill(self, dec_out: torch.Tensor, prefill_steps: list[int]):\n length = dec_out.shape[1]\n self.generated_tokens[:, :length, :] = dec_out\n self.prefill_steps = prefill_steps\n"], ["/dia/dia/config.py", "\"\"\"Configuration management module for the Dia model.\n\nThis module provides comprehensive configuration management for the Dia model,\nutilizing Pydantic for validation. 
It defines configurations for data processing,\nmodel architecture (encoder and decoder), and training settings.\n\nKey components:\n- DataConfig: Parameters for data loading and preprocessing.\n- EncoderConfig: Architecture details for the encoder module.\n- DecoderConfig: Architecture details for the decoder module.\n- ModelConfig: Combined model architecture settings.\n- TrainingConfig: Training hyperparameters and settings.\n- DiaConfig: Master configuration combining all components.\n\"\"\"\n\nimport os\n\nfrom pydantic import BaseModel, Field\n\n\nclass EncoderConfig(BaseModel, frozen=True):\n \"\"\"Configuration for the encoder component of the Dia model.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia_encoder\".\n hidden_size: Size of the encoder layers, defaults to 1024.\n intermediate_size: Size of the \"intermediate\" (i.e., feed-forward) layer in the encoder, defaults to 4096.\n num_hidden_layers: Number of hidden layers in the encoder, defaults to 12.\n num_attention_heads: Number of attention heads in the encoder, defaults to 16.\n num_key_value_heads: Number of key-value heads in the encoder, defaults to 16.\n head_dim: Dimension of each attention head, defaults to 128.\n hidden_act: Activation function in the encoder, defaults to \"silu\".\n max_position_embeddings: Maximum number of position embeddings, defaults to 1024.\n initializer_range: Range for initializing weights, defaults to 0.02.\n norm_eps: Epsilon value for normalization layers, defaults to 1e-5.\n rope_theta: Theta value for RoPE, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE.\n vocab_size: Vocabulary size, defaults to 256.\n \"\"\"\n\n head_dim: int = Field(default=128, gt=0)\n hidden_act: str = Field(default=\"silu\")\n hidden_size: int = Field(default=1024, gt=0)\n initializer_range: float = Field(default=0.02)\n intermediate_size: int = Field(default=4096, gt=0)\n max_position_embeddings: int = Field(default=1024, gt=0)\n model_type: str 
= Field(default=\"dia_encoder\")\n norm_eps: float = Field(default=1e-5)\n num_attention_heads: int = Field(default=16, gt=0)\n num_hidden_layers: int = Field(default=12, gt=0)\n num_key_value_heads: int = Field(default=16, gt=0)\n rope_scaling: float | None = Field(default=None)\n rope_theta: float = Field(default=10000.0)\n vocab_size: int = Field(default=256, gt=0)\n\n\nclass DecoderConfig(BaseModel, frozen=True):\n \"\"\"Configuration for the decoder component of the Dia model.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia_decoder\".\n hidden_size: Size of the decoder layers, defaults to 2048.\n intermediate_size: Size of the \"intermediate\" (i.e., feed-forward) layer in the decoder, defaults to 8192.\n num_hidden_layers: Number of hidden layers in the decoder, defaults to 18.\n num_attention_heads: Number of attention heads in the decoder, defaults to 16.\n num_key_value_heads: Number of key-value heads in the decoder, defaults to 4.\n head_dim: Dimension of each attention head, defaults to 128.\n cross_hidden_size: Size of the cross-attention layers, defaults to 1024.\n cross_num_attention_heads: Number of attention heads in the cross-attention mechanism, defaults to 16.\n cross_num_key_value_heads: Number of key-value heads in the cross-attention mechanism, defaults to 16.\n cross_head_dim: Dimension of each cross-attention head, defaults to 128.\n hidden_act: Activation function in the decoder, defaults to \"silu\".\n max_position_embeddings: Maximum number of position embeddings in the decoder, defaults to 3072.\n initializer_range: Range for initializing weights in the decoder, defaults to 0.02.\n norm_eps: Epsilon value for normalization layers in the decoder, defaults to 1e-5.\n rope_theta: Theta value for RoPE in the decoder, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE in the decoder.\n vocab_size: Vocabulary size for the decoder, defaults to 1028.\n num_channels: Number of channels in the decoder, 
defaults to 9.\n \"\"\"\n\n cross_head_dim: int = Field(default=128, gt=0)\n cross_hidden_size: int = Field(default=1024, gt=0)\n cross_num_attention_heads: int = Field(default=16, gt=0)\n cross_num_key_value_heads: int = Field(default=16, gt=0)\n head_dim: int = Field(default=128, gt=0)\n hidden_act: str = Field(default=\"silu\")\n hidden_size: int = Field(default=2048, gt=0)\n initializer_range: float = Field(default=0.02)\n intermediate_size: int = Field(default=8192, gt=0)\n max_position_embeddings: int = Field(default=3072, gt=0)\n model_type: str = Field(default=\"dia_decoder\")\n norm_eps: float = Field(default=1e-5)\n num_attention_heads: int = Field(default=16, gt=0)\n num_channels: int = Field(default=9, gt=0)\n num_hidden_layers: int = Field(default=18, gt=0)\n num_key_value_heads: int = Field(default=4, gt=0)\n rope_scaling: float | None = Field(default=None)\n rope_theta: float = Field(default=10000.0)\n vocab_size: int = Field(default=1028, gt=0)\n\n\nclass DiaConfig(BaseModel, frozen=True):\n \"\"\"Main configuration container for the Dia model architecture.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia\".\n is_encoder_decoder: Flag indicating if the model is an encoder-decoder type, defaults to True.\n encoder: Configuration for the encoder component.\n decoder: Configuration for the decoder component.\n src_vocab_size: Size of the source (text) vocabulary.\n tgt_vocab_size: Size of the target (audio code) vocabulary.\n initializer_range: Range for initializing weights, defaults to 0.02.\n norm_eps: Epsilon value for normalization layers, defaults to 1e-5.\n torch_dtype: Data type for model weights in PyTorch, defaults to \"float32\".\n bos_token_id: Beginning-of-sequence token ID, defaults to 1026.\n eos_token_id: End-of-sequence token ID, defaults to 1024.\n pad_token_id: Padding token ID, defaults to 1025.\n rope_theta: Theta value for RoPE, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE.\n 
transformers_version: Version of the transformers library, defaults to \"4.53.0.dev0\".\n architectures: List of model architectures, defaults to [\"DiaForConditionalGeneration\"].\n delay_pattern: List of delay values for each audio channel, defaults to [0,8,9,10,11,12,13,14,15].\n \"\"\"\n\n architectures: list[str] = Field(default_factory=lambda: [\"DiaForConditionalGeneration\"])\n bos_token_id: int = Field(default=1026)\n decoder_config: DecoderConfig\n delay_pattern: list[int] = Field(default_factory=lambda: [0, 8, 9, 10, 11, 12, 13, 14, 15])\n encoder_config: EncoderConfig\n eos_token_id: int = Field(default=1024)\n initializer_range: float = Field(default=0.02)\n is_encoder_decoder: bool = Field(default=True)\n model_type: str = Field(default=\"dia\")\n norm_eps: float = Field(default=1e-5)\n pad_token_id: int = Field(default=1025)\n torch_dtype: str = Field(default=\"float32\")\n transformers_version: str = Field(default=\"4.53.0.dev0\")\n\n def save(self, path: str) -> None:\n \"\"\"Save the current configuration instance to a JSON file.\n\n Ensures the parent directory exists and the file has a .json extension.\n\n Args:\n path: The target file path to save the configuration.\n\n Raises:\n ValueError: If the path is not a file with a .json extension.\n \"\"\"\n os.makedirs(os.path.dirname(path), exist_ok=True)\n config_json = self.model_dump_json(indent=2)\n with open(path, \"w\") as f:\n f.write(config_json)\n\n @classmethod\n def load(cls, path: str) -> \"DiaConfig | None\":\n \"\"\"Load and validate a Dia configuration from a JSON file.\n\n Args:\n path: The path to the configuration file.\n\n Returns:\n A validated DiaConfig instance if the file exists and is valid,\n otherwise None if the file is not found.\n\n Raises:\n ValueError: If the path does not point to an existing .json file.\n pydantic.ValidationError: If the JSON content fails validation against the DiaConfig schema.\n \"\"\"\n try:\n with open(path, \"r\") as f:\n content = f.read()\n 
return cls.model_validate_json(content)\n except FileNotFoundError:\n return None\n"], ["/dia/dia/model.py", "import time\nfrom enum import Enum\nfrom typing import Callable\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchaudio\n\nfrom .audio import apply_audio_delay, build_delay_indices, build_revert_indices, revert_audio_delay\nfrom .config import DiaConfig\nfrom .layers import DiaModel\nfrom .state import DecoderInferenceState, DecoderOutput, EncoderInferenceState\n\n\nDEFAULT_SAMPLE_RATE = 44100\nSAMPLE_RATE_RATIO = 512\n\n\ndef _get_default_device():\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n elif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n return torch.device(\"mps\")\n return torch.device(\"cpu\")\n\n\ndef _sample_next_token(\n logits_BCxV: torch.Tensor,\n temperature: float,\n top_p: float,\n top_k: int | None,\n audio_eos_value: int,\n) -> torch.Tensor:\n if temperature == 0.0:\n return torch.argmax(logits_BCxV, dim=-1)\n\n logits_BCxV = logits_BCxV / temperature\n\n if audio_eos_value is not None and audio_eos_value >= 0:\n top_logit_indices_BC = torch.argmax(logits_BCxV, dim=-1)\n eos_not_highest_mask_BC = top_logit_indices_BC != audio_eos_value\n mask_eos_unless_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)\n mask_eos_unless_highest_BCxV[eos_not_highest_mask_BC, audio_eos_value] = True\n logits_BCxV = logits_BCxV.masked_fill(mask_eos_unless_highest_BCxV, -torch.inf)\n eos_highest_mask_BC = top_logit_indices_BC == audio_eos_value\n mask_eos_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)\n mask_eos_highest_BCxV[eos_highest_mask_BC, :audio_eos_value] = True\n logits_BCxV = logits_BCxV.masked_fill(mask_eos_highest_BCxV, -torch.inf)\n\n if top_k is not None:\n _, top_k_indices_BCxV = torch.topk(logits_BCxV, k=top_k, dim=-1)\n mask = torch.ones_like(logits_BCxV, dtype=torch.bool)\n mask = mask.scatter(dim=-1, index=top_k_indices_BCxV, 
value=False)\n logits_BCxV = logits_BCxV.masked_fill(mask, -torch.inf)\n\n if top_p < 1.0:\n probs_BCxV = torch.softmax(logits_BCxV, dim=-1)\n sorted_probs_BCxV, sorted_indices_BCxV = torch.sort(probs_BCxV, dim=-1, descending=True)\n cumulative_probs_BCxV = torch.cumsum(sorted_probs_BCxV, dim=-1)\n\n sorted_indices_to_remove_BCxV = cumulative_probs_BCxV > top_p\n sorted_indices_to_remove_BCxV = torch.roll(sorted_indices_to_remove_BCxV, shifts=1, dims=-1)\n sorted_indices_to_remove_BCxV[..., 0] = torch.zeros_like(sorted_indices_to_remove_BCxV[..., 0])\n\n indices_to_remove_BCxV = torch.zeros_like(sorted_indices_to_remove_BCxV)\n indices_to_remove_BCxV = indices_to_remove_BCxV.scatter(\n dim=-1, index=sorted_indices_BCxV, src=sorted_indices_to_remove_BCxV\n )\n logits_BCxV = logits_BCxV.masked_fill(indices_to_remove_BCxV, -torch.inf)\n\n final_probs_BCxV = torch.softmax(logits_BCxV, dim=-1)\n\n sampled_indices_BC = torch.multinomial(final_probs_BCxV, num_samples=1)\n sampled_indices_C = sampled_indices_BC.squeeze(-1)\n return sampled_indices_C\n\n\nclass ComputeDtype(str, Enum):\n FLOAT32 = \"float32\"\n FLOAT16 = \"float16\"\n BFLOAT16 = \"bfloat16\"\n\n def to_dtype(self) -> torch.dtype:\n if self == ComputeDtype.FLOAT32:\n return torch.float32\n elif self == ComputeDtype.FLOAT16:\n return torch.float16\n elif self == ComputeDtype.BFLOAT16:\n return torch.bfloat16\n else:\n raise ValueError(f\"Unsupported compute dtype: {self}\")\n\n\nclass Dia:\n def __init__(\n self,\n config: DiaConfig,\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ):\n \"\"\"Initializes the Dia model.\n\n Args:\n config: The configuration object for the model.\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. 
If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Raises:\n RuntimeError: If there is an error loading the DAC model.\n \"\"\"\n super().__init__()\n self.config = config\n self.device = device if device is not None else _get_default_device()\n if isinstance(compute_dtype, str):\n compute_dtype = ComputeDtype(compute_dtype)\n self.compute_dtype = compute_dtype.to_dtype()\n self.model: DiaModel = DiaModel(config, self.compute_dtype)\n self.dac_model = None\n self._compiled_step = None\n self.load_dac = load_dac\n\n if not self.load_dac:\n print(\"Warning: DAC model will not be loaded. This is not recommended.\")\n\n if torch.cuda.is_available():\n torch.backends.cuda.matmul.allow_tf32 = True\n\n @classmethod\n def from_local(\n cls,\n config_path: str,\n checkpoint_path: str,\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ) -> \"Dia\":\n \"\"\"Loads the Dia model from local configuration and checkpoint files.\n\n Args:\n config_path: Path to the configuration JSON file.\n checkpoint_path: Path to the model checkpoint (.pth) file.\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. 
If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Returns:\n An instance of the Dia model loaded with weights and set to eval mode.\n\n Raises:\n FileNotFoundError: If the config or checkpoint file is not found.\n RuntimeError: If there is an error loading the checkpoint.\n \"\"\"\n config = DiaConfig.load(config_path)\n if config is None:\n raise FileNotFoundError(f\"Config file not found at {config_path}\")\n\n dia = cls(config, compute_dtype, device, load_dac)\n\n try:\n state_dict = torch.load(checkpoint_path, map_location=dia.device)\n dia.model.load_state_dict(state_dict)\n except FileNotFoundError:\n raise FileNotFoundError(f\"Checkpoint file not found at {checkpoint_path}\")\n except Exception as e:\n raise RuntimeError(f\"Error loading checkpoint from {checkpoint_path}\") from e\n\n dia.model.to(dia.device)\n dia.model.eval()\n if load_dac:\n dia._load_dac_model()\n return dia\n\n @classmethod\n def from_pretrained(\n cls,\n model_name: str = \"nari-labs/Dia-1.6B-0626\",\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ) -> \"Dia\":\n \"\"\"Loads the Dia model from a Hugging Face Hub repository.\n\n Downloads the configuration and checkpoint files from the specified\n repository ID and then loads the model.\n\n Args:\n model_name: The Hugging Face Hub repository ID (e.g., \"nari-labs/Dia-1.6B-0626\").\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. 
If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Returns:\n An instance of the Dia model loaded with weights and set to eval mode.\n\n Raises:\n FileNotFoundError: If config or checkpoint download/loading fails.\n RuntimeError: If there is an error loading the checkpoint.\n \"\"\"\n if isinstance(compute_dtype, str):\n compute_dtype = ComputeDtype(compute_dtype)\n\n # Load model directly using DiaModel's from_pretrained which handles HF download\n try:\n loaded_model = DiaModel.from_pretrained(model_name, compute_dtype=compute_dtype.to_dtype())\n except Exception as e:\n raise RuntimeError(f\"Error loading model from Hugging Face Hub ({model_name})\") from e\n\n config = loaded_model.config # Get config from the loaded model\n dia = cls(config, compute_dtype, device, load_dac)\n\n dia.model = loaded_model # Assign the already loaded model\n dia.model.to(dia.device)\n dia.model.eval()\n if load_dac:\n dia._load_dac_model()\n return dia\n\n def _load_dac_model(self):\n \"\"\"Loads the Descript Audio Codec (DAC) model.\n\n Downloads the DAC model if necessary and loads it onto the specified device.\n Sets the DAC model to evaluation mode.\n\n Raises:\n RuntimeError: If downloading or loading the DAC model fails.\n \"\"\"\n import dac\n\n try:\n dac_model_path = dac.utils.download()\n dac_model = dac.DAC.load(dac_model_path).to(self.device)\n dac_model.eval() # Ensure DAC is in eval mode\n except Exception as e:\n raise RuntimeError(\"Failed to load DAC model\") from e\n self.dac_model = dac_model\n\n def _encode_text(self, text: str) -> torch.Tensor:\n \"\"\"Encodes the input text string into a tensor of token IDs using byte-level encoding.\n\n Special tokens [S1] and [S2] are replaced by their byte values. 
The resulting\n sequence is truncated to the maximum configured text length.\n\n Args:\n text: The input text string.\n\n Returns:\n A tensor containing the encoded byte token IDs.\n \"\"\"\n max_len = self.config.encoder_config.max_position_embeddings\n\n byte_text = text.encode(\"utf-8\")\n # Replace special tokens with their byte values if needed by the specific tokenizer/config\n # Assuming byte values 1 and 2 are correct placeholders based on original code\n replaced_bytes = byte_text.replace(b\"[S1]\", b\"\\x01\").replace(b\"[S2]\", b\"\\x02\")\n text_tokens = list(replaced_bytes)\n return torch.tensor(\n text_tokens[:max_len],\n dtype=torch.long,\n device=self.device,\n )\n\n def _pad_text_input(self, text_tokens: list[torch.Tensor]) -> torch.Tensor:\n \"\"\"Pads the text input to the maximum length.\"\"\"\n text_pad_value = 0\n max_len = self.config.encoder_config.max_position_embeddings\n batch_size = len(text_tokens)\n\n src_tokens = torch.full(\n (batch_size, 1, max_len),\n fill_value=text_pad_value,\n dtype=torch.long,\n device=self.device,\n )\n for i in range(batch_size):\n current_len = len(text_tokens[i])\n src_tokens[i, 0, :current_len] = text_tokens[i]\n return src_tokens\n\n def _prepare_audio_prompt(self, audio_prompts: list[torch.Tensor | None]) -> tuple[torch.Tensor, list[int]]:\n \"\"\"Prepares the audio prompt tensor for the decoder.\n\n Handles padding, adds the beginning-of-sequence (BOS) token, applies the\n delay pattern, and determines the number of prefill steps for each item\n in the batch.\n\n Args:\n audio_prompts: A list of audio prompt tensors (encoded DAC frames) or None.\n Each tensor should have shape [T, C].\n\n Returns:\n A tuple containing:\n - delayed_batch (torch.Tensor): The prepared audio prompt tensor with\n delays applied, shape [B, T_max_padded, C].\n - prefill_steps (list[int]): A list containing the number of valid\n tokens (including BOS) for each prompt in the batch.\n \"\"\"\n num_channels = 
self.config.decoder_config.num_channels\n audio_bos_value = self.config.bos_token_id\n delay_pattern = self.config.delay_pattern\n max_delay_pattern = max(delay_pattern)\n batch_size = len(audio_prompts)\n\n max_len = max(p.shape[0] if p is not None else 0 for p in audio_prompts) + max_delay_pattern\n prefill_steps = []\n\n prefill = torch.full(\n (batch_size, max_len, num_channels),\n fill_value=-1,\n dtype=torch.int,\n device=self.device,\n )\n\n prefill[:, 0, :] = audio_bos_value\n\n for i in range(batch_size):\n prompt = audio_prompts[i]\n if prompt is not None:\n prompt = prompt.to(device=self.device, dtype=torch.int)\n prefill[i, 1 : prompt.shape[0] + 1, :] = prompt\n prefill_steps.append(prompt.shape[0] + 1)\n else:\n prefill_steps.append(1)\n\n delay_precomp = build_delay_indices(\n B=batch_size,\n T=max_len,\n C=num_channels,\n delay_pattern=delay_pattern,\n )\n\n delayed_batch = apply_audio_delay(\n audio_BxTxC=prefill,\n pad_value=-1,\n bos_value=audio_bos_value,\n precomp=delay_precomp,\n )\n\n return delayed_batch, prefill_steps\n\n def _prepare_generation(\n self,\n text: torch.Tensor,\n audio_prompts: list[torch.Tensor | None],\n max_tokens: int | None = None,\n attn_fn: Callable = F.scaled_dot_product_attention,\n ):\n \"\"\"Initializes the model state for generation.\n\n Encodes the text input (conditional and unconditional), prepares the\n encoder and decoder states (including KV caches and cross-attention),\n prepares the audio prompt, and performs the initial decoder prefill steps\n based on the audio prompts.\n\n Args:\n text: The padded text input tensor, shape [B, 1, T_text].\n audio_prompts: A list of prepared audio prompt tensors or None.\n\n Returns:\n A tuple containing:\n - dec_state (DecoderInferenceState): The initialized decoder state.\n - dec_output (DecoderOutput): The initialized decoder output manager,\n containing the prefilled audio tokens.\n \"\"\"\n batch_size = text.shape[0]\n\n enc_input_uncond = torch.zeros_like(text)\n 
enc_input_cond = text\n stacked_inputs = torch.stack([enc_input_uncond, enc_input_cond], dim=1)\n enc_input = stacked_inputs.view(2 * batch_size, -1)\n\n enc_state = EncoderInferenceState.new(self.config, enc_input_cond)\n encoder_out = self.model.encoder(enc_input, enc_state)\n\n dec_cross_attn_cache = self.model.decoder.precompute_cross_attn_cache(encoder_out)\n dec_state = DecoderInferenceState.new(\n self.config,\n enc_state,\n encoder_out,\n dec_cross_attn_cache,\n self.compute_dtype,\n max_generation_length=max_tokens,\n )\n prefill, prefill_steps = self._prepare_audio_prompt(audio_prompts)\n\n dec_output = DecoderOutput.new(batch_size, self.config, self.device)\n dec_output.prefill(prefill, prefill_steps)\n\n dec_step = min(prefill_steps) - 1\n if dec_step > 0:\n dec_state.prepare_step(0, dec_step)\n tokens_BxTxC = dec_output.get_tokens_at(0, dec_step).repeat_interleave(2, dim=0)\n self.model.decoder.forward(tokens_BxTxC, dec_state)\n\n return dec_state, dec_output\n\n def _decoder_step(\n self,\n tokens_Bx1xC: torch.Tensor,\n dec_state: DecoderInferenceState,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n top_k: int,\n current_idx: int,\n ) -> torch.Tensor:\n \"\"\"Performs a single step of the decoder inference.\n\n Takes the tokens from the previous step, runs them through the decoder\n (for both conditional and unconditional paths), applies classifier-free\n guidance (CFG), samples the next token using temperature, top-p, and top-k\n sampling, and applies constraints (e.g., preventing EOS in certain channels).\n\n Args:\n tokens_Bx1xC: The input tokens for the current step, shape [2*B, 1, C].\n Repeated for CFG (unconditional and conditional).\n dec_state: The current state of the decoder (KV caches, etc.).\n cfg_scale: The scale factor for classifier-free guidance.\n temperature: The temperature for sampling.\n top_p: The cumulative probability threshold for top-p sampling.\n top_k: The number of top logits to consider for top-k sampling.\n 
current_idx: The current generation step index.\n\n Returns:\n torch.Tensor: The sampled next tokens for each item in the batch,\n shape [B, C].\n \"\"\"\n B = tokens_Bx1xC.shape[0] // 2\n\n audio_eos_value = self.config.eos_token_id\n logits_Bx1xCxV = self.model.decoder.decode_step(tokens_Bx1xC, dec_state, current_idx)\n\n logits_last_2BxCxV = logits_Bx1xCxV[:, -1]\n logits_last_Bx2xCxV = logits_last_2BxCxV.view(B, 2, *logits_last_2BxCxV.shape[1:])\n\n uncond_logits_BxCxV = logits_last_Bx2xCxV[:, 0, :, :] # Shape [B, C, V]\n cond_logits_BxCxV = logits_last_Bx2xCxV[:, 1, :, :] # Shape [B, C, V]\n logits_BxCxV = cond_logits_BxCxV + cfg_scale * (cond_logits_BxCxV - uncond_logits_BxCxV)\n\n _, top_k_indices_BxCxk = torch.topk(logits_BxCxV, k=top_k, dim=-1)\n mask_BxCxV = torch.ones_like(logits_BxCxV, dtype=torch.bool)\n mask_BxCxV = mask_BxCxV.scatter(dim=-1, index=top_k_indices_BxCxk, value=False)\n logits_BxCxV = cond_logits_BxCxV.masked_fill(mask_BxCxV, -torch.inf)\n\n logits_BxCxV[:, :, audio_eos_value + 1 :] = torch.full_like(\n logits_BxCxV[:, :, audio_eos_value + 1 :],\n fill_value=-torch.inf,\n )\n logits_BxCxV[:, 1:, audio_eos_value:] = torch.full_like(\n logits_BxCxV[:, 1:, audio_eos_value:],\n fill_value=-torch.inf,\n )\n\n flat_logits_BCxV = logits_BxCxV.view(B * self.config.decoder_config.num_channels, -1)\n\n pred_BC = _sample_next_token(\n flat_logits_BCxV.float(),\n temperature=temperature,\n top_p=top_p,\n top_k=top_k,\n audio_eos_value=audio_eos_value,\n )\n\n pred_BxC = pred_BC.view(B, self.config.decoder_config.num_channels)\n return pred_BxC\n\n def _generate_output(self, generated_codes: torch.Tensor, lengths_Bx: torch.Tensor) -> list[np.ndarray]:\n \"\"\"Converts generated delayed codes into audio waveforms.\n\n Reverts the delay pattern applied during generation, decodes the resulting\n codebook using the DAC model (if loaded), and returns a list of audio\n waveforms as NumPy arrays. 
If DAC is not loaded, returns the raw codebook indices.\n\n Args:\n generated_codes: The tensor of generated audio codes with delays,\n shape [B, T_gen, C].\n lengths_Bx: A tensor containing the valid length of generated codes\n (excluding padding and BOS/EOS markers) for each item\n in the batch, shape [B].\n\n Returns:\n A list of NumPy arrays, where each array represents the generated audio\n waveform for one item in the batch. If DAC is not loaded, returns the\n raw, reverted codebook indices as NumPy arrays.\n \"\"\"\n num_channels = self.config.decoder_config.num_channels\n batch_size = generated_codes.shape[0]\n seq_length = generated_codes.shape[1]\n delay_pattern = self.config.delay_pattern\n audio_pad_value = self.config.pad_token_id\n max_delay_pattern = max(delay_pattern)\n\n revert_precomp = build_revert_indices(\n B=batch_size,\n T=seq_length,\n C=num_channels,\n delay_pattern=delay_pattern,\n )\n\n codebook = revert_audio_delay(\n audio_BxTxC=generated_codes,\n pad_value=audio_pad_value,\n precomp=revert_precomp,\n T=seq_length,\n )[:, :-max_delay_pattern, :]\n\n min_valid_index = 0\n max_valid_index = 1023\n invalid_mask = (codebook < min_valid_index) | (codebook > max_valid_index)\n codebook[invalid_mask] = 0\n\n audios = []\n\n if self.load_dac:\n for i in range(batch_size):\n audio = self._decode(codebook[i, : lengths_Bx[i], :])\n audio_np = audio.cpu().numpy()\n audios.append(audio_np)\n else:\n for i in range(batch_size):\n audios.append(codebook[i, : lengths_Bx[i], :].cpu().numpy())\n return audios\n\n @torch.no_grad()\n @torch.inference_mode()\n def _encode(self, audio: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Encodes the given audio waveform into a tensor of DAC codebook indices\n \"\"\"\n audio = audio.unsqueeze(0)\n audio_data = self.dac_model.preprocess(audio, DEFAULT_SAMPLE_RATE)\n _, encoded_frame, _, _, _ = self.dac_model.encode(audio_data)\n encoded_frame: torch.Tensor\n return encoded_frame.squeeze(0).transpose(0, 1)\n\n 
@torch.no_grad()\n @torch.inference_mode()\n def _decode(self, audio_codes: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Decodes the given frames into an output audio waveform\n \"\"\"\n audio_codes = audio_codes.unsqueeze(0).transpose(1, 2)\n audio_values, _, _ = self.dac_model.quantizer.from_codes(audio_codes)\n audio_values = self.dac_model.decode(audio_values)\n audio_values: torch.Tensor\n return audio_values.squeeze()\n\n def load_audio(self, audio_path: str) -> torch.Tensor:\n \"\"\"Loads and preprocesses an audio file for use as a prompt.\n\n Loads the audio file, resamples it to the target sample rate if necessary,\n preprocesses it using the DAC model's preprocessing, and encodes it into\n DAC codebook indices.\n\n Args:\n audio_path: Path to the audio file.\n\n Returns:\n torch.Tensor: The encoded audio prompt as DAC codebook indices,\n shape [T, C].\n\n Raises:\n RuntimeError: If the DAC model is not loaded (`load_dac=False` during init).\n FileNotFoundError: If the audio file cannot be found.\n Exception: If there's an error during loading or processing.\n \"\"\"\n if self.dac_model is None:\n raise RuntimeError(\"DAC model is required for loading audio prompts but was not loaded.\")\n audio, sr = torchaudio.load(audio_path, channels_first=True) # C, T\n if sr != DEFAULT_SAMPLE_RATE:\n audio = torchaudio.functional.resample(audio, sr, DEFAULT_SAMPLE_RATE)\n # Convert to mono if stereo\n if audio.shape[0] > 1:\n audio = torch.mean(audio, dim=0, keepdim=True) # Average channels to get mono\n return self._encode(audio.to(self.device))\n\n def save_audio(self, path: str, audio: np.ndarray):\n \"\"\"Saves the generated audio waveform to a file.\n\n Uses the soundfile library to write the NumPy audio array to the specified\n path with the default sample rate.\n\n Args:\n path: The path where the audio file will be saved.\n audio: The audio waveform as a NumPy array.\n \"\"\"\n import soundfile as sf\n\n sf.write(path, audio, DEFAULT_SAMPLE_RATE)\n\n 
@torch.inference_mode()\n def generate(\n self,\n text: str | list[str],\n max_tokens: int = 3072,\n cfg_scale: float = 3.0,\n temperature: float = 1.2,\n top_p: float = 0.95,\n use_torch_compile: bool = False,\n cfg_filter_top_k: int = 45,\n audio_prompt: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,\n audio_prompt_path: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,\n use_cfg_filter: bool | None = None,\n verbose: bool = False,\n ) -> np.ndarray | list[np.ndarray]:\n \"\"\"Generates audio corresponding to the input text.\n\n Args:\n text: The input text prompt, or a list of text prompts for batch generation.\n max_tokens: The maximum number of audio tokens to generate per prompt.\n Defaults to the model's configured audio length if None.\n cfg_scale: The scale factor for classifier-free guidance (CFG). Higher values\n lead to stronger guidance towards the text prompt.\n temperature: The temperature for sampling. Higher values increase randomness.\n top_p: The cumulative probability threshold for nucleus (top-p) sampling.\n use_torch_compile: Whether to compile the generation steps using torch.compile.\n Can significantly speed up generation after the initial\n compilation overhead. 
Defaults to False.\n cfg_filter_top_k: The number of top logits to consider during CFG filtering.\n (Note: This parameter name might be slightly misleading based\n on the code; it's used in the `_sample_next_token` function.)\n audio_prompt: An audio prompt or list of prompts to condition the generation.\n Can be a file path (str), a pre-loaded tensor (DAC codes), or None.\n If a list, its length must match the batch size of the text input.\n audio_prompt_path: (Deprecated) Use `audio_prompt` instead.\n use_cfg_filter: (Deprecated) This parameter is no longer used.\n verbose: If True, prints progress information during generation, including\n speed metrics.\n\n Returns:\n If a single text prompt was provided, returns a NumPy array containing the\n generated audio waveform.\n If a list of text prompts was provided, returns a list of NumPy arrays,\n each corresponding to a prompt in the input list. Returns None for a\n sequence if no audio was generated for it.\n \"\"\"\n batch_size = len(text) if isinstance(text, list) else 1\n audio_eos_value = self.config.eos_token_id\n audio_pad_value = self.config.pad_token_id\n delay_pattern = self.config.delay_pattern\n max_delay_pattern = max(delay_pattern)\n delay_pattern_Cx = torch.tensor(delay_pattern, device=self.device, dtype=torch.long)\n self.model.eval()\n\n if audio_prompt_path:\n print(\"Warning: audio_prompt_path is deprecated. 
Use audio_prompt instead.\")\n audio_prompt = audio_prompt_path\n if use_cfg_filter is not None:\n print(\"Warning: use_cfg_filter is deprecated.\")\n\n if verbose:\n total_start_time = time.time()\n\n if use_torch_compile and not hasattr(self, \"_compiled\"):\n # Compilation can take about a minute.\n self._prepare_generation = torch.compile(self._prepare_generation, dynamic=True, fullgraph=True)\n self._decoder_step = torch.compile(self._decoder_step, fullgraph=True, mode=\"max-autotune\")\n self._compiled = True\n\n if isinstance(audio_prompt, list):\n audio_prompt = [self.load_audio(p) if isinstance(p, str) else p for p in audio_prompt]\n elif isinstance(audio_prompt, str):\n audio_prompt = [self.load_audio(audio_prompt)]\n elif isinstance(audio_prompt, torch.Tensor):\n audio_prompt = [audio_prompt]\n elif audio_prompt is None:\n audio_prompt = [None] * batch_size\n\n assert len(audio_prompt) == batch_size, \"Number of audio prompts must match batch size\"\n\n if isinstance(text, list):\n text = [self._encode_text(t) for t in text]\n else:\n text = [self._encode_text(text)]\n text = self._pad_text_input(text)\n\n dec_state, dec_output = self._prepare_generation(text, audio_prompt, max_tokens=max_tokens)\n dec_step = min(dec_output.prefill_steps) - 1\n current_idx = torch.tensor([dec_step], device=self.device)\n\n eos_detected_Bx = torch.zeros((batch_size,), dtype=torch.bool, device=self.device)\n eos_countdown_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n finished_step_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n\n bos_over = False\n\n if verbose:\n print(\"generate: starting generation loop\")\n if use_torch_compile:\n print(\"generate: using use_torch_compile=True, the first step may be slow\")\n start_time = time.time()\n\n # --- Generation Loop ---\n while dec_step < max_tokens:\n if (eos_countdown_Bx == 0).all():\n break\n\n current_step_idx = dec_step + 1\n 
torch.compiler.cudagraph_mark_step_begin()\n dec_state.prepare_step(dec_step)\n tokens_Bx1xC = dec_output.get_tokens_at(dec_step).repeat_interleave(2, dim=0) # Repeat for CFG\n\n pred_BxC = self._decoder_step(\n tokens_Bx1xC,\n dec_state,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n current_idx,\n )\n\n current_idx += 1\n\n active_mask_Bx = eos_countdown_Bx != 0\n eos_trigger_Bx = torch.zeros_like(active_mask_Bx)\n if active_mask_Bx.any():\n is_eos_token = (~eos_detected_Bx[active_mask_Bx]) & (pred_BxC[active_mask_Bx, 0] == audio_eos_value)\n is_max_len = current_step_idx >= max_tokens - max_delay_pattern\n eos_trigger_Bx[active_mask_Bx] = is_eos_token | is_max_len\n eos_detected_Bx |= eos_trigger_Bx\n start_countdown_mask_Bx = eos_trigger_Bx & (eos_countdown_Bx < 0)\n if start_countdown_mask_Bx.any():\n eos_countdown_Bx[start_countdown_mask_Bx] = max_delay_pattern\n finished_step_Bx[start_countdown_mask_Bx] = current_step_idx\n\n padding_mask_Bx = eos_countdown_Bx > 0\n if padding_mask_Bx.any():\n pred_active_BxC = pred_BxC[padding_mask_Bx].clone()\n countdown_active_Bx = eos_countdown_Bx[padding_mask_Bx]\n step_after_eos_Bx = max_delay_pattern - countdown_active_Bx\n step_after_eos_Bx_ = step_after_eos_Bx.unsqueeze(1)\n delay_pattern_Cx_ = delay_pattern_Cx.unsqueeze(0)\n eos_mask_NxC = step_after_eos_Bx_ == delay_pattern_Cx_\n pad_mask_NxC = step_after_eos_Bx_ > delay_pattern_Cx_\n pred_active_BxC[eos_mask_NxC] = audio_eos_value\n pred_active_BxC[pad_mask_NxC] = audio_pad_value\n pred_BxC[padding_mask_Bx] = pred_active_BxC\n eos_countdown_Bx[padding_mask_Bx] -= 1\n\n # --- Update BOS flag (Original) ---\n if not bos_over:\n bos_over = all(\n dec_step - prefill_step > max_delay_pattern for prefill_step in dec_output.prefill_steps\n )\n\n dec_output.update_one(pred_BxC, current_step_idx, not bos_over)\n\n dec_step += 1\n\n if verbose and dec_step % 86 == 0:\n duration = time.time() - start_time\n if duration > 0:\n print(\n f\"generate step 
{dec_step}: speed={86 * batch_size / duration:.3f} tokens/s, realtime factor={batch_size / duration:.3f}x\"\n )\n start_time = time.time()\n\n # --- Finalize and Extract Output ---\n final_step = dec_step + 1\n\n finished_step_Bx[finished_step_Bx == -1] = final_step - max_delay_pattern\n\n prefill_steps_tensor = torch.tensor(dec_output.prefill_steps, device=self.device)\n lengths_Bx = finished_step_Bx - prefill_steps_tensor\n lengths_Bx = torch.clamp(lengths_Bx, min=0)\n\n max_len = lengths_Bx.max().item() + max_delay_pattern\n outputs = []\n\n if max_len > 0:\n num_channels = self.config.decoder_config.num_channels\n audio_pad_value = self.config.pad_token_id\n generated_codes = torch.full(\n (batch_size, max_len, num_channels),\n fill_value=audio_pad_value,\n dtype=torch.long,\n device=self.device,\n )\n\n for i in range(batch_size):\n start_step = dec_output.prefill_steps[i]\n actual_len = lengths_Bx[i].item() + max_delay_pattern\n if actual_len > 0:\n tokens_to_copy = dec_output.generated_tokens[i, start_step : start_step + actual_len, :]\n generated_codes[i, :actual_len, :] = tokens_to_copy\n\n if verbose:\n avg_steps = lengths_Bx.float().mean().item()\n total_duration = time.time() - total_start_time\n print(f\"generate: avg steps={avg_steps:.1f}, total duration={total_duration:.3f}s\")\n\n del dec_state\n\n outputs = self._generate_output(generated_codes, lengths_Bx)\n else:\n print(\"Warning: Nothing generated for any sequence in the batch.\")\n outputs = [None] * batch_size\n\n return outputs if batch_size > 1 else outputs[0]\n"], ["/dia/dia/audio.py", "import typing as tp\n\nimport torch\n\n\ndef build_delay_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute (t_idx_BxTxC, indices_BTCx3) so that out[t, c] = in[t - delay[c], c].\n Negative t_idx => BOS; t_idx >= T => PAD.\n \"\"\"\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32)\n\n t_idx_BxT = torch.broadcast_to(\n 
torch.arange(T, dtype=torch.int32)[None, :],\n [B, T],\n )\n t_idx_BxTx1 = t_idx_BxT[..., None]\n t_idx_BxTxC = t_idx_BxTx1 - delay_arr.view(1, 1, C)\n\n b_idx_BxTxC = torch.broadcast_to(\n torch.arange(B, dtype=torch.int32).view(B, 1, 1),\n [B, T, C],\n )\n c_idx_BxTxC = torch.broadcast_to(\n torch.arange(C, dtype=torch.int32).view(1, 1, C),\n [B, T, C],\n )\n\n # We must clamp time indices to [0..T-1] so gather_nd equivalent won't fail\n t_clamped_BxTxC = torch.clamp(t_idx_BxTxC, 0, T - 1)\n\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_clamped_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n dim=1,\n ).long() # Ensure indices are long type for indexing\n\n return t_idx_BxTxC, indices_BTCx3\n\n\ndef apply_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n bos_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n) -> torch.Tensor:\n \"\"\"\n Applies the delay pattern to batched audio tokens using precomputed indices,\n inserting BOS where t_idx < 0 and PAD where t_idx >= T.\n\n Args:\n audio_BxTxC: [B, T, C] int16 audio tokens (or int32/float)\n pad_value: the padding token\n bos_value: the BOS token\n precomp: (t_idx_BxTxC, indices_BTCx3) from build_delay_indices\n\n Returns:\n result_BxTxC: [B, T, C] delayed audio tokens\n \"\"\"\n device = audio_BxTxC.device # Get device from input tensor\n t_idx_BxTxC, indices_BTCx3 = precomp\n t_idx_BxTxC = t_idx_BxTxC.to(device) # Move precomputed indices to device\n indices_BTCx3 = indices_BTCx3.to(device)\n\n # Equivalent of tf.gather_nd using advanced indexing\n # Ensure indices are long type if not already (build_delay_indices should handle this)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.shape)\n\n # Create masks on the correct device\n mask_bos = t_idx_BxTxC < 0 # => place bos_value\n mask_pad = t_idx_BxTxC >= audio_BxTxC.shape[1] # => place pad_value\n\n # Create scalar 
tensors on the correct device\n bos_tensor = torch.tensor(bos_value, dtype=audio_BxTxC.dtype, device=device)\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n\n # If mask_bos, BOS; else if mask_pad, PAD; else original gather\n # All tensors should now be on the same device\n result_BxTxC = torch.where(mask_bos, bos_tensor, torch.where(mask_pad, pad_tensor, gathered_BxTxC))\n\n return result_BxTxC\n\n\ndef build_revert_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute indices for the revert operation using PyTorch.\n\n Returns:\n A tuple (t_idx_BxTxC, indices_BTCx3) where:\n - t_idx_BxTxC is a tensor of shape [B, T, C] computed as time indices plus the delay.\n - indices_BTCx3 is a tensor of shape [B*T*C, 3] used for gathering, computed from:\n batch indices, clamped time indices, and channel indices.\n \"\"\"\n # Use default device unless specified otherwise; assumes inputs might define device later\n device = None # Or determine dynamically if needed, e.g., from a model parameter\n\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32, device=device)\n\n t_idx_BT1 = torch.broadcast_to(torch.arange(T, device=device).unsqueeze(0), [B, T])\n t_idx_BT1 = t_idx_BT1.unsqueeze(-1)\n\n t_idx_BxTxC = torch.minimum(\n t_idx_BT1 + delay_arr.view(1, 1, C),\n torch.tensor(T - 1, device=device),\n )\n b_idx_BxTxC = torch.broadcast_to(torch.arange(B, device=device).view(B, 1, 1), [B, T, C])\n c_idx_BxTxC = torch.broadcast_to(torch.arange(C, device=device).view(1, 1, C), [B, T, C])\n\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_idx_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n axis=1,\n ).long() # Ensure indices are long type\n\n return t_idx_BxTxC, indices_BTCx3\n\n\ndef revert_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n T: int,\n) -> torch.Tensor:\n \"\"\"\n Reverts a delay 
pattern from batched audio tokens using precomputed indices (PyTorch version).\n\n Args:\n audio_BxTxC: Input delayed audio tensor\n pad_value: Padding value for out-of-bounds indices\n precomp: Precomputed revert indices tuple containing:\n - t_idx_BxTxC: Time offset indices tensor\n - indices_BTCx3: Gather indices tensor for original audio\n T: Original sequence length before padding\n\n Returns:\n Reverted audio tensor with same shape as input\n \"\"\"\n t_idx_BxTxC, indices_BTCx3 = precomp\n device = audio_BxTxC.device # Get device from input tensor\n\n # Move precomputed indices to the same device as audio_BxTxC if they aren't already\n t_idx_BxTxC = t_idx_BxTxC.to(device)\n indices_BTCx3 = indices_BTCx3.to(device)\n\n # Using PyTorch advanced indexing (equivalent to tf.gather_nd or np equivalent)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.size()) # Use .size() for robust reshaping\n\n # Create pad_tensor on the correct device\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n # Create T tensor on the correct device for comparison\n T_tensor = torch.tensor(T, device=device)\n\n result_BxTxC = torch.where(t_idx_BxTxC >= T_tensor, pad_tensor, gathered_BxTxC) # Changed np.where to torch.where\n\n return result_BxTxC\n"], ["/dia/app.py", "import argparse\nimport contextlib\nimport io\nimport random\nimport tempfile\nimport time\nfrom pathlib import Path\nfrom typing import Optional, Tuple\n\nimport gradio as gr\nimport numpy as np\nimport soundfile as sf\nimport torch\n\nfrom dia.model import Dia\n\n\n# --- Global Setup ---\nparser = argparse.ArgumentParser(description=\"Gradio interface for Nari TTS\")\nparser.add_argument(\"--device\", type=str, default=None, help=\"Force device (e.g., 'cuda', 'mps', 'cpu')\")\nparser.add_argument(\"--share\", action=\"store_true\", help=\"Enable Gradio sharing\")\n\nargs = parser.parse_args()\n\n\n# 
Determine device\nif args.device:\n device = torch.device(args.device)\nelif torch.cuda.is_available():\n device = torch.device(\"cuda\")\n# Simplified MPS check for broader compatibility\nelif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n # Basic check is usually sufficient, detailed check can be problematic\n device = torch.device(\"mps\")\nelse:\n device = torch.device(\"cpu\")\n\nprint(f\"Using device: {device}\")\n\n# Load Nari model and config\nprint(\"Loading Nari model...\")\ntry:\n dtype_map = {\n \"cpu\": \"float32\",\n \"mps\": \"float32\", # Apple M series – better with float32\n \"cuda\": \"float16\", # NVIDIA – better with float16\n }\n\n dtype = dtype_map.get(device.type, \"float16\")\n print(f\"Using device: {device}, attempting to load model with {dtype}\")\n model = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=dtype, device=device)\nexcept Exception as e:\n print(f\"Error loading Nari model: {e}\")\n raise\n\n\ndef set_seed(seed: int):\n \"\"\"Sets the random seed for reproducibility.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef run_inference(\n text_input: str,\n audio_prompt_text_input: str,\n audio_prompt_input: Optional[Tuple[int, np.ndarray]],\n max_new_tokens: int,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n cfg_filter_top_k: int,\n speed_factor: float,\n seed: Optional[int] = None,\n):\n \"\"\"\n Runs Nari inference using the globally loaded model and provided inputs.\n Uses temporary files for text and audio prompt compatibility with inference.generate.\n \"\"\"\n global model, device # Access global model, config, device\n console_output_buffer = io.StringIO()\n\n with contextlib.redirect_stdout(console_output_buffer):\n # Prepend transcript text if 
audio_prompt provided\n if audio_prompt_input and audio_prompt_text_input and not audio_prompt_text_input.isspace():\n text_input = audio_prompt_text_input + \"\\n\" + text_input\n text_input = text_input.strip()\n\n if audio_prompt_input and (not audio_prompt_text_input or audio_prompt_text_input.isspace()):\n raise gr.Error(\"Audio Prompt Text input cannot be empty.\")\n\n if not text_input or text_input.isspace():\n raise gr.Error(\"Text input cannot be empty.\")\n\n # Preprocess Audio\n temp_txt_file_path = None\n temp_audio_prompt_path = None\n output_audio = (44100, np.zeros(1, dtype=np.float32))\n\n try:\n prompt_path_for_generate = None\n if audio_prompt_input is not None:\n sr, audio_data = audio_prompt_input\n # Check if audio_data is valid\n if audio_data is None or audio_data.size == 0 or audio_data.max() == 0: # Check for silence/empty\n gr.Warning(\"Audio prompt seems empty or silent, ignoring prompt.\")\n else:\n # Save prompt audio to a temporary WAV file\n with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".wav\", delete=False) as f_audio:\n temp_audio_prompt_path = f_audio.name # Store path for cleanup\n\n # Basic audio preprocessing for consistency\n # Convert to float32 in [-1, 1] range if integer type\n if np.issubdtype(audio_data.dtype, np.integer):\n max_val = np.iinfo(audio_data.dtype).max\n audio_data = audio_data.astype(np.float32) / max_val\n elif not np.issubdtype(audio_data.dtype, np.floating):\n gr.Warning(f\"Unsupported audio prompt dtype {audio_data.dtype}, attempting conversion.\")\n # Attempt conversion, might fail for complex types\n try:\n audio_data = audio_data.astype(np.float32)\n except Exception as conv_e:\n raise gr.Error(f\"Failed to convert audio prompt to float32: {conv_e}\")\n\n # Ensure mono (average channels if stereo)\n if audio_data.ndim > 1:\n if audio_data.shape[0] == 2: # Assume (2, N)\n audio_data = np.mean(audio_data, axis=0)\n elif audio_data.shape[1] == 2: # Assume (N, 2)\n audio_data = 
np.mean(audio_data, axis=1)\n else:\n gr.Warning(\n f\"Audio prompt has unexpected shape {audio_data.shape}, taking first channel/axis.\"\n )\n audio_data = (\n audio_data[0] if audio_data.shape[0] < audio_data.shape[1] else audio_data[:, 0]\n )\n audio_data = np.ascontiguousarray(audio_data) # Ensure contiguous after slicing/mean\n\n # Write using soundfile\n try:\n sf.write(\n temp_audio_prompt_path, audio_data, sr, subtype=\"FLOAT\"\n ) # Explicitly use FLOAT subtype\n prompt_path_for_generate = temp_audio_prompt_path\n print(f\"Created temporary audio prompt file: {temp_audio_prompt_path} (orig sr: {sr})\")\n except Exception as write_e:\n print(f\"Error writing temporary audio file: {write_e}\")\n raise gr.Error(f\"Failed to save audio prompt: {write_e}\")\n\n # Set and Display Generation Seed\n if seed is None or seed < 0:\n seed = random.randint(0, 2**32 - 1)\n print(f\"\\nNo seed provided, generated random seed: {seed}\\n\")\n else:\n print(f\"\\nUsing user-selected seed: {seed}\\n\")\n set_seed(seed)\n\n # Run Generation\n print(f'Generating speech: \\n\"{text_input}\"\\n')\n\n start_time = time.time()\n\n # Use torch.inference_mode() context manager for the generation call\n with torch.inference_mode():\n output_audio_np = model.generate(\n text_input,\n max_tokens=max_new_tokens,\n cfg_scale=cfg_scale,\n temperature=temperature,\n top_p=top_p,\n cfg_filter_top_k=cfg_filter_top_k, # Pass the value here\n use_torch_compile=False, # Keep False for Gradio stability\n audio_prompt=prompt_path_for_generate,\n verbose=True,\n )\n\n end_time = time.time()\n print(f\"Generation finished in {end_time - start_time:.2f} seconds.\\n\")\n\n # 4. 
Convert Codes to Audio\n if output_audio_np is not None:\n # Get sample rate from the loaded DAC model\n output_sr = 44100\n\n # --- Slow down audio ---\n original_len = len(output_audio_np)\n # Ensure speed_factor is positive and not excessively small/large to avoid issues\n speed_factor = max(0.1, min(speed_factor, 5.0))\n target_len = int(original_len / speed_factor) # Target length based on speed_factor\n if target_len != original_len and target_len > 0: # Only interpolate if length changes and is valid\n x_original = np.arange(original_len)\n x_resampled = np.linspace(0, original_len - 1, target_len)\n resampled_audio_np = np.interp(x_resampled, x_original, output_audio_np)\n output_audio = (\n output_sr,\n resampled_audio_np.astype(np.float32),\n ) # Use resampled audio\n print(\n f\"Resampled audio from {original_len} to {target_len} samples for {speed_factor:.2f}x speed.\"\n )\n else:\n output_audio = (\n output_sr,\n output_audio_np,\n ) # Keep original if calculation fails or no change\n print(f\"Skipping audio speed adjustment (factor: {speed_factor:.2f}).\")\n # --- End slowdown ---\n\n print(f\"Audio conversion successful. 
Final shape: {output_audio[1].shape}, Sample Rate: {output_sr}\")\n\n # Explicitly convert to int16 to prevent Gradio warning\n if output_audio[1].dtype == np.float32 or output_audio[1].dtype == np.float64:\n audio_for_gradio = np.clip(output_audio[1], -1.0, 1.0)\n audio_for_gradio = (audio_for_gradio * 32767).astype(np.int16)\n output_audio = (output_sr, audio_for_gradio)\n print(\"Converted audio to int16 for Gradio output.\")\n\n else:\n print(\"\\nGeneration finished, but no valid tokens were produced.\")\n # Return default silence\n gr.Warning(\"Generation produced no output.\")\n\n except Exception as e:\n print(f\"Error during inference: {e}\")\n import traceback\n\n traceback.print_exc()\n # Re-raise as Gradio error to display nicely in the UI\n raise gr.Error(f\"Inference failed: {e}\")\n\n finally:\n # Cleanup Temporary Files defensively\n if temp_txt_file_path and Path(temp_txt_file_path).exists():\n try:\n Path(temp_txt_file_path).unlink()\n print(f\"Deleted temporary text file: {temp_txt_file_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary text file {temp_txt_file_path}: {e}\")\n if temp_audio_prompt_path and Path(temp_audio_prompt_path).exists():\n try:\n Path(temp_audio_prompt_path).unlink()\n print(f\"Deleted temporary audio prompt file: {temp_audio_prompt_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary audio prompt file {temp_audio_prompt_path}: {e}\")\n\n # After generation, capture the printed output\n console_output = console_output_buffer.getvalue()\n\n return output_audio, seed, console_output\n\n\n# --- Create Gradio Interface ---\ncss = \"\"\"\n#col-container {max-width: 90%; margin-left: auto; margin-right: auto;}\n\"\"\"\n# Attempt to load default text from example.txt\ndefault_text = \"[S1] Dia is an open weights text to dialogue model. \\n[S2] You get full control over scripts and voices. \\n[S1] Wow. Amazing. 
(laughs) \\n[S2] Try it now on Git hub or Hugging Face.\"\nexample_txt_path = Path(\"./example.txt\")\nif example_txt_path.exists():\n try:\n default_text = example_txt_path.read_text(encoding=\"utf-8\").strip()\n if not default_text: # Handle empty example file\n default_text = \"Example text file was empty.\"\n except Exception as e:\n print(f\"Warning: Could not read example.txt: {e}\")\n\n\n# Build Gradio UI\nwith gr.Blocks(css=css, theme=\"gradio/dark\") as demo:\n gr.Markdown(\"# Nari Text-to-Speech Synthesis\")\n\n with gr.Row(equal_height=False):\n with gr.Column(scale=1):\n with gr.Accordion(\"Audio Reference Prompt (Optional)\", open=False):\n audio_prompt_input = gr.Audio(\n label=\"Audio Prompt (Optional)\",\n show_label=True,\n sources=[\"upload\", \"microphone\"],\n type=\"numpy\",\n )\n audio_prompt_text_input = gr.Textbox(\n label=\"Transcript of Audio Prompt (Required if using Audio Prompt)\",\n placeholder=\"Enter text here...\",\n value=\"\",\n lines=5, # Increased lines\n )\n text_input = gr.Textbox(\n label=\"Text To Generate\",\n placeholder=\"Enter text here...\",\n value=default_text,\n lines=5, # Increased lines\n )\n with gr.Accordion(\"Generation Parameters\", open=False):\n max_new_tokens = gr.Slider(\n label=\"Max New Tokens (Audio Length)\",\n minimum=860,\n maximum=3072,\n value=model.config.decoder_config.max_position_embeddings, # Use config default if available, else fallback\n step=50,\n info=\"Controls the maximum length of the generated audio (more tokens = longer audio).\",\n )\n cfg_scale = gr.Slider(\n label=\"CFG Scale (Guidance Strength)\",\n minimum=1.0,\n maximum=5.0,\n value=3.0, # Default from inference.py\n step=0.1,\n info=\"Higher values increase adherence to the text prompt.\",\n )\n temperature = gr.Slider(\n label=\"Temperature (Randomness)\",\n minimum=1.0,\n maximum=2.5,\n value=1.8, # Default from inference.py\n step=0.05,\n info=\"Lower values make the output more deterministic, higher values increase 
randomness.\",\n )\n top_p = gr.Slider(\n label=\"Top P (Nucleus Sampling)\",\n minimum=0.70,\n maximum=1.0,\n value=0.95, # Default from inference.py\n step=0.01,\n info=\"Filters vocabulary to the most likely tokens cumulatively reaching probability P.\",\n )\n cfg_filter_top_k = gr.Slider(\n label=\"CFG Filter Top K\",\n minimum=15,\n maximum=100,\n value=45,\n step=1,\n info=\"Top k filter for CFG guidance.\",\n )\n speed_factor_slider = gr.Slider(\n label=\"Speed Factor\",\n minimum=0.8,\n maximum=1.0,\n value=1.0,\n step=0.02,\n info=\"Adjusts the speed of the generated audio (1.0 = original speed).\",\n )\n seed_input = gr.Number(\n label=\"Generation Seed (Optional)\",\n value=-1,\n precision=0, # No decimal points\n step=1,\n interactive=True,\n info=\"Set a generation seed for reproducible outputs. Leave empty or -1 for random seed.\",\n )\n\n run_button = gr.Button(\"Generate Audio\", variant=\"primary\")\n\n with gr.Column(scale=1):\n audio_output = gr.Audio(\n label=\"Generated Audio\",\n type=\"numpy\",\n autoplay=False,\n )\n seed_output = gr.Textbox(label=\"Generation Seed\", interactive=False)\n console_output = gr.Textbox(label=\"Console Output Log\", lines=10, interactive=False)\n\n # Link button click to function\n run_button.click(\n fn=run_inference,\n inputs=[\n text_input,\n audio_prompt_text_input,\n audio_prompt_input,\n max_new_tokens,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n speed_factor_slider,\n seed_input,\n ],\n outputs=[\n audio_output,\n seed_output,\n console_output,\n ], # Add status_output here if using it\n api_name=\"generate_audio\",\n )\n\n # Add examples (ensure the prompt path is correct or remove it if example file doesn't exist)\n example_prompt_path = \"./example_prompt.mp3\" # Adjust if needed\n examples_list = [\n [\n \"[S1] Oh fire! Oh my goodness! What's the procedure? What to we do people? The smoke could be coming through an air duct! \\n[S2] Oh my god! Okay.. it's happening. Everybody stay calm! 
\\n[S1] What's the procedure... \\n[S2] Everybody stay fucking calm!!!... Everybody fucking calm down!!!!! \\n[S1] No! No! If you touch the handle, if its hot there might be a fire down the hallway! \",\n None,\n 3072,\n 3.0,\n 1.8,\n 0.95,\n 45,\n 1.0,\n ],\n [\n \"[S1] Open weights text to dialogue model. \\n[S2] You get full control over scripts and voices. \\n[S1] I'm biased, but I think we clearly won. \\n[S2] Hard to disagree. (laughs) \\n[S1] Thanks for listening to this demo. \\n[S2] Try it now on Git hub and Hugging Face. \\n[S1] If you liked our model, please give us a star and share to your friends. \\n[S2] This was Nari Labs.\",\n example_prompt_path if Path(example_prompt_path).exists() else None,\n 3072,\n 3.0,\n 1.8,\n 0.95,\n 45,\n 1.0,\n ],\n ]\n\n if examples_list:\n gr.Examples(\n examples=examples_list,\n inputs=[\n text_input,\n audio_prompt_input,\n max_new_tokens,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n speed_factor_slider,\n seed_input,\n ],\n outputs=[audio_output],\n fn=run_inference,\n cache_examples=False,\n label=\"Examples (Click to Run)\",\n )\n else:\n gr.Markdown(\"_(No examples configured or example prompt file missing)_\")\n\n# --- Launch the App ---\nif __name__ == \"__main__\":\n print(\"Launching Gradio interface...\")\n\n # set `GRADIO_SERVER_NAME`, `GRADIO_SERVER_PORT` env vars to override default values\n # use `GRADIO_SERVER_NAME=0.0.0.0` for Docker\n demo.launch(share=args.share)\n"], ["/dia/cli.py", "import argparse\nimport os\nimport random\n\nimport numpy as np\nimport soundfile as sf\nimport torch\n\nfrom dia.model import Dia\n\n\ndef set_seed(seed: int):\n \"\"\"Sets the random seed for reproducibility.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # Ensure deterministic behavior for cuDNN (if used)\n torch.backends.cudnn.deterministic = True\n 
torch.backends.cudnn.benchmark = False\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Generate audio using the Dia model.\")\n\n parser.add_argument(\"text\", type=str, help=\"Input text for speech generation.\")\n parser.add_argument(\n \"--output\", type=str, required=True, help=\"Path to save the generated audio file (e.g., output.wav).\"\n )\n\n parser.add_argument(\n \"--repo-id\",\n type=str,\n default=\"nari-labs/Dia-1.6B-0626\",\n help=\"Hugging Face repository ID (e.g., nari-labs/Dia-1.6B-0626).\",\n )\n parser.add_argument(\n \"--local-paths\", action=\"store_true\", help=\"Load model from local config and checkpoint files.\"\n )\n\n parser.add_argument(\n \"--config\", type=str, help=\"Path to local config.json file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--checkpoint\", type=str, help=\"Path to local model checkpoint .pth file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--audio-prompt\", type=str, default=None, help=\"Path to an optional audio prompt WAV file for voice cloning.\"\n )\n\n gen_group = parser.add_argument_group(\"Generation Parameters\")\n gen_group.add_argument(\n \"--max-tokens\",\n type=int,\n default=None,\n help=\"Maximum number of audio tokens to generate (defaults to config value).\",\n )\n gen_group.add_argument(\n \"--cfg-scale\", type=float, default=3.0, help=\"Classifier-Free Guidance scale (default: 3.0).\"\n )\n gen_group.add_argument(\n \"--temperature\", type=float, default=1.3, help=\"Sampling temperature (higher is more random, default: 0.7).\"\n )\n gen_group.add_argument(\"--top-p\", type=float, default=0.95, help=\"Nucleus sampling probability (default: 0.95).\")\n\n infra_group = parser.add_argument_group(\"Infrastructure\")\n infra_group.add_argument(\"--seed\", type=int, default=None, help=\"Random seed for reproducibility.\")\n infra_group.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n 
help=\"Device to run inference on (e.g., 'cuda', 'cpu', default: auto).\",\n )\n\n args = parser.parse_args()\n\n # Validation for local paths\n if args.local_paths:\n if not args.config:\n parser.error(\"--config is required when --local-paths is set.\")\n if not args.checkpoint:\n parser.error(\"--checkpoint is required when --local-paths is set.\")\n if not os.path.exists(args.config):\n parser.error(f\"Config file not found: {args.config}\")\n if not os.path.exists(args.checkpoint):\n parser.error(f\"Checkpoint file not found: {args.checkpoint}\")\n\n # Set seed if provided\n if args.seed is not None:\n set_seed(args.seed)\n print(f\"Using user-selected seed: {args.seed}\")\n\n # Determine device\n device = torch.device(args.device)\n print(f\"Using device: {device}\")\n\n # Load model\n print(\"Loading model...\")\n if args.local_paths:\n print(f\"Loading from local paths: config='{args.config}', checkpoint='{args.checkpoint}'\")\n try:\n model = Dia.from_local(args.config, args.checkpoint, device=device)\n except Exception as e:\n print(f\"Error loading local model: {e}\")\n exit(1)\n else:\n print(f\"Loading from Hugging Face Hub: repo_id='{args.repo_id}'\")\n try:\n model = Dia.from_pretrained(args.repo_id, device=device)\n except Exception as e:\n print(f\"Error loading model from Hub: {e}\")\n exit(1)\n print(\"Model loaded.\")\n\n # Generate audio\n print(\"Generating audio...\")\n try:\n sample_rate = 44100 # Default assumption\n\n output_audio = model.generate(\n text=args.text,\n audio_prompt=args.audio_prompt,\n max_tokens=args.max_tokens,\n cfg_scale=args.cfg_scale,\n temperature=args.temperature,\n top_p=args.top_p,\n )\n print(\"Audio generation complete.\")\n\n print(f\"Saving audio to {args.output}...\")\n os.makedirs(os.path.dirname(args.output) or \".\", exist_ok=True)\n\n sf.write(args.output, output_audio, sample_rate)\n print(f\"Audio successfully saved to {args.output}\")\n\n except Exception as e:\n print(f\"Error during audio generation 
or saving: {e}\")\n exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n"], ["/dia/example/voice_clone.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\n# You should put the transcript of the voice you want to clone\n# We will use the audio created by running simple.py as an example.\n# Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.\nclone_from_text = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\nclone_from_audio = \"simple.mp3\"\n\n# For your custom needs, replace above with below and add your audio file to this directory:\n# clone_from_text = \"[S1] ... [S2] ... [S1] ... corresponding to your_audio_name.mp3\"\n# clone_from_audio = \"your_audio_name.mp3\"\n\n# Text to generate\ntext_to_generate = \"[S1] Hello, how are you? [S2] I'm good, thank you. [S1] What's your name? [S2] My name is Dia. [S1] Nice to meet you. [S2] Nice to meet you too.\"\n\n# It will only return the audio from the text_to_generate\noutput = model.generate(\n clone_from_text + text_to_generate,\n audio_prompt=clone_from_audio,\n use_torch_compile=False,\n verbose=True,\n cfg_scale=4.0,\n temperature=1.8,\n top_p=0.90,\n cfg_filter_top_k=50,\n)\n\nmodel.save_audio(\"voice_clone.mp3\", output)\n"], ["/dia/example/simple-cpu.py", "import torch\n\nfrom dia.model import Dia\n\n\n# Select device: CPU\ndevice = torch.device(\"cpu\")\nprint(f\"Using device: {device}\")\n\n# Load model\nmodel = Dia.from_pretrained(\n \"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float32\", device=device\n) # Float32 works better than float16 on CPU - you can also test with float16\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\noutput = model.generate(text, use_torch_compile=False, verbose=True)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/example/benchmark.py", "from random import choice\n\nimport torch\n\nfrom dia.model import Dia\n\n\ntorch._inductor.config.coordinate_descent_tuning = True\ntorch._inductor.config.triton.unique_kernel_names = True\ntorch._inductor.config.fx_graph_cache = True\n\n# debugging\ntorch._logging.set_logs(graph_breaks=True, recompiles=True)\n\nmodel_name = \"nari-labs/Dia-1.6B-0626\"\ncompute_dtype = \"float16\"\n\nmodel = Dia.from_pretrained(model_name, compute_dtype=compute_dtype)\n\n\ntest_cases = [\n \"[S1] Dia is an open weights text to dialogue model.\",\n \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\",\n \"[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code.\",\n \"[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code. 
[S2] It is a new feature in PyTorch that allows you to compile your model with a single line of code.\",\n]\n\n\n# Wram up\nfor _ in range(2):\n text = choice(test_cases)\n output = model.generate(text, audio_prompt=\"./example_prompt.mp3\", use_torch_compile=True, verbose=True)\n output = model.generate(text, use_torch_compile=True, verbose=True)\n\n# Benchmark\nfor _ in range(10):\n text = choice(test_cases)\n output = model.generate(text, use_torch_compile=True, verbose=True)\n output = model.generate(text, audio_prompt=\"./example_prompt.mp3\", use_torch_compile=True, verbose=True)\n"], ["/dia/example/voice_clone_batch.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\n# You should put the transcript of the voice you want to clone\n# We will use the audio created by running simple.py as an example.\n# Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.\nclone_from_text = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\n# For your custom needs, replace above with below and add your audio file to this directory:\n# clone_from_text = \"[S1] ... [S2] ... [S1] ... corresponding to your_audio_name.mp3\"\n# clone_from_audio = \"your_audio_name.mp3\"\n\n# Text to generate\ntext_to_generate = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\nclone_from_audios = [f\"simple_{i}.mp3\" for i in range(10)]\n\ntexts = [clone_from_text + text_to_generate for _ in range(10)]\n\n# It will only return the audio from the text_to_generate\noutput = model.generate(texts, audio_prompt=clone_from_audios, use_torch_compile=True, verbose=True, max_tokens=2000)\n\nfor i, o in enumerate(output):\n model.save_audio(f\"voice_clone_{i}.mp3\", o)\n"], ["/dia/example/simple.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\noutput = model.generate(\n text,\n use_torch_compile=False,\n verbose=True,\n cfg_scale=3.0,\n temperature=1.8,\n top_p=0.90,\n cfg_filter_top_k=50,\n)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/example/simple_batch.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\ntexts = [text for _ in range(10)]\n\noutput = model.generate(texts, use_torch_compile=True, verbose=True, max_tokens=1500)\n\nfor i, o in enumerate(output):\n model.save_audio(f\"simple_{i}.mp3\", o)\n"], ["/dia/hf.py", "from transformers import AutoProcessor, DiaForConditionalGeneration\n\n\ntorch_device = \"cuda\"\nmodel_checkpoint = \"nari-labs/Dia-1.6B-0626\"\n\ntext = [\n \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n]\nprocessor = AutoProcessor.from_pretrained(model_checkpoint)\ninputs = processor(text=text, padding=True, return_tensors=\"pt\").to(torch_device)\n\nmodel = DiaForConditionalGeneration.from_pretrained(model_checkpoint).to(torch_device)\noutputs = model.generate(**inputs, max_new_tokens=3072, guidance_scale=3.0, temperature=1.8, top_p=0.90, top_k=45)\n\noutputs = processor.batch_decode(outputs)\nprocessor.save_audio(outputs, \"example.mp3\")\n"], ["/dia/example/simple-mac.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\n# It is important to set the `use_torch_compile` argument to `False` when using Dia on MacOS.\n# This is because the `torch.compile` function is not supported on MacOS.\noutput = model.generate(text, use_torch_compile=False, verbose=True)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/dia/__init__.py", "from .model import Dia\n\n\n__all__ = [\n \"Dia\",\n]\n"]], "task_instance_info": {"created_time": "2025-08-20 15:28:53", "created_task_model": "DeepSeek-R1", "function_skeleton": "def get_linear_weight(self, dense: \n pass", "code_description": "This code segment functions to: Reshape and transpose multidimensional weight matrices from a generalized linear transformation layer into a two-dimensional format compatible with standard linear layer weight layouts."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\n def get_linear_weight(self, dense: DenseGeneral):\n \"\"\"Extract and reshape DenseGeneral weights to match linear layer format.\"\"\"\n in_ndim = len(dense.in_shapes)\n perm = list(range(in_ndim, dense.weight.ndim)) + list(range(in_ndim))\n return 
dense.weight.permute(perm).reshape(-1, *dense.in_shapes).squeeze()\n[TASK_END]\n```", "inference_time": "2025-08-20 15-29-44"}, "editdistance_info": {"edit_distance": 17.2727, "calculate_time": "2025-08-20 15:29:44", "true_code_clean": "def get_linear_weight(self, dense: DenseGeneral):\n W_dg = dense.weight.data\n out_features = 1\n input_features = 1\n for dim in dense.out_features:\n out_features *= dim\n for dim in dense.in_shapes:\n input_features *= dim\n W_dg_reshaped_for_linear_T = W_dg.reshape(input_features, out_features)\n linear_weight = W_dg_reshaped_for_linear_T.transpose(0, 1).contiguous()\n return linear_weight", "predict_code_clean": "def get_linear_weight(self, dense: DenseGeneral):\n in_ndim = len(dense.in_shapes)\n perm = list(range(in_ndim, dense.weight.ndim)) + list(range(in_ndim))\n return dense.weight.permute(perm).reshape(-1, *dense.in_shapes).squeeze()"}} {"repo_name": "dia", "file_name": "/dia/dia/audio.py", "inference_info": {"prefix_code": "import typing as tp\n\nimport torch\n\n\n", "suffix_code": "\n\n\ndef apply_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n bos_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n) -> torch.Tensor:\n \"\"\"\n Applies the delay pattern to batched audio tokens using precomputed indices,\n inserting BOS where t_idx < 0 and PAD where t_idx >= T.\n\n Args:\n audio_BxTxC: [B, T, C] int16 audio tokens (or int32/float)\n pad_value: the padding token\n bos_value: the BOS token\n precomp: (t_idx_BxTxC, indices_BTCx3) from build_delay_indices\n\n Returns:\n result_BxTxC: [B, T, C] delayed audio tokens\n \"\"\"\n device = audio_BxTxC.device # Get device from input tensor\n t_idx_BxTxC, indices_BTCx3 = precomp\n t_idx_BxTxC = t_idx_BxTxC.to(device) # Move precomputed indices to device\n indices_BTCx3 = indices_BTCx3.to(device)\n\n # Equivalent of tf.gather_nd using advanced indexing\n # Ensure indices are long type if not already (build_delay_indices should handle this)\n gathered_flat = 
audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.shape)\n\n # Create masks on the correct device\n mask_bos = t_idx_BxTxC < 0 # => place bos_value\n mask_pad = t_idx_BxTxC >= audio_BxTxC.shape[1] # => place pad_value\n\n # Create scalar tensors on the correct device\n bos_tensor = torch.tensor(bos_value, dtype=audio_BxTxC.dtype, device=device)\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n\n # If mask_bos, BOS; else if mask_pad, PAD; else original gather\n # All tensors should now be on the same device\n result_BxTxC = torch.where(mask_bos, bos_tensor, torch.where(mask_pad, pad_tensor, gathered_BxTxC))\n\n return result_BxTxC\n\n\ndef build_revert_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute indices for the revert operation using PyTorch.\n\n Returns:\n A tuple (t_idx_BxTxC, indices_BTCx3) where:\n - t_idx_BxTxC is a tensor of shape [B, T, C] computed as time indices plus the delay.\n - indices_BTCx3 is a tensor of shape [B*T*C, 3] used for gathering, computed from:\n batch indices, clamped time indices, and channel indices.\n \"\"\"\n # Use default device unless specified otherwise; assumes inputs might define device later\n device = None # Or determine dynamically if needed, e.g., from a model parameter\n\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32, device=device)\n\n t_idx_BT1 = torch.broadcast_to(torch.arange(T, device=device).unsqueeze(0), [B, T])\n t_idx_BT1 = t_idx_BT1.unsqueeze(-1)\n\n t_idx_BxTxC = torch.minimum(\n t_idx_BT1 + delay_arr.view(1, 1, C),\n torch.tensor(T - 1, device=device),\n )\n b_idx_BxTxC = torch.broadcast_to(torch.arange(B, device=device).view(B, 1, 1), [B, T, C])\n c_idx_BxTxC = torch.broadcast_to(torch.arange(C, device=device).view(1, 1, C), [B, T, C])\n\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n 
t_idx_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n axis=1,\n ).long() # Ensure indices are long type\n\n return t_idx_BxTxC, indices_BTCx3\n\n\ndef revert_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n T: int,\n) -> torch.Tensor:\n \"\"\"\n Reverts a delay pattern from batched audio tokens using precomputed indices (PyTorch version).\n\n Args:\n audio_BxTxC: Input delayed audio tensor\n pad_value: Padding value for out-of-bounds indices\n precomp: Precomputed revert indices tuple containing:\n - t_idx_BxTxC: Time offset indices tensor\n - indices_BTCx3: Gather indices tensor for original audio\n T: Original sequence length before padding\n\n Returns:\n Reverted audio tensor with same shape as input\n \"\"\"\n t_idx_BxTxC, indices_BTCx3 = precomp\n device = audio_BxTxC.device # Get device from input tensor\n\n # Move precomputed indices to the same device as audio_BxTxC if they aren't already\n t_idx_BxTxC = t_idx_BxTxC.to(device)\n indices_BTCx3 = indices_BTCx3.to(device)\n\n # Using PyTorch advanced indexing (equivalent to tf.gather_nd or np equivalent)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.size()) # Use .size() for robust reshaping\n\n # Create pad_tensor on the correct device\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n # Create T tensor on the correct device for comparison\n T_tensor = torch.tensor(T, device=device)\n\n result_BxTxC = torch.where(t_idx_BxTxC >= T_tensor, pad_tensor, gathered_BxTxC) # Changed np.where to torch.where\n\n return result_BxTxC\n", "middle_code": "def build_delay_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32)\n t_idx_BxT = torch.broadcast_to(\n torch.arange(T, dtype=torch.int32)[None, :],\n [B, T],\n )\n 
t_idx_BxTx1 = t_idx_BxT[..., None]\n t_idx_BxTxC = t_idx_BxTx1 - delay_arr.view(1, 1, C)\n b_idx_BxTxC = torch.broadcast_to(\n torch.arange(B, dtype=torch.int32).view(B, 1, 1),\n [B, T, C],\n )\n c_idx_BxTxC = torch.broadcast_to(\n torch.arange(C, dtype=torch.int32).view(1, 1, C),\n [B, T, C],\n )\n t_clamped_BxTxC = torch.clamp(t_idx_BxTxC, 0, T - 1)\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_clamped_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n dim=1,\n ).long() \n return t_idx_BxTxC, indices_BTCx3", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/dia/dia/model.py", "import time\nfrom enum import Enum\nfrom typing import Callable\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchaudio\n\nfrom .audio import apply_audio_delay, build_delay_indices, build_revert_indices, revert_audio_delay\nfrom .config import DiaConfig\nfrom .layers import DiaModel\nfrom .state import DecoderInferenceState, DecoderOutput, EncoderInferenceState\n\n\nDEFAULT_SAMPLE_RATE = 44100\nSAMPLE_RATE_RATIO = 512\n\n\ndef _get_default_device():\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n elif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n return torch.device(\"mps\")\n return torch.device(\"cpu\")\n\n\ndef _sample_next_token(\n logits_BCxV: torch.Tensor,\n temperature: float,\n top_p: float,\n top_k: int | None,\n audio_eos_value: int,\n) -> torch.Tensor:\n if temperature == 0.0:\n return torch.argmax(logits_BCxV, dim=-1)\n\n logits_BCxV = logits_BCxV / temperature\n\n if audio_eos_value is not None and audio_eos_value >= 0:\n top_logit_indices_BC = torch.argmax(logits_BCxV, dim=-1)\n eos_not_highest_mask_BC = top_logit_indices_BC != audio_eos_value\n mask_eos_unless_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)\n mask_eos_unless_highest_BCxV[eos_not_highest_mask_BC, audio_eos_value] = True\n 
logits_BCxV = logits_BCxV.masked_fill(mask_eos_unless_highest_BCxV, -torch.inf)\n eos_highest_mask_BC = top_logit_indices_BC == audio_eos_value\n mask_eos_highest_BCxV = torch.zeros_like(logits_BCxV, dtype=torch.bool)\n mask_eos_highest_BCxV[eos_highest_mask_BC, :audio_eos_value] = True\n logits_BCxV = logits_BCxV.masked_fill(mask_eos_highest_BCxV, -torch.inf)\n\n if top_k is not None:\n _, top_k_indices_BCxV = torch.topk(logits_BCxV, k=top_k, dim=-1)\n mask = torch.ones_like(logits_BCxV, dtype=torch.bool)\n mask = mask.scatter(dim=-1, index=top_k_indices_BCxV, value=False)\n logits_BCxV = logits_BCxV.masked_fill(mask, -torch.inf)\n\n if top_p < 1.0:\n probs_BCxV = torch.softmax(logits_BCxV, dim=-1)\n sorted_probs_BCxV, sorted_indices_BCxV = torch.sort(probs_BCxV, dim=-1, descending=True)\n cumulative_probs_BCxV = torch.cumsum(sorted_probs_BCxV, dim=-1)\n\n sorted_indices_to_remove_BCxV = cumulative_probs_BCxV > top_p\n sorted_indices_to_remove_BCxV = torch.roll(sorted_indices_to_remove_BCxV, shifts=1, dims=-1)\n sorted_indices_to_remove_BCxV[..., 0] = torch.zeros_like(sorted_indices_to_remove_BCxV[..., 0])\n\n indices_to_remove_BCxV = torch.zeros_like(sorted_indices_to_remove_BCxV)\n indices_to_remove_BCxV = indices_to_remove_BCxV.scatter(\n dim=-1, index=sorted_indices_BCxV, src=sorted_indices_to_remove_BCxV\n )\n logits_BCxV = logits_BCxV.masked_fill(indices_to_remove_BCxV, -torch.inf)\n\n final_probs_BCxV = torch.softmax(logits_BCxV, dim=-1)\n\n sampled_indices_BC = torch.multinomial(final_probs_BCxV, num_samples=1)\n sampled_indices_C = sampled_indices_BC.squeeze(-1)\n return sampled_indices_C\n\n\nclass ComputeDtype(str, Enum):\n FLOAT32 = \"float32\"\n FLOAT16 = \"float16\"\n BFLOAT16 = \"bfloat16\"\n\n def to_dtype(self) -> torch.dtype:\n if self == ComputeDtype.FLOAT32:\n return torch.float32\n elif self == ComputeDtype.FLOAT16:\n return torch.float16\n elif self == ComputeDtype.BFLOAT16:\n return torch.bfloat16\n else:\n raise ValueError(f\"Unsupported 
compute dtype: {self}\")\n\n\nclass Dia:\n def __init__(\n self,\n config: DiaConfig,\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ):\n \"\"\"Initializes the Dia model.\n\n Args:\n config: The configuration object for the model.\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Raises:\n RuntimeError: If there is an error loading the DAC model.\n \"\"\"\n super().__init__()\n self.config = config\n self.device = device if device is not None else _get_default_device()\n if isinstance(compute_dtype, str):\n compute_dtype = ComputeDtype(compute_dtype)\n self.compute_dtype = compute_dtype.to_dtype()\n self.model: DiaModel = DiaModel(config, self.compute_dtype)\n self.dac_model = None\n self._compiled_step = None\n self.load_dac = load_dac\n\n if not self.load_dac:\n print(\"Warning: DAC model will not be loaded. This is not recommended.\")\n\n if torch.cuda.is_available():\n torch.backends.cuda.matmul.allow_tf32 = True\n\n @classmethod\n def from_local(\n cls,\n config_path: str,\n checkpoint_path: str,\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ) -> \"Dia\":\n \"\"\"Loads the Dia model from local configuration and checkpoint files.\n\n Args:\n config_path: Path to the configuration JSON file.\n checkpoint_path: Path to the model checkpoint (.pth) file.\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. 
If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Returns:\n An instance of the Dia model loaded with weights and set to eval mode.\n\n Raises:\n FileNotFoundError: If the config or checkpoint file is not found.\n RuntimeError: If there is an error loading the checkpoint.\n \"\"\"\n config = DiaConfig.load(config_path)\n if config is None:\n raise FileNotFoundError(f\"Config file not found at {config_path}\")\n\n dia = cls(config, compute_dtype, device, load_dac)\n\n try:\n state_dict = torch.load(checkpoint_path, map_location=dia.device)\n dia.model.load_state_dict(state_dict)\n except FileNotFoundError:\n raise FileNotFoundError(f\"Checkpoint file not found at {checkpoint_path}\")\n except Exception as e:\n raise RuntimeError(f\"Error loading checkpoint from {checkpoint_path}\") from e\n\n dia.model.to(dia.device)\n dia.model.eval()\n if load_dac:\n dia._load_dac_model()\n return dia\n\n @classmethod\n def from_pretrained(\n cls,\n model_name: str = \"nari-labs/Dia-1.6B-0626\",\n compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,\n device: torch.device | None = None,\n load_dac: bool = True,\n ) -> \"Dia\":\n \"\"\"Loads the Dia model from a Hugging Face Hub repository.\n\n Downloads the configuration and checkpoint files from the specified\n repository ID and then loads the model.\n\n Args:\n model_name: The Hugging Face Hub repository ID (e.g., \"nari-labs/Dia-1.6B-0626\").\n compute_dtype: The computation dtype to use.\n device: The device to load the model onto. 
If None, will automatically select the best available device.\n load_dac: Whether to load the DAC model.\n\n Returns:\n An instance of the Dia model loaded with weights and set to eval mode.\n\n Raises:\n FileNotFoundError: If config or checkpoint download/loading fails.\n RuntimeError: If there is an error loading the checkpoint.\n \"\"\"\n if isinstance(compute_dtype, str):\n compute_dtype = ComputeDtype(compute_dtype)\n\n # Load model directly using DiaModel's from_pretrained which handles HF download\n try:\n loaded_model = DiaModel.from_pretrained(model_name, compute_dtype=compute_dtype.to_dtype())\n except Exception as e:\n raise RuntimeError(f\"Error loading model from Hugging Face Hub ({model_name})\") from e\n\n config = loaded_model.config # Get config from the loaded model\n dia = cls(config, compute_dtype, device, load_dac)\n\n dia.model = loaded_model # Assign the already loaded model\n dia.model.to(dia.device)\n dia.model.eval()\n if load_dac:\n dia._load_dac_model()\n return dia\n\n def _load_dac_model(self):\n \"\"\"Loads the Descript Audio Codec (DAC) model.\n\n Downloads the DAC model if necessary and loads it onto the specified device.\n Sets the DAC model to evaluation mode.\n\n Raises:\n RuntimeError: If downloading or loading the DAC model fails.\n \"\"\"\n import dac\n\n try:\n dac_model_path = dac.utils.download()\n dac_model = dac.DAC.load(dac_model_path).to(self.device)\n dac_model.eval() # Ensure DAC is in eval mode\n except Exception as e:\n raise RuntimeError(\"Failed to load DAC model\") from e\n self.dac_model = dac_model\n\n def _encode_text(self, text: str) -> torch.Tensor:\n \"\"\"Encodes the input text string into a tensor of token IDs using byte-level encoding.\n\n Special tokens [S1] and [S2] are replaced by their byte values. 
The resulting\n sequence is truncated to the maximum configured text length.\n\n Args:\n text: The input text string.\n\n Returns:\n A tensor containing the encoded byte token IDs.\n \"\"\"\n max_len = self.config.encoder_config.max_position_embeddings\n\n byte_text = text.encode(\"utf-8\")\n # Replace special tokens with their byte values if needed by the specific tokenizer/config\n # Assuming byte values 1 and 2 are correct placeholders based on original code\n replaced_bytes = byte_text.replace(b\"[S1]\", b\"\\x01\").replace(b\"[S2]\", b\"\\x02\")\n text_tokens = list(replaced_bytes)\n return torch.tensor(\n text_tokens[:max_len],\n dtype=torch.long,\n device=self.device,\n )\n\n def _pad_text_input(self, text_tokens: list[torch.Tensor]) -> torch.Tensor:\n \"\"\"Pads the text input to the maximum length.\"\"\"\n text_pad_value = 0\n max_len = self.config.encoder_config.max_position_embeddings\n batch_size = len(text_tokens)\n\n src_tokens = torch.full(\n (batch_size, 1, max_len),\n fill_value=text_pad_value,\n dtype=torch.long,\n device=self.device,\n )\n for i in range(batch_size):\n current_len = len(text_tokens[i])\n src_tokens[i, 0, :current_len] = text_tokens[i]\n return src_tokens\n\n def _prepare_audio_prompt(self, audio_prompts: list[torch.Tensor | None]) -> tuple[torch.Tensor, list[int]]:\n \"\"\"Prepares the audio prompt tensor for the decoder.\n\n Handles padding, adds the beginning-of-sequence (BOS) token, applies the\n delay pattern, and determines the number of prefill steps for each item\n in the batch.\n\n Args:\n audio_prompts: A list of audio prompt tensors (encoded DAC frames) or None.\n Each tensor should have shape [T, C].\n\n Returns:\n A tuple containing:\n - delayed_batch (torch.Tensor): The prepared audio prompt tensor with\n delays applied, shape [B, T_max_padded, C].\n - prefill_steps (list[int]): A list containing the number of valid\n tokens (including BOS) for each prompt in the batch.\n \"\"\"\n num_channels = 
self.config.decoder_config.num_channels\n audio_bos_value = self.config.bos_token_id\n delay_pattern = self.config.delay_pattern\n max_delay_pattern = max(delay_pattern)\n batch_size = len(audio_prompts)\n\n max_len = max(p.shape[0] if p is not None else 0 for p in audio_prompts) + max_delay_pattern\n prefill_steps = []\n\n prefill = torch.full(\n (batch_size, max_len, num_channels),\n fill_value=-1,\n dtype=torch.int,\n device=self.device,\n )\n\n prefill[:, 0, :] = audio_bos_value\n\n for i in range(batch_size):\n prompt = audio_prompts[i]\n if prompt is not None:\n prompt = prompt.to(device=self.device, dtype=torch.int)\n prefill[i, 1 : prompt.shape[0] + 1, :] = prompt\n prefill_steps.append(prompt.shape[0] + 1)\n else:\n prefill_steps.append(1)\n\n delay_precomp = build_delay_indices(\n B=batch_size,\n T=max_len,\n C=num_channels,\n delay_pattern=delay_pattern,\n )\n\n delayed_batch = apply_audio_delay(\n audio_BxTxC=prefill,\n pad_value=-1,\n bos_value=audio_bos_value,\n precomp=delay_precomp,\n )\n\n return delayed_batch, prefill_steps\n\n def _prepare_generation(\n self,\n text: torch.Tensor,\n audio_prompts: list[torch.Tensor | None],\n max_tokens: int | None = None,\n attn_fn: Callable = F.scaled_dot_product_attention,\n ):\n \"\"\"Initializes the model state for generation.\n\n Encodes the text input (conditional and unconditional), prepares the\n encoder and decoder states (including KV caches and cross-attention),\n prepares the audio prompt, and performs the initial decoder prefill steps\n based on the audio prompts.\n\n Args:\n text: The padded text input tensor, shape [B, 1, T_text].\n audio_prompts: A list of prepared audio prompt tensors or None.\n\n Returns:\n A tuple containing:\n - dec_state (DecoderInferenceState): The initialized decoder state.\n - dec_output (DecoderOutput): The initialized decoder output manager,\n containing the prefilled audio tokens.\n \"\"\"\n batch_size = text.shape[0]\n\n enc_input_uncond = torch.zeros_like(text)\n 
enc_input_cond = text\n stacked_inputs = torch.stack([enc_input_uncond, enc_input_cond], dim=1)\n enc_input = stacked_inputs.view(2 * batch_size, -1)\n\n enc_state = EncoderInferenceState.new(self.config, enc_input_cond)\n encoder_out = self.model.encoder(enc_input, enc_state)\n\n dec_cross_attn_cache = self.model.decoder.precompute_cross_attn_cache(encoder_out)\n dec_state = DecoderInferenceState.new(\n self.config,\n enc_state,\n encoder_out,\n dec_cross_attn_cache,\n self.compute_dtype,\n max_generation_length=max_tokens,\n )\n prefill, prefill_steps = self._prepare_audio_prompt(audio_prompts)\n\n dec_output = DecoderOutput.new(batch_size, self.config, self.device)\n dec_output.prefill(prefill, prefill_steps)\n\n dec_step = min(prefill_steps) - 1\n if dec_step > 0:\n dec_state.prepare_step(0, dec_step)\n tokens_BxTxC = dec_output.get_tokens_at(0, dec_step).repeat_interleave(2, dim=0)\n self.model.decoder.forward(tokens_BxTxC, dec_state)\n\n return dec_state, dec_output\n\n def _decoder_step(\n self,\n tokens_Bx1xC: torch.Tensor,\n dec_state: DecoderInferenceState,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n top_k: int,\n current_idx: int,\n ) -> torch.Tensor:\n \"\"\"Performs a single step of the decoder inference.\n\n Takes the tokens from the previous step, runs them through the decoder\n (for both conditional and unconditional paths), applies classifier-free\n guidance (CFG), samples the next token using temperature, top-p, and top-k\n sampling, and applies constraints (e.g., preventing EOS in certain channels).\n\n Args:\n tokens_Bx1xC: The input tokens for the current step, shape [2*B, 1, C].\n Repeated for CFG (unconditional and conditional).\n dec_state: The current state of the decoder (KV caches, etc.).\n cfg_scale: The scale factor for classifier-free guidance.\n temperature: The temperature for sampling.\n top_p: The cumulative probability threshold for top-p sampling.\n top_k: The number of top logits to consider for top-k sampling.\n 
current_idx: The current generation step index.\n\n Returns:\n torch.Tensor: The sampled next tokens for each item in the batch,\n shape [B, C].\n \"\"\"\n B = tokens_Bx1xC.shape[0] // 2\n\n audio_eos_value = self.config.eos_token_id\n logits_Bx1xCxV = self.model.decoder.decode_step(tokens_Bx1xC, dec_state, current_idx)\n\n logits_last_2BxCxV = logits_Bx1xCxV[:, -1]\n logits_last_Bx2xCxV = logits_last_2BxCxV.view(B, 2, *logits_last_2BxCxV.shape[1:])\n\n uncond_logits_BxCxV = logits_last_Bx2xCxV[:, 0, :, :] # Shape [B, C, V]\n cond_logits_BxCxV = logits_last_Bx2xCxV[:, 1, :, :] # Shape [B, C, V]\n logits_BxCxV = cond_logits_BxCxV + cfg_scale * (cond_logits_BxCxV - uncond_logits_BxCxV)\n\n _, top_k_indices_BxCxk = torch.topk(logits_BxCxV, k=top_k, dim=-1)\n mask_BxCxV = torch.ones_like(logits_BxCxV, dtype=torch.bool)\n mask_BxCxV = mask_BxCxV.scatter(dim=-1, index=top_k_indices_BxCxk, value=False)\n logits_BxCxV = cond_logits_BxCxV.masked_fill(mask_BxCxV, -torch.inf)\n\n logits_BxCxV[:, :, audio_eos_value + 1 :] = torch.full_like(\n logits_BxCxV[:, :, audio_eos_value + 1 :],\n fill_value=-torch.inf,\n )\n logits_BxCxV[:, 1:, audio_eos_value:] = torch.full_like(\n logits_BxCxV[:, 1:, audio_eos_value:],\n fill_value=-torch.inf,\n )\n\n flat_logits_BCxV = logits_BxCxV.view(B * self.config.decoder_config.num_channels, -1)\n\n pred_BC = _sample_next_token(\n flat_logits_BCxV.float(),\n temperature=temperature,\n top_p=top_p,\n top_k=top_k,\n audio_eos_value=audio_eos_value,\n )\n\n pred_BxC = pred_BC.view(B, self.config.decoder_config.num_channels)\n return pred_BxC\n\n def _generate_output(self, generated_codes: torch.Tensor, lengths_Bx: torch.Tensor) -> list[np.ndarray]:\n \"\"\"Converts generated delayed codes into audio waveforms.\n\n Reverts the delay pattern applied during generation, decodes the resulting\n codebook using the DAC model (if loaded), and returns a list of audio\n waveforms as NumPy arrays. 
If DAC is not loaded, returns the raw codebook indices.\n\n Args:\n generated_codes: The tensor of generated audio codes with delays,\n shape [B, T_gen, C].\n lengths_Bx: A tensor containing the valid length of generated codes\n (excluding padding and BOS/EOS markers) for each item\n in the batch, shape [B].\n\n Returns:\n A list of NumPy arrays, where each array represents the generated audio\n waveform for one item in the batch. If DAC is not loaded, returns the\n raw, reverted codebook indices as NumPy arrays.\n \"\"\"\n num_channels = self.config.decoder_config.num_channels\n batch_size = generated_codes.shape[0]\n seq_length = generated_codes.shape[1]\n delay_pattern = self.config.delay_pattern\n audio_pad_value = self.config.pad_token_id\n max_delay_pattern = max(delay_pattern)\n\n revert_precomp = build_revert_indices(\n B=batch_size,\n T=seq_length,\n C=num_channels,\n delay_pattern=delay_pattern,\n )\n\n codebook = revert_audio_delay(\n audio_BxTxC=generated_codes,\n pad_value=audio_pad_value,\n precomp=revert_precomp,\n T=seq_length,\n )[:, :-max_delay_pattern, :]\n\n min_valid_index = 0\n max_valid_index = 1023\n invalid_mask = (codebook < min_valid_index) | (codebook > max_valid_index)\n codebook[invalid_mask] = 0\n\n audios = []\n\n if self.load_dac:\n for i in range(batch_size):\n audio = self._decode(codebook[i, : lengths_Bx[i], :])\n audio_np = audio.cpu().numpy()\n audios.append(audio_np)\n else:\n for i in range(batch_size):\n audios.append(codebook[i, : lengths_Bx[i], :].cpu().numpy())\n return audios\n\n @torch.no_grad()\n @torch.inference_mode()\n def _encode(self, audio: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Encodes the given audio waveform into a tensor of DAC codebook indices\n \"\"\"\n audio = audio.unsqueeze(0)\n audio_data = self.dac_model.preprocess(audio, DEFAULT_SAMPLE_RATE)\n _, encoded_frame, _, _, _ = self.dac_model.encode(audio_data)\n encoded_frame: torch.Tensor\n return encoded_frame.squeeze(0).transpose(0, 1)\n\n 
@torch.no_grad()\n @torch.inference_mode()\n def _decode(self, audio_codes: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Decodes the given frames into an output audio waveform\n \"\"\"\n audio_codes = audio_codes.unsqueeze(0).transpose(1, 2)\n audio_values, _, _ = self.dac_model.quantizer.from_codes(audio_codes)\n audio_values = self.dac_model.decode(audio_values)\n audio_values: torch.Tensor\n return audio_values.squeeze()\n\n def load_audio(self, audio_path: str) -> torch.Tensor:\n \"\"\"Loads and preprocesses an audio file for use as a prompt.\n\n Loads the audio file, resamples it to the target sample rate if necessary,\n preprocesses it using the DAC model's preprocessing, and encodes it into\n DAC codebook indices.\n\n Args:\n audio_path: Path to the audio file.\n\n Returns:\n torch.Tensor: The encoded audio prompt as DAC codebook indices,\n shape [T, C].\n\n Raises:\n RuntimeError: If the DAC model is not loaded (`load_dac=False` during init).\n FileNotFoundError: If the audio file cannot be found.\n Exception: If there's an error during loading or processing.\n \"\"\"\n if self.dac_model is None:\n raise RuntimeError(\"DAC model is required for loading audio prompts but was not loaded.\")\n audio, sr = torchaudio.load(audio_path, channels_first=True) # C, T\n if sr != DEFAULT_SAMPLE_RATE:\n audio = torchaudio.functional.resample(audio, sr, DEFAULT_SAMPLE_RATE)\n # Convert to mono if stereo\n if audio.shape[0] > 1:\n audio = torch.mean(audio, dim=0, keepdim=True) # Average channels to get mono\n return self._encode(audio.to(self.device))\n\n def save_audio(self, path: str, audio: np.ndarray):\n \"\"\"Saves the generated audio waveform to a file.\n\n Uses the soundfile library to write the NumPy audio array to the specified\n path with the default sample rate.\n\n Args:\n path: The path where the audio file will be saved.\n audio: The audio waveform as a NumPy array.\n \"\"\"\n import soundfile as sf\n\n sf.write(path, audio, DEFAULT_SAMPLE_RATE)\n\n 
@torch.inference_mode()\n def generate(\n self,\n text: str | list[str],\n max_tokens: int = 3072,\n cfg_scale: float = 3.0,\n temperature: float = 1.2,\n top_p: float = 0.95,\n use_torch_compile: bool = False,\n cfg_filter_top_k: int = 45,\n audio_prompt: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,\n audio_prompt_path: list[str | torch.Tensor | None] | str | torch.Tensor | None = None,\n use_cfg_filter: bool | None = None,\n verbose: bool = False,\n ) -> np.ndarray | list[np.ndarray]:\n \"\"\"Generates audio corresponding to the input text.\n\n Args:\n text: The input text prompt, or a list of text prompts for batch generation.\n max_tokens: The maximum number of audio tokens to generate per prompt.\n Defaults to the model's configured audio length if None.\n cfg_scale: The scale factor for classifier-free guidance (CFG). Higher values\n lead to stronger guidance towards the text prompt.\n temperature: The temperature for sampling. Higher values increase randomness.\n top_p: The cumulative probability threshold for nucleus (top-p) sampling.\n use_torch_compile: Whether to compile the generation steps using torch.compile.\n Can significantly speed up generation after the initial\n compilation overhead. 
Defaults to False.\n cfg_filter_top_k: The number of top logits to consider during CFG filtering.\n (Note: This parameter name might be slightly misleading based\n on the code; it's used in the `_sample_next_token` function.)\n audio_prompt: An audio prompt or list of prompts to condition the generation.\n Can be a file path (str), a pre-loaded tensor (DAC codes), or None.\n If a list, its length must match the batch size of the text input.\n audio_prompt_path: (Deprecated) Use `audio_prompt` instead.\n use_cfg_filter: (Deprecated) This parameter is no longer used.\n verbose: If True, prints progress information during generation, including\n speed metrics.\n\n Returns:\n If a single text prompt was provided, returns a NumPy array containing the\n generated audio waveform.\n If a list of text prompts was provided, returns a list of NumPy arrays,\n each corresponding to a prompt in the input list. Returns None for a\n sequence if no audio was generated for it.\n \"\"\"\n batch_size = len(text) if isinstance(text, list) else 1\n audio_eos_value = self.config.eos_token_id\n audio_pad_value = self.config.pad_token_id\n delay_pattern = self.config.delay_pattern\n max_delay_pattern = max(delay_pattern)\n delay_pattern_Cx = torch.tensor(delay_pattern, device=self.device, dtype=torch.long)\n self.model.eval()\n\n if audio_prompt_path:\n print(\"Warning: audio_prompt_path is deprecated. 
Use audio_prompt instead.\")\n audio_prompt = audio_prompt_path\n if use_cfg_filter is not None:\n print(\"Warning: use_cfg_filter is deprecated.\")\n\n if verbose:\n total_start_time = time.time()\n\n if use_torch_compile and not hasattr(self, \"_compiled\"):\n # Compilation can take about a minute.\n self._prepare_generation = torch.compile(self._prepare_generation, dynamic=True, fullgraph=True)\n self._decoder_step = torch.compile(self._decoder_step, fullgraph=True, mode=\"max-autotune\")\n self._compiled = True\n\n if isinstance(audio_prompt, list):\n audio_prompt = [self.load_audio(p) if isinstance(p, str) else p for p in audio_prompt]\n elif isinstance(audio_prompt, str):\n audio_prompt = [self.load_audio(audio_prompt)]\n elif isinstance(audio_prompt, torch.Tensor):\n audio_prompt = [audio_prompt]\n elif audio_prompt is None:\n audio_prompt = [None] * batch_size\n\n assert len(audio_prompt) == batch_size, \"Number of audio prompts must match batch size\"\n\n if isinstance(text, list):\n text = [self._encode_text(t) for t in text]\n else:\n text = [self._encode_text(text)]\n text = self._pad_text_input(text)\n\n dec_state, dec_output = self._prepare_generation(text, audio_prompt, max_tokens=max_tokens)\n dec_step = min(dec_output.prefill_steps) - 1\n current_idx = torch.tensor([dec_step], device=self.device)\n\n eos_detected_Bx = torch.zeros((batch_size,), dtype=torch.bool, device=self.device)\n eos_countdown_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n finished_step_Bx = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n\n bos_over = False\n\n if verbose:\n print(\"generate: starting generation loop\")\n if use_torch_compile:\n print(\"generate: using use_torch_compile=True, the first step may be slow\")\n start_time = time.time()\n\n # --- Generation Loop ---\n while dec_step < max_tokens:\n if (eos_countdown_Bx == 0).all():\n break\n\n current_step_idx = dec_step + 1\n 
torch.compiler.cudagraph_mark_step_begin()\n dec_state.prepare_step(dec_step)\n tokens_Bx1xC = dec_output.get_tokens_at(dec_step).repeat_interleave(2, dim=0) # Repeat for CFG\n\n pred_BxC = self._decoder_step(\n tokens_Bx1xC,\n dec_state,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n current_idx,\n )\n\n current_idx += 1\n\n active_mask_Bx = eos_countdown_Bx != 0\n eos_trigger_Bx = torch.zeros_like(active_mask_Bx)\n if active_mask_Bx.any():\n is_eos_token = (~eos_detected_Bx[active_mask_Bx]) & (pred_BxC[active_mask_Bx, 0] == audio_eos_value)\n is_max_len = current_step_idx >= max_tokens - max_delay_pattern\n eos_trigger_Bx[active_mask_Bx] = is_eos_token | is_max_len\n eos_detected_Bx |= eos_trigger_Bx\n start_countdown_mask_Bx = eos_trigger_Bx & (eos_countdown_Bx < 0)\n if start_countdown_mask_Bx.any():\n eos_countdown_Bx[start_countdown_mask_Bx] = max_delay_pattern\n finished_step_Bx[start_countdown_mask_Bx] = current_step_idx\n\n padding_mask_Bx = eos_countdown_Bx > 0\n if padding_mask_Bx.any():\n pred_active_BxC = pred_BxC[padding_mask_Bx].clone()\n countdown_active_Bx = eos_countdown_Bx[padding_mask_Bx]\n step_after_eos_Bx = max_delay_pattern - countdown_active_Bx\n step_after_eos_Bx_ = step_after_eos_Bx.unsqueeze(1)\n delay_pattern_Cx_ = delay_pattern_Cx.unsqueeze(0)\n eos_mask_NxC = step_after_eos_Bx_ == delay_pattern_Cx_\n pad_mask_NxC = step_after_eos_Bx_ > delay_pattern_Cx_\n pred_active_BxC[eos_mask_NxC] = audio_eos_value\n pred_active_BxC[pad_mask_NxC] = audio_pad_value\n pred_BxC[padding_mask_Bx] = pred_active_BxC\n eos_countdown_Bx[padding_mask_Bx] -= 1\n\n # --- Update BOS flag (Original) ---\n if not bos_over:\n bos_over = all(\n dec_step - prefill_step > max_delay_pattern for prefill_step in dec_output.prefill_steps\n )\n\n dec_output.update_one(pred_BxC, current_step_idx, not bos_over)\n\n dec_step += 1\n\n if verbose and dec_step % 86 == 0:\n duration = time.time() - start_time\n if duration > 0:\n print(\n f\"generate step 
{dec_step}: speed={86 * batch_size / duration:.3f} tokens/s, realtime factor={batch_size / duration:.3f}x\"\n )\n start_time = time.time()\n\n # --- Finalize and Extract Output ---\n final_step = dec_step + 1\n\n finished_step_Bx[finished_step_Bx == -1] = final_step - max_delay_pattern\n\n prefill_steps_tensor = torch.tensor(dec_output.prefill_steps, device=self.device)\n lengths_Bx = finished_step_Bx - prefill_steps_tensor\n lengths_Bx = torch.clamp(lengths_Bx, min=0)\n\n max_len = lengths_Bx.max().item() + max_delay_pattern\n outputs = []\n\n if max_len > 0:\n num_channels = self.config.decoder_config.num_channels\n audio_pad_value = self.config.pad_token_id\n generated_codes = torch.full(\n (batch_size, max_len, num_channels),\n fill_value=audio_pad_value,\n dtype=torch.long,\n device=self.device,\n )\n\n for i in range(batch_size):\n start_step = dec_output.prefill_steps[i]\n actual_len = lengths_Bx[i].item() + max_delay_pattern\n if actual_len > 0:\n tokens_to_copy = dec_output.generated_tokens[i, start_step : start_step + actual_len, :]\n generated_codes[i, :actual_len, :] = tokens_to_copy\n\n if verbose:\n avg_steps = lengths_Bx.float().mean().item()\n total_duration = time.time() - total_start_time\n print(f\"generate: avg steps={avg_steps:.1f}, total duration={total_duration:.3f}s\")\n\n del dec_state\n\n outputs = self._generate_output(generated_codes, lengths_Bx)\n else:\n print(\"Warning: Nothing generated for any sequence in the batch.\")\n outputs = [None] * batch_size\n\n return outputs if batch_size > 1 else outputs[0]\n"], ["/dia/dia/layers.py", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torch import Tensor\nfrom torch.nn import RMSNorm\n\nfrom .config import DecoderConfig, DiaConfig, EncoderConfig\nfrom .state import DecoderInferenceState, EncoderInferenceState, KVCache\n\n\ndef _normalize_axes(axes: tuple[int, ...], ndim: int) -> tuple[int, ...]:\n return tuple(ax 
if ax >= 0 else ndim + ax for ax in axes)\n\n\nclass DenseGeneral(nn.Module):\n \"\"\"\n PyTorch equivalent of flax.linen.DenseGeneral with shapes defined at init.\n Stores weights (`kernel`) in the same layout as Jax and uses torch.tensordot\n for the generalized matrix multiplication. Weight/bias shapes are calculated\n and parameters created during initialization based on config.\n `load_weights` validates shapes and copies data.\n Attributes:\n axis (Tuple[int, ...]): Input axis or axes to contract.\n in_shapes (Tuple[int, ...]): Sizes of the input dimensions specified by `axis`.\n out_features (Tuple[int, ...]): Shape of the output features (non-contracted dims).\n use_bias (bool): Whether to add a bias term.\n weight (nn.Parameter): The kernel parameter.\n bias (Optional[nn.Parameter]): The bias parameter (if use_bias=True).\n \"\"\"\n\n def __init__(\n self,\n in_shapes: tuple[int, ...],\n out_features: tuple[int, ...],\n axis: tuple[int, ...] = (-1,),\n weight_dtype: torch.dtype | None = None,\n device: torch.device | None = None,\n ):\n super().__init__()\n self.in_shapes = in_shapes\n self.out_features = out_features\n self.axis = axis\n self.kernel_shape = self.in_shapes + self.out_features\n\n factory_kwargs = {\"device\": device, \"dtype\": weight_dtype}\n self.weight = nn.Parameter(torch.empty(self.kernel_shape, **factory_kwargs))\n\n def forward(self, inputs: Tensor) -> Tensor:\n norm_axis = _normalize_axes(self.axis, inputs.ndim)\n kernel_contract_axes = tuple(range(len(norm_axis)))\n\n output = torch.tensordot(\n inputs.to(self.weight.dtype),\n self.weight,\n dims=(norm_axis, kernel_contract_axes),\n ).to(inputs.dtype)\n return output\n\n\nclass MlpBlock(nn.Module):\n \"\"\"MLP block using DenseGeneral.\"\"\"\n\n def __init__(self, embed_dim: int, intermediate_dim: int, compute_dtype: torch.dtype):\n super().__init__()\n self.dtype = compute_dtype\n\n self.wi_fused = DenseGeneral(\n in_shapes=(embed_dim,),\n out_features=(2, intermediate_dim),\n 
axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n self.wo = DenseGeneral(\n in_shapes=(intermediate_dim,),\n out_features=(embed_dim,),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass.\"\"\"\n fused_x = self.wi_fused(x)\n\n gate = fused_x[..., 0, :]\n up = fused_x[..., 1, :]\n\n hidden = torch.mul(F.silu(gate), up).to(self.dtype)\n\n output = self.wo(hidden)\n return output\n\n\nclass RotaryEmbedding(nn.Module):\n \"\"\"Rotary Position Embedding (RoPE) implementation in PyTorch.\"\"\"\n\n def __init__(\n self,\n embedding_dims: int,\n min_timescale: float = 1.0,\n max_timescale: float = 10000.0,\n dtype: torch.dtype = torch.float32,\n ):\n super().__init__()\n if embedding_dims % 2 != 0:\n raise ValueError(\"Embedding dim must be even for RoPE.\")\n self.embedding_dims = embedding_dims\n self.min_timescale = min_timescale\n self.max_timescale = max_timescale\n self.compute_dtype = dtype\n\n half_embedding_dim = embedding_dims // 2\n fraction = (2.0 * torch.arange(0, half_embedding_dim)) / embedding_dims\n timescale = (self.min_timescale * (self.max_timescale / self.min_timescale) ** fraction).to(torch.float32)\n self.register_buffer(\"timescale\", timescale, persistent=False)\n\n def forward(self, inputs: torch.Tensor, position: torch.Tensor):\n \"\"\"Applies RoPE.\"\"\"\n position = position.unsqueeze(-1).unsqueeze(-1)\n sinusoid_inp = position / self.timescale\n sin = torch.sin(sinusoid_inp)\n cos = torch.cos(sinusoid_inp)\n first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)\n first_part = first_half * cos - second_half * sin\n second_part = second_half * cos + first_half * sin\n return torch.cat(\n (first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)),\n dim=-1,\n )\n\n def apply_rope(self, inputs: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor):\n first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)\n first_part = 
first_half * cos - second_half * sin\n second_part = second_half * cos + first_half * sin\n return torch.cat((first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)), dim=-1)\n\n\ndef custom_scaled_dot_product_attention(\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n attn_mask: torch.Tensor | None = None,\n scale: float = 1.0,\n is_causal: bool = False,\n num_gqa_groups: int = 1,\n) -> torch.Tensor:\n \"\"\"\n Custom scaled dot-product attention with GQA support for MPS compatibility.\n\n Args:\n query: (B, N_q, T, H) - Query tensor, N_q = num_query_heads\n key: (B, N_kv, S, H) - Key tensor, N_kv = num_kv_heads\n value: (B, N_kv, S, H) - Value tensor\n attn_mask: (B, 1, T, S) - Attention mask, optional\n scale: Scaling factor for attention scores\n is_causal: If True, apply causal masking\n num_gqa_groups: Number of query groups per KV head (N_q / N_kv)\n\n Returns:\n output: (B, N_q, T, H) - Attention output\n \"\"\"\n B, N_q, T, H = query.shape\n _, N_kv, S, _ = key.shape\n\n # For GQA, repeat key and value tensors to match query heads\n if num_gqa_groups > 1:\n key = key.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)\n value = value.repeat_interleave(num_gqa_groups, dim=1) # (B, N_q, S, H)\n\n # Compute attention scores: (B, N_q, T, H) @ (B, N_q, H, S) -> (B, N_q, T, S)\n scores = torch.matmul(query, key.transpose(-1, -2)) * scale\n\n # Apply causal mask if needed\n if is_causal:\n causal_mask = torch.tril(torch.ones(T, S, dtype=torch.bool, device=query.device))\n scores = scores.masked_fill(~causal_mask, float(\"-inf\"))\n\n # Apply attention mask if provided\n if attn_mask is not None:\n scores = scores.masked_fill(~attn_mask, float(\"-inf\"))\n\n # Softmax over the last dimension (S)\n attn_weights = F.softmax(scores, dim=-1)\n\n # Compute output: (B, N_q, T, S) @ (B, N_q, S, H) -> (B, N_q, T, H)\n output = torch.matmul(attn_weights, value)\n\n return output\n\n\nclass CrossAttention(nn.Module):\n 
\"\"\"Cross-Attention using DenseGeneral.\"\"\"\n\n def __init__(\n self,\n config: EncoderConfig | DecoderConfig,\n q_embed_dim: int,\n kv_embed_dim: int,\n num_query_heads: int,\n num_kv_heads: int,\n head_dim: int,\n compute_dtype: torch.dtype,\n out_embed_dim: int | None = None,\n ):\n super().__init__()\n self.num_query_heads = num_query_heads\n self.num_kv_heads = num_kv_heads\n self.head_dim = head_dim\n self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim\n self.projected_query_dim = num_query_heads * head_dim\n if num_query_heads % num_kv_heads != 0:\n raise ValueError(f\"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})\")\n self.num_gqa_groups = num_query_heads // num_kv_heads\n\n # --- Projection Layers using DenseGeneral ---\n self.q_proj = DenseGeneral(\n in_shapes=(q_embed_dim,),\n out_features=(num_query_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.k_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.v_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.o_proj = DenseGeneral(\n in_shapes=(num_query_heads, head_dim),\n out_features=(self.output_dim,),\n axis=(-2, -1),\n weight_dtype=compute_dtype,\n )\n\n # --- Rotary Embedding ---\n self.rotary_emb = RotaryEmbedding(\n embedding_dims=self.head_dim,\n max_timescale=config.rope_theta,\n dtype=compute_dtype,\n )\n\n def forward(\n self,\n Xq: torch.Tensor, # (B, T, D) T = 1 in AR generation\n q_positions: torch.Tensor, # (B, T)\n kv_positions: torch.Tensor | None = None, # (B, S)\n attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others\n cache: KVCache | None = None, # None in Encoder, KVCache in Decoder\n is_causal: bool = False,\n ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] 
| None]:\n \"\"\"\n Performs attention calculation with optional KV caching.\n\n Args:\n Xq: Query tensor (B, T, D). T=1 during single-step decoding.\n Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.\n q_positions: Positions for queries (B, T).\n kv_positions: Positions for keys/values (B, S). If None, uses q_positions.\n attn_mask: Attention mask.\n cache: KVCache.\n\n Returns:\n A tuple containing:\n - output: The attention output tensor (B, T, output_dim).\n - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.\n \"\"\"\n if kv_positions is None:\n kv_positions = q_positions\n original_dtype = Xq.dtype\n\n Xq_BxTxNxH = self.q_proj(Xq)\n Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)\n\n attn_k: torch.Tensor | None = cache.k if cache is not None else None\n attn_v: torch.Tensor | None = cache.v if cache is not None else None\n\n # Use custom attention for MPS backend, otherwise use optimized PyTorch function\n is_mps = Xq.device.type == \"mps\" and torch.backends.mps.is_available()\n if is_mps:\n attn_output = custom_scaled_dot_product_attention(\n query=Xq_BxNxTxH,\n key=attn_k,\n value=attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n is_causal=is_causal,\n num_gqa_groups=self.num_gqa_groups,\n )\n else:\n attn_output = F.scaled_dot_product_attention(\n Xq_BxNxTxH,\n attn_k,\n attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n enable_gqa=self.num_gqa_groups > 1,\n is_causal=is_causal,\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)\n output = self.o_proj(attn_output)\n\n return output.to(original_dtype)\n\n\nclass FusedQKV(nn.Module):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = False,\n num_q_heads: int = 1,\n q_head_dim: int = 1,\n num_kv_heads: int = 1,\n kv_head_dim: int = 1,\n ):\n super().__init__()\n 
self.num_q_heads = num_q_heads\n self.q_head_dim = q_head_dim\n self.num_kv_heads = num_kv_heads\n self.kv_head_dim = kv_head_dim\n self.q_output_dim = num_q_heads * q_head_dim\n self.kv_output_dim = num_kv_heads * kv_head_dim\n self.linear = nn.Linear(in_features, out_features, bias=bias)\n\n def forward(self, inputs: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n x = self.linear(inputs)\n\n q, k, v = x.split([self.q_output_dim, self.kv_output_dim, self.kv_output_dim], dim=-1)\n\n q = q.reshape(q.shape[:-1] + (self.num_q_heads, self.q_head_dim))\n k = k.reshape(k.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))\n v = v.reshape(v.shape[:-1] + (self.num_kv_heads, self.kv_head_dim))\n\n return q, k, v\n\n\nclass SelfAttention(nn.Module):\n \"\"\"Attention using DenseGeneral.\"\"\"\n\n def __init__(\n self,\n config: EncoderConfig | DecoderConfig,\n q_embed_dim: int,\n kv_embed_dim: int,\n num_query_heads: int,\n num_kv_heads: int,\n head_dim: int,\n compute_dtype: torch.dtype,\n out_embed_dim: int | None = None,\n ):\n super().__init__()\n self.num_query_heads = num_query_heads\n self.num_kv_heads = num_kv_heads\n self.head_dim = head_dim\n self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim\n self.projected_query_dim = num_query_heads * head_dim\n if num_query_heads % num_kv_heads != 0:\n raise ValueError(f\"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})\")\n self.num_gqa_groups = num_query_heads // num_kv_heads\n self.kv_embed_dim = kv_embed_dim\n self.q_embed_dim = q_embed_dim\n\n # --- Projection Layers using DenseGeneral ---\n self.q_proj = DenseGeneral(\n in_shapes=(q_embed_dim,),\n out_features=(num_query_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.k_proj = DenseGeneral(\n in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.v_proj = DenseGeneral(\n 
in_shapes=(kv_embed_dim,),\n out_features=(num_kv_heads, head_dim),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n self.o_proj = DenseGeneral(\n in_shapes=(num_query_heads, head_dim),\n out_features=(self.output_dim,),\n axis=(-2, -1),\n weight_dtype=compute_dtype,\n )\n\n # --- Rotary Embedding ---\n self.rotary_emb = RotaryEmbedding(\n embedding_dims=self.head_dim,\n max_timescale=config.rope_theta,\n dtype=compute_dtype,\n )\n\n self.is_fused_qkv = False\n\n def get_linear_weight(self, dense: DenseGeneral):\n W_dg = dense.weight.data\n\n out_features = 1\n input_features = 1\n for dim in dense.out_features:\n out_features *= dim\n for dim in dense.in_shapes:\n input_features *= dim\n\n W_dg_reshaped_for_linear_T = W_dg.reshape(input_features, out_features)\n linear_weight = W_dg_reshaped_for_linear_T.transpose(0, 1).contiguous()\n return linear_weight\n\n def patch_fused_qkv(self):\n q_proj_weight = self.get_linear_weight(self.q_proj)\n k_proj_weight = self.get_linear_weight(self.k_proj)\n v_proj_weight = self.get_linear_weight(self.v_proj)\n\n self.qkv = FusedQKV(\n self.kv_embed_dim,\n (self.num_query_heads * self.head_dim + 2 * (self.num_kv_heads * self.head_dim)),\n bias=False,\n num_q_heads=self.num_query_heads,\n q_head_dim=self.head_dim,\n num_kv_heads=self.num_kv_heads,\n kv_head_dim=self.head_dim,\n )\n self.qkv.linear.weight.data = torch.cat([q_proj_weight, k_proj_weight, v_proj_weight], dim=0)\n\n # print(f\"qkv.weight.shape: {self.qkv.linear.weight.shape}\")\n self.is_fused_qkv = True\n\n def forward(\n self,\n X: torch.Tensor, # (B, T, D) T = 1 in AR generation\n q_positions: torch.Tensor, # (B, T)\n kv_positions: torch.Tensor | None = None, # (B, S)\n attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others\n cache: KVCache | None = None, # None in Encoder, KVCache in Decoder\n prefill: bool = False,\n is_causal: bool = False,\n current_idx: torch.Tensor | None = None,\n ) -> tuple[torch.Tensor, 
tuple[torch.Tensor, torch.Tensor] | None]:\n \"\"\"\n Performs attention calculation with optional KV caching.\n Args:\n Xq: Query tensor (B, T, D). T=1 during single-step decoding.\n Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.\n q_positions: Positions for queries (B, T).\n kv_positions: Positions for keys/values (B, S). If None, uses q_positions.\n attn_mask: Attention mask.\n cache: KVCache.\n prefill: If True, use prefill mode.\n Returns:\n A tuple containing:\n - output: The attention output tensor (B, T, output_dim).\n - present_kv: The K/V state to be cached for the next step ((B, N, S_new, H), (B, N, S_new, H)). For self-attn, S_new = S_past + S. For cross-attn, S_new = S_kv.\n \"\"\"\n if kv_positions is None:\n kv_positions = q_positions\n\n original_dtype = X.dtype\n\n if self.is_fused_qkv:\n Xq_BxTxNxH, Xk_BxSxKxH, Xv_BxSxKxH = self.qkv(X)\n else:\n Xq_BxTxNxH = self.q_proj(X)\n Xk_BxSxKxH = self.k_proj(X)\n Xv_BxSxKxH = self.v_proj(X)\n\n position = q_positions.unsqueeze(-1).unsqueeze(-1)\n sinusoid_inp = position / self.rotary_emb.timescale\n sin = torch.sin(sinusoid_inp)\n cos = torch.cos(sinusoid_inp)\n\n Xq_BxTxNxH = self.rotary_emb.apply_rope(Xq_BxTxNxH, sin, cos)\n Xk_BxSxKxH = self.rotary_emb.apply_rope(Xk_BxSxKxH, sin, cos)\n\n Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)\n\n attn_k: torch.Tensor | None = cache.k if cache is not None else None\n attn_v: torch.Tensor | None = cache.v if cache is not None else None\n\n Xk_BxKxSxH = Xk_BxSxKxH.transpose(1, 2) # (B, K, S, H)\n Xv_BxKxSxH = Xv_BxSxKxH.transpose(1, 2) # (B, K, S, H)\n\n if cache is None:\n attn_k = Xk_BxKxSxH\n attn_v = Xv_BxKxSxH\n elif prefill:\n attn_k, attn_v = Xk_BxKxSxH, Xv_BxKxSxH\n cache.prefill(attn_k, attn_v)\n else:\n attn_k, attn_v = cache.update(Xk_BxKxSxH, Xv_BxKxSxH, current_idx)\n\n # Use custom attention for MPS backend, otherwise use optimized PyTorch function\n is_mps = Xv_BxSxKxH.device.type == \"mps\" and 
torch.backends.mps.is_available()\n if is_mps:\n attn_output = custom_scaled_dot_product_attention(\n query=Xq_BxNxTxH,\n key=attn_k,\n value=attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n is_causal=is_causal,\n num_gqa_groups=self.num_gqa_groups,\n )\n else:\n attn_output = F.scaled_dot_product_attention(\n Xq_BxNxTxH,\n attn_k,\n attn_v,\n attn_mask=attn_mask if not is_causal else None,\n scale=1.0,\n enable_gqa=self.num_gqa_groups > 1,\n is_causal=is_causal,\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)\n output = self.o_proj(attn_output)\n\n return output.to(original_dtype)\n\n\nclass EncoderLayer(nn.Module):\n \"\"\"Transformer Encoder Layer using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n enc_config = config.encoder_config\n embed_dim = enc_config.hidden_size\n self.compute_dtype = compute_dtype\n\n self.pre_sa_norm = RMSNorm(\n embed_dim,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n self.self_attention = SelfAttention(\n enc_config,\n q_embed_dim=embed_dim,\n kv_embed_dim=embed_dim,\n num_query_heads=enc_config.num_attention_heads,\n num_kv_heads=enc_config.num_key_value_heads,\n head_dim=enc_config.head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=embed_dim,\n )\n self.post_sa_norm = RMSNorm(\n embed_dim,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n self.mlp = MlpBlock(\n embed_dim=embed_dim,\n intermediate_dim=enc_config.intermediate_size,\n compute_dtype=compute_dtype,\n )\n\n def forward(\n self,\n x: torch.Tensor,\n state: EncoderInferenceState,\n ) -> torch.Tensor:\n residual = x\n x_norm = self.pre_sa_norm(x).to(self.compute_dtype)\n\n sa_out = self.self_attention(\n X=x_norm,\n q_positions=state.positions,\n kv_positions=state.positions,\n attn_mask=state.attn_mask,\n )\n x = residual + sa_out\n\n residual = x\n x_norm = self.post_sa_norm(x).to(self.compute_dtype)\n 
mlp_out = self.mlp(x_norm)\n x = residual + mlp_out\n\n return x\n\n\nclass Encoder(nn.Module):\n \"\"\"Transformer Encoder Stack using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n enc_config = config.encoder_config\n self.compute_dtype = compute_dtype\n\n self.embedding = nn.Embedding(\n enc_config.vocab_size,\n enc_config.hidden_size,\n dtype=compute_dtype,\n )\n self.layers = nn.ModuleList([EncoderLayer(config, compute_dtype) for _ in range(enc_config.num_hidden_layers)])\n self.norm = RMSNorm(\n enc_config.hidden_size,\n eps=enc_config.norm_eps,\n dtype=torch.float32,\n )\n\n def forward(\n self,\n x_ids: torch.Tensor,\n state: EncoderInferenceState,\n ) -> torch.Tensor:\n x = self.embedding(x_ids)\n\n for layer in self.layers:\n x = layer(x, state)\n\n x = self.norm(x).to(self.compute_dtype)\n return x\n\n\nclass DecoderLayer(nn.Module):\n \"\"\"Transformer Decoder Layer using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n dec_config = config.decoder_config\n enc_config = config.encoder_config\n dec_embed_dim = dec_config.hidden_size\n enc_embed_dim = enc_config.hidden_size\n self.compute_dtype = compute_dtype\n\n # Norms\n self.pre_sa_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n self.pre_ca_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n self.pre_mlp_norm = RMSNorm(\n dec_embed_dim,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n\n # Self-Attention (GQA) with Causal Masking\n self.self_attention = SelfAttention(\n dec_config,\n q_embed_dim=dec_embed_dim,\n kv_embed_dim=dec_embed_dim,\n num_query_heads=dec_config.num_attention_heads,\n num_kv_heads=dec_config.num_key_value_heads,\n head_dim=dec_config.head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=dec_embed_dim,\n )\n # 
Cross-Attention (MHA)\n self.cross_attention = CrossAttention(\n dec_config,\n q_embed_dim=dec_embed_dim,\n kv_embed_dim=enc_embed_dim, # Note kv_embed_dim\n num_query_heads=dec_config.cross_num_attention_heads,\n num_kv_heads=dec_config.cross_num_key_value_heads,\n head_dim=dec_config.cross_head_dim,\n compute_dtype=compute_dtype,\n out_embed_dim=dec_embed_dim,\n )\n # MLP\n self.mlp = MlpBlock(\n embed_dim=dec_embed_dim,\n intermediate_dim=dec_config.intermediate_size,\n compute_dtype=compute_dtype,\n )\n\n def forward(\n self,\n x: torch.Tensor,\n state: DecoderInferenceState,\n self_attn_cache: KVCache | None = None,\n cross_attn_cache: KVCache | None = None,\n prefill: bool = False,\n current_idx: int = 0,\n ) -> torch.Tensor:\n residual = x\n x_norm = self.pre_sa_norm(x).to(self.compute_dtype)\n\n self_attn_mask = state.casual_attn_mask[None, None, current_idx]\n\n sa_out = self.self_attention(\n X=x_norm, # (2, 1, D)\n q_positions=state.dec_positions, # (2, 1)\n kv_positions=state.dec_positions, # (2, 1)\n attn_mask=self_attn_mask,\n cache=self_attn_cache,\n prefill=prefill,\n is_causal=prefill,\n current_idx=current_idx,\n )\n\n x = residual + sa_out\n\n residual = x\n x_norm = self.pre_ca_norm(x).to(self.compute_dtype)\n ca_out = self.cross_attention(\n Xq=x_norm,\n q_positions=state.dec_positions,\n kv_positions=state.enc_positions,\n attn_mask=state.cross_attn_mask,\n cache=cross_attn_cache,\n )\n x = residual + ca_out\n\n residual = x\n x_norm = self.pre_mlp_norm(x).to(self.compute_dtype)\n mlp_out = self.mlp(x_norm)\n x = residual + mlp_out\n\n return x\n\n\nclass Decoder(nn.Module):\n \"\"\"Transformer Decoder Stack using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n dec_config = config.decoder_config\n self.num_channels = dec_config.num_channels\n self.num_layers = dec_config.num_hidden_layers\n\n self.embeddings = nn.ModuleList(\n [\n 
nn.Embedding(dec_config.vocab_size, dec_config.hidden_size, dtype=compute_dtype)\n for _ in range(self.num_channels)\n ]\n )\n self.layers = nn.ModuleList(\n [DecoderLayer(config=config, compute_dtype=compute_dtype) for _ in range(self.num_layers)]\n )\n\n self.norm = RMSNorm(\n dec_config.hidden_size,\n eps=dec_config.norm_eps,\n dtype=torch.float32,\n )\n\n self.logits_dense = DenseGeneral(\n in_shapes=(dec_config.hidden_size,),\n out_features=(self.num_channels, dec_config.vocab_size),\n axis=(-1,),\n weight_dtype=compute_dtype,\n )\n\n def precompute_cross_attn_cache(\n self,\n enc_out: torch.Tensor, # (B, S, E)\n ) -> list[KVCache]:\n \"\"\"\n Computes the Key and Value tensors for cross-attention for each layer from the encoder output.\n \"\"\"\n per_layer_kv_cache: list[KVCache] = []\n\n for layer in self.layers:\n cross_attn_module = layer.cross_attention\n k_proj = cross_attn_module.k_proj(enc_out)\n v_proj = cross_attn_module.v_proj(enc_out)\n\n k = k_proj.transpose(1, 2)\n v = v_proj.transpose(1, 2)\n\n per_layer_kv_cache.append(KVCache.from_kv(k, v))\n\n return per_layer_kv_cache\n\n def decode_step(\n self,\n tgt_ids_Bx1xC: torch.Tensor, # [B, 1, C]\n state: DecoderInferenceState,\n current_idx: int,\n ) -> torch.Tensor:\n \"\"\"\n Performs a single decoding step, managing KV caches layer by layer.\n Returns:\n A tuple containing:\n - logits_Bx1xCV: The final output logits for the current step (B, 1, C*V), cast to float32.\n \"\"\"\n\n x = None\n for i in range(self.num_channels):\n channel_tokens = tgt_ids_Bx1xC[..., i]\n channel_embed = self.embeddings[i](channel_tokens)\n x = channel_embed if x is None else x + channel_embed\n\n for i, layer in enumerate(self.layers):\n self_cache = state.self_attn_cache[i]\n cross_cache = state.cross_attn_cache[i]\n x = layer(\n x, # (2, 1, D)\n state,\n self_attn_cache=self_cache,\n cross_attn_cache=cross_cache,\n current_idx=current_idx,\n )\n\n x = self.norm(x)\n logits_Bx1xCxV = self.logits_dense(x)\n\n return 
logits_Bx1xCxV.to(torch.float32)\n\n def forward(self, tgt_ids_BxTxC: torch.Tensor, state: DecoderInferenceState) -> torch.Tensor:\n \"\"\"\n Forward pass for the Decoder stack, managing KV caches.\n Args:\n tgt_ids_BxTxC: Target token IDs (B, T, C).\n encoder_out: Output from the encoder (B, S, E).\n tgt_positions: Positions for target sequence (B, T).\n src_positions: Positions for source sequence (B, S).\n self_attn_mask: Mask for self-attention.\n cross_attn_mask: Mask for cross-attention.\n past_key_values: List containing the self-attention KV cache for each layer\n from the previous decoding step. `len(past_key_values)` should\n equal `num_layers`.\n precomputed_cross_attn_kv: A single tuple containing the pre-computed K/V cache\n derived from `encoder_out`. This is passed identically\n to all layers.\n Returns:\n A tuple containing:\n - logits: The final output logits (B, T, C * V), cast to float32.\n - present_key_values: A list containing the updated self-attention KV cache\n for each layer for the *current* decoding step.\n \"\"\"\n _, _, num_channels_in = tgt_ids_BxTxC.shape\n assert num_channels_in == self.num_channels, \"Input channels mismatch\"\n\n # Embeddings\n x = None\n for i in range(self.num_channels):\n channel_tokens = tgt_ids_BxTxC[..., i]\n channel_embed = self.embeddings[i](channel_tokens)\n x = channel_embed if x is None else x + channel_embed\n\n for i, layer in enumerate(self.layers):\n self_cache = state.self_attn_cache[i]\n cross_cache = state.cross_attn_cache[i]\n x = layer(\n x,\n state,\n self_attn_cache=self_cache,\n cross_attn_cache=cross_cache,\n prefill=True,\n )\n\n # Final Norm\n x = self.norm(x)\n logits_BxTxCxV = self.logits_dense(x)\n\n return logits_BxTxCxV.to(torch.float32)\n\n\nclass DiaModel(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/nari-labs/dia\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n coders={\n DiaConfig: (\n lambda x: x.model_dump(),\n lambda data: 
DiaConfig.model_validate(data),\n ),\n },\n):\n \"\"\"PyTorch Dia Model using DenseGeneral.\"\"\"\n\n def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):\n super().__init__()\n self.config = config\n self.encoder = Encoder(config, compute_dtype)\n self.decoder = Decoder(config, compute_dtype)\n"], ["/dia/dia/state.py", "from dataclasses import dataclass\nfrom typing import Optional\n\nimport torch\n\nfrom .config import DiaConfig\n\n\ndef create_attn_mask(\n q_padding_mask_1d: torch.Tensor,\n k_padding_mask_1d: torch.Tensor,\n device: torch.device,\n is_causal: bool = False,\n) -> torch.Tensor:\n \"\"\"\n Creates the attention mask (self or cross) mimicking JAX segment ID logic.\n \"\"\"\n # B1, Tq = q_padding_mask_1d.shape\n # B2, Tk = k_padding_mask_1d.shape\n\n p_mask_q = q_padding_mask_1d.unsqueeze(2) # Shape [B, Tq, 1]\n p_mask_k = k_padding_mask_1d.unsqueeze(1) # Shape [B, 1, Tk]\n\n # Condition A: Non-padding query attends to non-padding key\n non_pad_attends_non_pad = p_mask_q & p_mask_k # Shape [B, Tq, Tk]\n\n # Condition B: Padding query attends to padding key\n pad_attends_pad = (~p_mask_q) & (~p_mask_k) # Shape [B, Tq, Tk]\n\n # Combine: True if padding status is compatible (both non-pad OR both pad)\n mask = non_pad_attends_non_pad | pad_attends_pad # Shape [B, Tq, Tk]\n\n if is_causal:\n # assert Tq == Tk, \"Causal mask requires query and key sequence lengths to be equal\"\n causal_mask_2d = torch.tril(torch.ones_like(mask[0], dtype=torch.bool, device=device)) # Shape [B, Tq, Tk]\n causal_mask = mask & causal_mask_2d # Shape [B, Tq, Tk]\n return causal_mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]\n else:\n return mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]\n\n\n@dataclass\nclass EncoderInferenceState:\n \"\"\"Parameters specifically for encoder inference.\"\"\"\n\n max_seq_len: int\n device: torch.device\n positions: torch.Tensor\n padding_mask: torch.Tensor\n attn_mask: torch.Tensor\n\n @classmethod\n def new(cls, config: DiaConfig, cond_src: 
torch.Tensor) -> \"EncoderInferenceState\":\n \"\"\"Creates EtorchrInferenceParams from DiaConfig and a device.\"\"\"\n device = cond_src.device\n\n positions = torch.arange(\n config.encoder_config.max_position_embeddings, dtype=torch.float32, device=device\n ).unsqueeze(0)\n padding_mask = (cond_src.squeeze(1) != 0).to(device).repeat_interleave(2, dim=0)\n attn_mask = create_attn_mask(padding_mask, padding_mask, device, is_causal=False)\n\n return cls(\n max_seq_len=config.encoder_config.max_position_embeddings,\n device=device,\n positions=positions,\n padding_mask=padding_mask,\n attn_mask=attn_mask,\n )\n\n\nclass KVCache(torch.nn.Module):\n k: torch.Tensor\n v: torch.Tensor\n\n def __init__(\n self,\n batch_size: int,\n num_heads: int,\n max_len: int,\n head_dim: int,\n dtype: torch.dtype,\n device: torch.device,\n k: torch.Tensor | None = None,\n v: torch.Tensor | None = None,\n ):\n k = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if k is None else k\n v = torch.zeros((2 * batch_size, num_heads, max_len, head_dim), dtype=dtype, device=device) if v is None else v\n super().__init__()\n\n self.register_buffer(\"k\", k)\n self.register_buffer(\"v\", v)\n\n @classmethod\n def from_kv(cls, k: torch.Tensor, v: torch.Tensor) -> \"KVCache\":\n return cls(\n batch_size=k.shape[0] // 2,\n num_heads=k.shape[1],\n max_len=k.shape[2],\n head_dim=k.shape[3],\n dtype=k.dtype,\n device=k.device,\n k=k,\n v=v,\n )\n\n def update(self, k: torch.Tensor, v: torch.Tensor, current_idx: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n k_out, v_out = self.k, self.v\n k_out[:, :, current_idx, :] = k\n v_out[:, :, current_idx, :] = v\n return self.k, self.v\n\n def prefill(self, k: torch.Tensor, v: torch.Tensor):\n prefill_len = k.shape[2]\n self.k[:, :, :prefill_len, :] = k\n self.v[:, :, :prefill_len, :] = v\n\n\n@dataclass\nclass DecoderInferenceState:\n \"\"\"Parameters specifically for decoder inference.\"\"\"\n\n device: 
torch.device\n dtype: torch.dtype\n enc_out: torch.Tensor\n enc_positions: torch.Tensor\n dec_positions: torch.Tensor\n self_attn_cache: list[KVCache]\n cross_attn_cache: list[KVCache]\n casual_attn_mask: torch.Tensor\n cross_attn_mask: torch.Tensor\n\n @classmethod\n def new(\n cls,\n config: DiaConfig,\n enc_state: EncoderInferenceState,\n enc_out: torch.Tensor,\n dec_cross_attn_cache: list[KVCache],\n compute_dtype: torch.dtype,\n max_generation_length: Optional[int] = None,\n ) -> \"DecoderInferenceState\":\n \"\"\"Creates DecoderInferenceParams from DiaConfig and a device.\"\"\"\n device = enc_out.device\n max_audio_len = max_generation_length or config.decoder_config.max_position_embeddings\n batch_size = enc_out.shape[0] // 2\n\n dec_positions = torch.full((2 * batch_size, 1), fill_value=0, dtype=torch.int32, device=device)\n causal_mask = torch.tril(torch.ones(max_audio_len, max_audio_len, dtype=torch.bool, device=device))\n dec_mask = torch.ones((2 * batch_size, 1), dtype=torch.bool, device=device)\n cross_attn_mask = create_attn_mask(dec_mask, enc_state.padding_mask, device, is_causal=False)\n\n self_attn_cache = [\n KVCache(\n batch_size,\n config.decoder_config.num_key_value_heads,\n max_audio_len,\n config.decoder_config.head_dim,\n compute_dtype,\n device,\n )\n for _ in range(config.decoder_config.num_hidden_layers)\n ]\n\n return cls(\n device=device,\n dtype=compute_dtype,\n enc_out=enc_out,\n enc_positions=enc_state.positions,\n dec_positions=dec_positions,\n self_attn_cache=self_attn_cache,\n cross_attn_cache=dec_cross_attn_cache,\n casual_attn_mask=causal_mask,\n cross_attn_mask=cross_attn_mask,\n )\n\n def prepare_step(self, step_from: int, step_to: int | None = None) -> None:\n if step_to is None:\n step_to = step_from + 1\n self.dec_positions = torch.arange(step_from, step_to, dtype=torch.int32, device=self.device).unsqueeze(0)\n\n\n@dataclass\nclass DecoderOutput:\n generated_tokens: torch.Tensor\n prefill_steps: list[int]\n\n @classmethod\n 
def new(cls, batch_size: int, config: DiaConfig, device: torch.device) -> \"DecoderOutput\":\n max_audio_len = config.decoder_config.max_position_embeddings\n return cls(\n generated_tokens=torch.full(\n (batch_size, max_audio_len, config.decoder_config.num_channels),\n fill_value=-1,\n dtype=torch.int,\n device=device,\n ),\n prefill_steps=[],\n )\n\n def get_tokens_at(self, step_from: int, step_to: int | None = None) -> torch.Tensor:\n if step_to is None:\n step_to = step_from + 1\n return self.generated_tokens[:, step_from:step_to, :]\n\n def update_one(self, dec_out: torch.Tensor, step: int, apply_mask: bool = False):\n dec_out = dec_out.to(self.generated_tokens.dtype)\n if apply_mask:\n mask = self.generated_tokens[:, step, :] == -1\n self.generated_tokens[:, step, :] = torch.where(mask, dec_out, self.generated_tokens[:, step, :])\n else:\n self.generated_tokens[:, step, :] = dec_out\n\n def prefill(self, dec_out: torch.Tensor, prefill_steps: list[int]):\n length = dec_out.shape[1]\n self.generated_tokens[:, :length, :] = dec_out\n self.prefill_steps = prefill_steps\n"], ["/dia/app.py", "import argparse\nimport contextlib\nimport io\nimport random\nimport tempfile\nimport time\nfrom pathlib import Path\nfrom typing import Optional, Tuple\n\nimport gradio as gr\nimport numpy as np\nimport soundfile as sf\nimport torch\n\nfrom dia.model import Dia\n\n\n# --- Global Setup ---\nparser = argparse.ArgumentParser(description=\"Gradio interface for Nari TTS\")\nparser.add_argument(\"--device\", type=str, default=None, help=\"Force device (e.g., 'cuda', 'mps', 'cpu')\")\nparser.add_argument(\"--share\", action=\"store_true\", help=\"Enable Gradio sharing\")\n\nargs = parser.parse_args()\n\n\n# Determine device\nif args.device:\n device = torch.device(args.device)\nelif torch.cuda.is_available():\n device = torch.device(\"cuda\")\n# Simplified MPS check for broader compatibility\nelif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n # Basic 
check is usually sufficient, detailed check can be problematic\n device = torch.device(\"mps\")\nelse:\n device = torch.device(\"cpu\")\n\nprint(f\"Using device: {device}\")\n\n# Load Nari model and config\nprint(\"Loading Nari model...\")\ntry:\n dtype_map = {\n \"cpu\": \"float32\",\n \"mps\": \"float32\", # Apple M series – better with float32\n \"cuda\": \"float16\", # NVIDIA – better with float16\n }\n\n dtype = dtype_map.get(device.type, \"float16\")\n print(f\"Using device: {device}, attempting to load model with {dtype}\")\n model = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=dtype, device=device)\nexcept Exception as e:\n print(f\"Error loading Nari model: {e}\")\n raise\n\n\ndef set_seed(seed: int):\n \"\"\"Sets the random seed for reproducibility.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef run_inference(\n text_input: str,\n audio_prompt_text_input: str,\n audio_prompt_input: Optional[Tuple[int, np.ndarray]],\n max_new_tokens: int,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n cfg_filter_top_k: int,\n speed_factor: float,\n seed: Optional[int] = None,\n):\n \"\"\"\n Runs Nari inference using the globally loaded model and provided inputs.\n Uses temporary files for text and audio prompt compatibility with inference.generate.\n \"\"\"\n global model, device # Access global model, config, device\n console_output_buffer = io.StringIO()\n\n with contextlib.redirect_stdout(console_output_buffer):\n # Prepend transcript text if audio_prompt provided\n if audio_prompt_input and audio_prompt_text_input and not audio_prompt_text_input.isspace():\n text_input = audio_prompt_text_input + \"\\n\" + text_input\n text_input = text_input.strip()\n\n if audio_prompt_input and (not audio_prompt_text_input or 
audio_prompt_text_input.isspace()):\n raise gr.Error(\"Audio Prompt Text input cannot be empty.\")\n\n if not text_input or text_input.isspace():\n raise gr.Error(\"Text input cannot be empty.\")\n\n # Preprocess Audio\n temp_txt_file_path = None\n temp_audio_prompt_path = None\n output_audio = (44100, np.zeros(1, dtype=np.float32))\n\n try:\n prompt_path_for_generate = None\n if audio_prompt_input is not None:\n sr, audio_data = audio_prompt_input\n # Check if audio_data is valid\n if audio_data is None or audio_data.size == 0 or audio_data.max() == 0: # Check for silence/empty\n gr.Warning(\"Audio prompt seems empty or silent, ignoring prompt.\")\n else:\n # Save prompt audio to a temporary WAV file\n with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".wav\", delete=False) as f_audio:\n temp_audio_prompt_path = f_audio.name # Store path for cleanup\n\n # Basic audio preprocessing for consistency\n # Convert to float32 in [-1, 1] range if integer type\n if np.issubdtype(audio_data.dtype, np.integer):\n max_val = np.iinfo(audio_data.dtype).max\n audio_data = audio_data.astype(np.float32) / max_val\n elif not np.issubdtype(audio_data.dtype, np.floating):\n gr.Warning(f\"Unsupported audio prompt dtype {audio_data.dtype}, attempting conversion.\")\n # Attempt conversion, might fail for complex types\n try:\n audio_data = audio_data.astype(np.float32)\n except Exception as conv_e:\n raise gr.Error(f\"Failed to convert audio prompt to float32: {conv_e}\")\n\n # Ensure mono (average channels if stereo)\n if audio_data.ndim > 1:\n if audio_data.shape[0] == 2: # Assume (2, N)\n audio_data = np.mean(audio_data, axis=0)\n elif audio_data.shape[1] == 2: # Assume (N, 2)\n audio_data = np.mean(audio_data, axis=1)\n else:\n gr.Warning(\n f\"Audio prompt has unexpected shape {audio_data.shape}, taking first channel/axis.\"\n )\n audio_data = (\n audio_data[0] if audio_data.shape[0] < audio_data.shape[1] else audio_data[:, 0]\n )\n audio_data = np.ascontiguousarray(audio_data) 
# Ensure contiguous after slicing/mean\n\n # Write using soundfile\n try:\n sf.write(\n temp_audio_prompt_path, audio_data, sr, subtype=\"FLOAT\"\n ) # Explicitly use FLOAT subtype\n prompt_path_for_generate = temp_audio_prompt_path\n print(f\"Created temporary audio prompt file: {temp_audio_prompt_path} (orig sr: {sr})\")\n except Exception as write_e:\n print(f\"Error writing temporary audio file: {write_e}\")\n raise gr.Error(f\"Failed to save audio prompt: {write_e}\")\n\n # Set and Display Generation Seed\n if seed is None or seed < 0:\n seed = random.randint(0, 2**32 - 1)\n print(f\"\\nNo seed provided, generated random seed: {seed}\\n\")\n else:\n print(f\"\\nUsing user-selected seed: {seed}\\n\")\n set_seed(seed)\n\n # Run Generation\n print(f'Generating speech: \\n\"{text_input}\"\\n')\n\n start_time = time.time()\n\n # Use torch.inference_mode() context manager for the generation call\n with torch.inference_mode():\n output_audio_np = model.generate(\n text_input,\n max_tokens=max_new_tokens,\n cfg_scale=cfg_scale,\n temperature=temperature,\n top_p=top_p,\n cfg_filter_top_k=cfg_filter_top_k, # Pass the value here\n use_torch_compile=False, # Keep False for Gradio stability\n audio_prompt=prompt_path_for_generate,\n verbose=True,\n )\n\n end_time = time.time()\n print(f\"Generation finished in {end_time - start_time:.2f} seconds.\\n\")\n\n # 4. 
Convert Codes to Audio\n if output_audio_np is not None:\n # Get sample rate from the loaded DAC model\n output_sr = 44100\n\n # --- Slow down audio ---\n original_len = len(output_audio_np)\n # Ensure speed_factor is positive and not excessively small/large to avoid issues\n speed_factor = max(0.1, min(speed_factor, 5.0))\n target_len = int(original_len / speed_factor) # Target length based on speed_factor\n if target_len != original_len and target_len > 0: # Only interpolate if length changes and is valid\n x_original = np.arange(original_len)\n x_resampled = np.linspace(0, original_len - 1, target_len)\n resampled_audio_np = np.interp(x_resampled, x_original, output_audio_np)\n output_audio = (\n output_sr,\n resampled_audio_np.astype(np.float32),\n ) # Use resampled audio\n print(\n f\"Resampled audio from {original_len} to {target_len} samples for {speed_factor:.2f}x speed.\"\n )\n else:\n output_audio = (\n output_sr,\n output_audio_np,\n ) # Keep original if calculation fails or no change\n print(f\"Skipping audio speed adjustment (factor: {speed_factor:.2f}).\")\n # --- End slowdown ---\n\n print(f\"Audio conversion successful. 
Final shape: {output_audio[1].shape}, Sample Rate: {output_sr}\")\n\n # Explicitly convert to int16 to prevent Gradio warning\n if output_audio[1].dtype == np.float32 or output_audio[1].dtype == np.float64:\n audio_for_gradio = np.clip(output_audio[1], -1.0, 1.0)\n audio_for_gradio = (audio_for_gradio * 32767).astype(np.int16)\n output_audio = (output_sr, audio_for_gradio)\n print(\"Converted audio to int16 for Gradio output.\")\n\n else:\n print(\"\\nGeneration finished, but no valid tokens were produced.\")\n # Return default silence\n gr.Warning(\"Generation produced no output.\")\n\n except Exception as e:\n print(f\"Error during inference: {e}\")\n import traceback\n\n traceback.print_exc()\n # Re-raise as Gradio error to display nicely in the UI\n raise gr.Error(f\"Inference failed: {e}\")\n\n finally:\n # Cleanup Temporary Files defensively\n if temp_txt_file_path and Path(temp_txt_file_path).exists():\n try:\n Path(temp_txt_file_path).unlink()\n print(f\"Deleted temporary text file: {temp_txt_file_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary text file {temp_txt_file_path}: {e}\")\n if temp_audio_prompt_path and Path(temp_audio_prompt_path).exists():\n try:\n Path(temp_audio_prompt_path).unlink()\n print(f\"Deleted temporary audio prompt file: {temp_audio_prompt_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary audio prompt file {temp_audio_prompt_path}: {e}\")\n\n # After generation, capture the printed output\n console_output = console_output_buffer.getvalue()\n\n return output_audio, seed, console_output\n\n\n# --- Create Gradio Interface ---\ncss = \"\"\"\n#col-container {max-width: 90%; margin-left: auto; margin-right: auto;}\n\"\"\"\n# Attempt to load default text from example.txt\ndefault_text = \"[S1] Dia is an open weights text to dialogue model. \\n[S2] You get full control over scripts and voices. \\n[S1] Wow. Amazing. 
(laughs) \\n[S2] Try it now on Git hub or Hugging Face.\"\nexample_txt_path = Path(\"./example.txt\")\nif example_txt_path.exists():\n try:\n default_text = example_txt_path.read_text(encoding=\"utf-8\").strip()\n if not default_text: # Handle empty example file\n default_text = \"Example text file was empty.\"\n except Exception as e:\n print(f\"Warning: Could not read example.txt: {e}\")\n\n\n# Build Gradio UI\nwith gr.Blocks(css=css, theme=\"gradio/dark\") as demo:\n gr.Markdown(\"# Nari Text-to-Speech Synthesis\")\n\n with gr.Row(equal_height=False):\n with gr.Column(scale=1):\n with gr.Accordion(\"Audio Reference Prompt (Optional)\", open=False):\n audio_prompt_input = gr.Audio(\n label=\"Audio Prompt (Optional)\",\n show_label=True,\n sources=[\"upload\", \"microphone\"],\n type=\"numpy\",\n )\n audio_prompt_text_input = gr.Textbox(\n label=\"Transcript of Audio Prompt (Required if using Audio Prompt)\",\n placeholder=\"Enter text here...\",\n value=\"\",\n lines=5, # Increased lines\n )\n text_input = gr.Textbox(\n label=\"Text To Generate\",\n placeholder=\"Enter text here...\",\n value=default_text,\n lines=5, # Increased lines\n )\n with gr.Accordion(\"Generation Parameters\", open=False):\n max_new_tokens = gr.Slider(\n label=\"Max New Tokens (Audio Length)\",\n minimum=860,\n maximum=3072,\n value=model.config.decoder_config.max_position_embeddings, # Use config default if available, else fallback\n step=50,\n info=\"Controls the maximum length of the generated audio (more tokens = longer audio).\",\n )\n cfg_scale = gr.Slider(\n label=\"CFG Scale (Guidance Strength)\",\n minimum=1.0,\n maximum=5.0,\n value=3.0, # Default from inference.py\n step=0.1,\n info=\"Higher values increase adherence to the text prompt.\",\n )\n temperature = gr.Slider(\n label=\"Temperature (Randomness)\",\n minimum=1.0,\n maximum=2.5,\n value=1.8, # Default from inference.py\n step=0.05,\n info=\"Lower values make the output more deterministic, higher values increase 
randomness.\",\n )\n top_p = gr.Slider(\n label=\"Top P (Nucleus Sampling)\",\n minimum=0.70,\n maximum=1.0,\n value=0.95, # Default from inference.py\n step=0.01,\n info=\"Filters vocabulary to the most likely tokens cumulatively reaching probability P.\",\n )\n cfg_filter_top_k = gr.Slider(\n label=\"CFG Filter Top K\",\n minimum=15,\n maximum=100,\n value=45,\n step=1,\n info=\"Top k filter for CFG guidance.\",\n )\n speed_factor_slider = gr.Slider(\n label=\"Speed Factor\",\n minimum=0.8,\n maximum=1.0,\n value=1.0,\n step=0.02,\n info=\"Adjusts the speed of the generated audio (1.0 = original speed).\",\n )\n seed_input = gr.Number(\n label=\"Generation Seed (Optional)\",\n value=-1,\n precision=0, # No decimal points\n step=1,\n interactive=True,\n info=\"Set a generation seed for reproducible outputs. Leave empty or -1 for random seed.\",\n )\n\n run_button = gr.Button(\"Generate Audio\", variant=\"primary\")\n\n with gr.Column(scale=1):\n audio_output = gr.Audio(\n label=\"Generated Audio\",\n type=\"numpy\",\n autoplay=False,\n )\n seed_output = gr.Textbox(label=\"Generation Seed\", interactive=False)\n console_output = gr.Textbox(label=\"Console Output Log\", lines=10, interactive=False)\n\n # Link button click to function\n run_button.click(\n fn=run_inference,\n inputs=[\n text_input,\n audio_prompt_text_input,\n audio_prompt_input,\n max_new_tokens,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n speed_factor_slider,\n seed_input,\n ],\n outputs=[\n audio_output,\n seed_output,\n console_output,\n ], # Add status_output here if using it\n api_name=\"generate_audio\",\n )\n\n # Add examples (ensure the prompt path is correct or remove it if example file doesn't exist)\n example_prompt_path = \"./example_prompt.mp3\" # Adjust if needed\n examples_list = [\n [\n \"[S1] Oh fire! Oh my goodness! What's the procedure? What to we do people? The smoke could be coming through an air duct! \\n[S2] Oh my god! Okay.. it's happening. Everybody stay calm! 
\\n[S1] What's the procedure... \\n[S2] Everybody stay fucking calm!!!... Everybody fucking calm down!!!!! \\n[S1] No! No! If you touch the handle, if its hot there might be a fire down the hallway! \",\n None,\n 3072,\n 3.0,\n 1.8,\n 0.95,\n 45,\n 1.0,\n ],\n [\n \"[S1] Open weights text to dialogue model. \\n[S2] You get full control over scripts and voices. \\n[S1] I'm biased, but I think we clearly won. \\n[S2] Hard to disagree. (laughs) \\n[S1] Thanks for listening to this demo. \\n[S2] Try it now on Git hub and Hugging Face. \\n[S1] If you liked our model, please give us a star and share to your friends. \\n[S2] This was Nari Labs.\",\n example_prompt_path if Path(example_prompt_path).exists() else None,\n 3072,\n 3.0,\n 1.8,\n 0.95,\n 45,\n 1.0,\n ],\n ]\n\n if examples_list:\n gr.Examples(\n examples=examples_list,\n inputs=[\n text_input,\n audio_prompt_input,\n max_new_tokens,\n cfg_scale,\n temperature,\n top_p,\n cfg_filter_top_k,\n speed_factor_slider,\n seed_input,\n ],\n outputs=[audio_output],\n fn=run_inference,\n cache_examples=False,\n label=\"Examples (Click to Run)\",\n )\n else:\n gr.Markdown(\"_(No examples configured or example prompt file missing)_\")\n\n# --- Launch the App ---\nif __name__ == \"__main__\":\n print(\"Launching Gradio interface...\")\n\n # set `GRADIO_SERVER_NAME`, `GRADIO_SERVER_PORT` env vars to override default values\n # use `GRADIO_SERVER_NAME=0.0.0.0` for Docker\n demo.launch(share=args.share)\n"], ["/dia/dia/config.py", "\"\"\"Configuration management module for the Dia model.\n\nThis module provides comprehensive configuration management for the Dia model,\nutilizing Pydantic for validation. 
It defines configurations for data processing,\nmodel architecture (encoder and decoder), and training settings.\n\nKey components:\n- DataConfig: Parameters for data loading and preprocessing.\n- EncoderConfig: Architecture details for the encoder module.\n- DecoderConfig: Architecture details for the decoder module.\n- ModelConfig: Combined model architecture settings.\n- TrainingConfig: Training hyperparameters and settings.\n- DiaConfig: Master configuration combining all components.\n\"\"\"\n\nimport os\n\nfrom pydantic import BaseModel, Field\n\n\nclass EncoderConfig(BaseModel, frozen=True):\n \"\"\"Configuration for the encoder component of the Dia model.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia_encoder\".\n hidden_size: Size of the encoder layers, defaults to 1024.\n intermediate_size: Size of the \"intermediate\" (i.e., feed-forward) layer in the encoder, defaults to 4096.\n num_hidden_layers: Number of hidden layers in the encoder, defaults to 12.\n num_attention_heads: Number of attention heads in the encoder, defaults to 16.\n num_key_value_heads: Number of key-value heads in the encoder, defaults to 16.\n head_dim: Dimension of each attention head, defaults to 128.\n hidden_act: Activation function in the encoder, defaults to \"silu\".\n max_position_embeddings: Maximum number of position embeddings, defaults to 1024.\n initializer_range: Range for initializing weights, defaults to 0.02.\n norm_eps: Epsilon value for normalization layers, defaults to 1e-5.\n rope_theta: Theta value for RoPE, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE.\n vocab_size: Vocabulary size, defaults to 256.\n \"\"\"\n\n head_dim: int = Field(default=128, gt=0)\n hidden_act: str = Field(default=\"silu\")\n hidden_size: int = Field(default=1024, gt=0)\n initializer_range: float = Field(default=0.02)\n intermediate_size: int = Field(default=4096, gt=0)\n max_position_embeddings: int = Field(default=1024, gt=0)\n model_type: str 
= Field(default=\"dia_encoder\")\n norm_eps: float = Field(default=1e-5)\n num_attention_heads: int = Field(default=16, gt=0)\n num_hidden_layers: int = Field(default=12, gt=0)\n num_key_value_heads: int = Field(default=16, gt=0)\n rope_scaling: float | None = Field(default=None)\n rope_theta: float = Field(default=10000.0)\n vocab_size: int = Field(default=256, gt=0)\n\n\nclass DecoderConfig(BaseModel, frozen=True):\n \"\"\"Configuration for the decoder component of the Dia model.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia_decoder\".\n hidden_size: Size of the decoder layers, defaults to 2048.\n intermediate_size: Size of the \"intermediate\" (i.e., feed-forward) layer in the decoder, defaults to 8192.\n num_hidden_layers: Number of hidden layers in the decoder, defaults to 18.\n num_attention_heads: Number of attention heads in the decoder, defaults to 16.\n num_key_value_heads: Number of key-value heads in the decoder, defaults to 4.\n head_dim: Dimension of each attention head, defaults to 128.\n cross_hidden_size: Size of the cross-attention layers, defaults to 1024.\n cross_num_attention_heads: Number of attention heads in the cross-attention mechanism, defaults to 16.\n cross_num_key_value_heads: Number of key-value heads in the cross-attention mechanism, defaults to 16.\n cross_head_dim: Dimension of each cross-attention head, defaults to 128.\n hidden_act: Activation function in the decoder, defaults to \"silu\".\n max_position_embeddings: Maximum number of position embeddings in the decoder, defaults to 3072.\n initializer_range: Range for initializing weights in the decoder, defaults to 0.02.\n norm_eps: Epsilon value for normalization layers in the decoder, defaults to 1e-5.\n rope_theta: Theta value for RoPE in the decoder, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE in the decoder.\n vocab_size: Vocabulary size for the decoder, defaults to 1028.\n num_channels: Number of channels in the decoder, 
defaults to 9.\n \"\"\"\n\n cross_head_dim: int = Field(default=128, gt=0)\n cross_hidden_size: int = Field(default=1024, gt=0)\n cross_num_attention_heads: int = Field(default=16, gt=0)\n cross_num_key_value_heads: int = Field(default=16, gt=0)\n head_dim: int = Field(default=128, gt=0)\n hidden_act: str = Field(default=\"silu\")\n hidden_size: int = Field(default=2048, gt=0)\n initializer_range: float = Field(default=0.02)\n intermediate_size: int = Field(default=8192, gt=0)\n max_position_embeddings: int = Field(default=3072, gt=0)\n model_type: str = Field(default=\"dia_decoder\")\n norm_eps: float = Field(default=1e-5)\n num_attention_heads: int = Field(default=16, gt=0)\n num_channels: int = Field(default=9, gt=0)\n num_hidden_layers: int = Field(default=18, gt=0)\n num_key_value_heads: int = Field(default=4, gt=0)\n rope_scaling: float | None = Field(default=None)\n rope_theta: float = Field(default=10000.0)\n vocab_size: int = Field(default=1028, gt=0)\n\n\nclass DiaConfig(BaseModel, frozen=True):\n \"\"\"Main configuration container for the Dia model architecture.\n\n Attributes:\n model_type: Type of the model, defaults to \"dia\".\n is_encoder_decoder: Flag indicating if the model is an encoder-decoder type, defaults to True.\n encoder: Configuration for the encoder component.\n decoder: Configuration for the decoder component.\n src_vocab_size: Size of the source (text) vocabulary.\n tgt_vocab_size: Size of the target (audio code) vocabulary.\n initializer_range: Range for initializing weights, defaults to 0.02.\n norm_eps: Epsilon value for normalization layers, defaults to 1e-5.\n torch_dtype: Data type for model weights in PyTorch, defaults to \"float32\".\n bos_token_id: Beginning-of-sequence token ID, defaults to 1026.\n eos_token_id: End-of-sequence token ID, defaults to 1024.\n pad_token_id: Padding token ID, defaults to 1025.\n rope_theta: Theta value for RoPE, defaults to 10000.0.\n rope_scaling: Optional scaling factor for RoPE.\n 
transformers_version: Version of the transformers library, defaults to \"4.53.0.dev0\".\n architectures: List of model architectures, defaults to [\"DiaForConditionalGeneration\"].\n delay_pattern: List of delay values for each audio channel, defaults to [0,8,9,10,11,12,13,14,15].\n \"\"\"\n\n architectures: list[str] = Field(default_factory=lambda: [\"DiaForConditionalGeneration\"])\n bos_token_id: int = Field(default=1026)\n decoder_config: DecoderConfig\n delay_pattern: list[int] = Field(default_factory=lambda: [0, 8, 9, 10, 11, 12, 13, 14, 15])\n encoder_config: EncoderConfig\n eos_token_id: int = Field(default=1024)\n initializer_range: float = Field(default=0.02)\n is_encoder_decoder: bool = Field(default=True)\n model_type: str = Field(default=\"dia\")\n norm_eps: float = Field(default=1e-5)\n pad_token_id: int = Field(default=1025)\n torch_dtype: str = Field(default=\"float32\")\n transformers_version: str = Field(default=\"4.53.0.dev0\")\n\n def save(self, path: str) -> None:\n \"\"\"Save the current configuration instance to a JSON file.\n\n Ensures the parent directory exists and the file has a .json extension.\n\n Args:\n path: The target file path to save the configuration.\n\n Raises:\n ValueError: If the path is not a file with a .json extension.\n \"\"\"\n os.makedirs(os.path.dirname(path), exist_ok=True)\n config_json = self.model_dump_json(indent=2)\n with open(path, \"w\") as f:\n f.write(config_json)\n\n @classmethod\n def load(cls, path: str) -> \"DiaConfig | None\":\n \"\"\"Load and validate a Dia configuration from a JSON file.\n\n Args:\n path: The path to the configuration file.\n\n Returns:\n A validated DiaConfig instance if the file exists and is valid,\n otherwise None if the file is not found.\n\n Raises:\n ValueError: If the path does not point to an existing .json file.\n pydantic.ValidationError: If the JSON content fails validation against the DiaConfig schema.\n \"\"\"\n try:\n with open(path, \"r\") as f:\n content = f.read()\n 
return cls.model_validate_json(content)\n except FileNotFoundError:\n return None\n"], ["/dia/cli.py", "import argparse\nimport os\nimport random\n\nimport numpy as np\nimport soundfile as sf\nimport torch\n\nfrom dia.model import Dia\n\n\ndef set_seed(seed: int):\n \"\"\"Sets the random seed for reproducibility.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # Ensure deterministic behavior for cuDNN (if used)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Generate audio using the Dia model.\")\n\n parser.add_argument(\"text\", type=str, help=\"Input text for speech generation.\")\n parser.add_argument(\n \"--output\", type=str, required=True, help=\"Path to save the generated audio file (e.g., output.wav).\"\n )\n\n parser.add_argument(\n \"--repo-id\",\n type=str,\n default=\"nari-labs/Dia-1.6B-0626\",\n help=\"Hugging Face repository ID (e.g., nari-labs/Dia-1.6B-0626).\",\n )\n parser.add_argument(\n \"--local-paths\", action=\"store_true\", help=\"Load model from local config and checkpoint files.\"\n )\n\n parser.add_argument(\n \"--config\", type=str, help=\"Path to local config.json file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--checkpoint\", type=str, help=\"Path to local model checkpoint .pth file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--audio-prompt\", type=str, default=None, help=\"Path to an optional audio prompt WAV file for voice cloning.\"\n )\n\n gen_group = parser.add_argument_group(\"Generation Parameters\")\n gen_group.add_argument(\n \"--max-tokens\",\n type=int,\n default=None,\n help=\"Maximum number of audio tokens to generate (defaults to config value).\",\n )\n gen_group.add_argument(\n \"--cfg-scale\", type=float, default=3.0, help=\"Classifier-Free 
Guidance scale (default: 3.0).\"\n )\n gen_group.add_argument(\n \"--temperature\", type=float, default=1.3, help=\"Sampling temperature (higher is more random, default: 0.7).\"\n )\n gen_group.add_argument(\"--top-p\", type=float, default=0.95, help=\"Nucleus sampling probability (default: 0.95).\")\n\n infra_group = parser.add_argument_group(\"Infrastructure\")\n infra_group.add_argument(\"--seed\", type=int, default=None, help=\"Random seed for reproducibility.\")\n infra_group.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device to run inference on (e.g., 'cuda', 'cpu', default: auto).\",\n )\n\n args = parser.parse_args()\n\n # Validation for local paths\n if args.local_paths:\n if not args.config:\n parser.error(\"--config is required when --local-paths is set.\")\n if not args.checkpoint:\n parser.error(\"--checkpoint is required when --local-paths is set.\")\n if not os.path.exists(args.config):\n parser.error(f\"Config file not found: {args.config}\")\n if not os.path.exists(args.checkpoint):\n parser.error(f\"Checkpoint file not found: {args.checkpoint}\")\n\n # Set seed if provided\n if args.seed is not None:\n set_seed(args.seed)\n print(f\"Using user-selected seed: {args.seed}\")\n\n # Determine device\n device = torch.device(args.device)\n print(f\"Using device: {device}\")\n\n # Load model\n print(\"Loading model...\")\n if args.local_paths:\n print(f\"Loading from local paths: config='{args.config}', checkpoint='{args.checkpoint}'\")\n try:\n model = Dia.from_local(args.config, args.checkpoint, device=device)\n except Exception as e:\n print(f\"Error loading local model: {e}\")\n exit(1)\n else:\n print(f\"Loading from Hugging Face Hub: repo_id='{args.repo_id}'\")\n try:\n model = Dia.from_pretrained(args.repo_id, device=device)\n except Exception as e:\n print(f\"Error loading model from Hub: {e}\")\n exit(1)\n print(\"Model loaded.\")\n\n # Generate audio\n print(\"Generating 
audio...\")\n try:\n sample_rate = 44100 # Default assumption\n\n output_audio = model.generate(\n text=args.text,\n audio_prompt=args.audio_prompt,\n max_tokens=args.max_tokens,\n cfg_scale=args.cfg_scale,\n temperature=args.temperature,\n top_p=args.top_p,\n )\n print(\"Audio generation complete.\")\n\n print(f\"Saving audio to {args.output}...\")\n os.makedirs(os.path.dirname(args.output) or \".\", exist_ok=True)\n\n sf.write(args.output, output_audio, sample_rate)\n print(f\"Audio successfully saved to {args.output}\")\n\n except Exception as e:\n print(f\"Error during audio generation or saving: {e}\")\n exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n"], ["/dia/example/benchmark.py", "from random import choice\n\nimport torch\n\nfrom dia.model import Dia\n\n\ntorch._inductor.config.coordinate_descent_tuning = True\ntorch._inductor.config.triton.unique_kernel_names = True\ntorch._inductor.config.fx_graph_cache = True\n\n# debugging\ntorch._logging.set_logs(graph_breaks=True, recompiles=True)\n\nmodel_name = \"nari-labs/Dia-1.6B-0626\"\ncompute_dtype = \"float16\"\n\nmodel = Dia.from_pretrained(model_name, compute_dtype=compute_dtype)\n\n\ntest_cases = [\n \"[S1] Dia is an open weights text to dialogue model.\",\n \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\",\n \"[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code.\",\n \"[S1] torch.compile is a new feature in PyTorch that allows you to compile your model with a single line of code. 
[S2] It is a new feature in PyTorch that allows you to compile your model with a single line of code.\",\n]\n\n\n# Wram up\nfor _ in range(2):\n text = choice(test_cases)\n output = model.generate(text, audio_prompt=\"./example_prompt.mp3\", use_torch_compile=True, verbose=True)\n output = model.generate(text, use_torch_compile=True, verbose=True)\n\n# Benchmark\nfor _ in range(10):\n text = choice(test_cases)\n output = model.generate(text, use_torch_compile=True, verbose=True)\n output = model.generate(text, audio_prompt=\"./example_prompt.mp3\", use_torch_compile=True, verbose=True)\n"], ["/dia/example/simple-cpu.py", "import torch\n\nfrom dia.model import Dia\n\n\n# Select device: CPU\ndevice = torch.device(\"cpu\")\nprint(f\"Using device: {device}\")\n\n# Load model\nmodel = Dia.from_pretrained(\n \"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float32\", device=device\n) # Float32 works better than float16 on CPU - you can also test with float16\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\noutput = model.generate(text, use_torch_compile=False, verbose=True)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/example/voice_clone.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\n# You should put the transcript of the voice you want to clone\n# We will use the audio created by running simple.py as an example.\n# Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.\nclone_from_text = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\nclone_from_audio = \"simple.mp3\"\n\n# For your custom needs, replace above with below and add your audio file to this directory:\n# clone_from_text = \"[S1] ... 
[S2] ... [S1] ... corresponding to your_audio_name.mp3\"\n# clone_from_audio = \"your_audio_name.mp3\"\n\n# Text to generate\ntext_to_generate = \"[S1] Hello, how are you? [S2] I'm good, thank you. [S1] What's your name? [S2] My name is Dia. [S1] Nice to meet you. [S2] Nice to meet you too.\"\n\n# It will only return the audio from the text_to_generate\noutput = model.generate(\n clone_from_text + text_to_generate,\n audio_prompt=clone_from_audio,\n use_torch_compile=False,\n verbose=True,\n cfg_scale=4.0,\n temperature=1.8,\n top_p=0.90,\n cfg_filter_top_k=50,\n)\n\nmodel.save_audio(\"voice_clone.mp3\", output)\n"], ["/dia/example/voice_clone_batch.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\n# You should put the transcript of the voice you want to clone\n# We will use the audio created by running simple.py as an example.\n# Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.\nclone_from_text = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\n# For your custom needs, replace above with below and add your audio file to this directory:\n# clone_from_text = \"[S1] ... [S2] ... [S1] ... corresponding to your_audio_name.mp3\"\n# clone_from_audio = \"your_audio_name.mp3\"\n\n# Text to generate\ntext_to_generate = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\nclone_from_audios = [f\"simple_{i}.mp3\" for i in range(10)]\n\ntexts = [clone_from_text + text_to_generate for _ in range(10)]\n\n# It will only return the audio from the text_to_generate\noutput = model.generate(texts, audio_prompt=clone_from_audios, use_torch_compile=True, verbose=True, max_tokens=2000)\n\nfor i, o in enumerate(output):\n model.save_audio(f\"voice_clone_{i}.mp3\", o)\n"], ["/dia/hf.py", "from transformers import AutoProcessor, DiaForConditionalGeneration\n\n\ntorch_device = \"cuda\"\nmodel_checkpoint = \"nari-labs/Dia-1.6B-0626\"\n\ntext = [\n \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n]\nprocessor = AutoProcessor.from_pretrained(model_checkpoint)\ninputs = processor(text=text, padding=True, return_tensors=\"pt\").to(torch_device)\n\nmodel = DiaForConditionalGeneration.from_pretrained(model_checkpoint).to(torch_device)\noutputs = model.generate(**inputs, max_new_tokens=3072, guidance_scale=3.0, temperature=1.8, top_p=0.90, top_k=45)\n\noutputs = processor.batch_decode(outputs)\nprocessor.save_audio(outputs, \"example.mp3\")\n"], ["/dia/example/simple_batch.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\ntexts = [text for _ in range(10)]\n\noutput = model.generate(texts, use_torch_compile=True, verbose=True, max_tokens=1500)\n\nfor i, o in enumerate(output):\n model.save_audio(f\"simple_{i}.mp3\", o)\n"], ["/dia/example/simple-mac.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\n# It is important to set the `use_torch_compile` argument to `False` when using Dia on MacOS.\n# This is because the `torch.compile` function is not supported on MacOS.\noutput = model.generate(text, use_torch_compile=False, verbose=True)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/example/simple.py", "from dia.model import Dia\n\n\nmodel = Dia.from_pretrained(\"nari-labs/Dia-1.6B-0626\", compute_dtype=\"float16\")\n\ntext = \"[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. 
(laughs) [S2] Try it now on Git hub or Hugging Face.\"\n\noutput = model.generate(\n text,\n use_torch_compile=False,\n verbose=True,\n cfg_scale=3.0,\n temperature=1.8,\n top_p=0.90,\n cfg_filter_top_k=50,\n)\n\nmodel.save_audio(\"simple.mp3\", output)\n"], ["/dia/dia/__init__.py", "from .model import Dia\n\n\n__all__ = [\n \"Dia\",\n]\n"]], "task_instance_info": {"created_time": "2025-08-20 15:27:36", "created_task_model": "DeepSeek-R1", "function_skeleton": "def build_delay_indices(B: \n pass", "code_description": "This code segment functions to: Generate time-offset indices for each element in a multi-dimensional tensor based on a specified delay pattern and assemble multi-dimensional coordinate mappings for efficient data retrieval operations."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\ndef build_delay_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n device = None # Device will be determined by input tensors during actual use\n \n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32, device=device)\n \n # Create base time indices for output positions, shape [B, T, 1]\n t_idx_BT1 = torch.arange(T, device=device).unsqueeze(0).unsqueeze(-1).expand(B, T, 1)\n \n # Calculate original time indices (before clamping), shape [B, T, C]\n t_idx_original = t_idx_BT1 - delay_arr.view(1, 1, C)\n \n # Clamp indices for valid gather operations\n clamped_t_idx = torch.clamp(t_idx_original, min=0, max=T-1)\n \n # Create batch and channel indices\n b_idx = torch.arange(B, device=device).view(B, 1, 1).expand(B, T, C)\n c_idx = torch.arange(C, device=device).view(1, 1, C).expand(B, T, C)\n \n # Prepare flat indices tensor for gather_nd operation\n indices_BTCx3 = torch.stack([\n b_idx.reshape(-1),\n clamped_t_idx.reshape(-1),\n c_idx.reshape(-1)\n ], dim=1).long()\n\n return t_idx_original, indices_BTCx3\n[TASK_END]\n```", "inference_time": "2025-08-20 
15-29-46"}, "editdistance_info": {"edit_distance": 45.1299, "calculate_time": "2025-08-20 15:29:46", "true_code_clean": "def build_delay_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32)\n t_idx_BxT = torch.broadcast_to(\n torch.arange(T, dtype=torch.int32)[None, :],\n [B, T],\n )\n t_idx_BxTx1 = t_idx_BxT[..., None]\n t_idx_BxTxC = t_idx_BxTx1 - delay_arr.view(1, 1, C)\n b_idx_BxTxC = torch.broadcast_to(\n torch.arange(B, dtype=torch.int32).view(B, 1, 1),\n [B, T, C],\n )\n c_idx_BxTxC = torch.broadcast_to(\n torch.arange(C, dtype=torch.int32).view(1, 1, C),\n [B, T, C],\n )\n t_clamped_BxTxC = torch.clamp(t_idx_BxTxC, 0, T - 1)\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_clamped_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n dim=1,\n ).long() \n return t_idx_BxTxC, indices_BTCx3", "predict_code_clean": "def build_delay_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n device = None \n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32, device=device)\n t_idx_BT1 = torch.arange(T, device=device).unsqueeze(0).unsqueeze(-1).expand(B, T, 1)\n t_idx_original = t_idx_BT1 - delay_arr.view(1, 1, C)\n clamped_t_idx = torch.clamp(t_idx_original, min=0, max=T-1)\n b_idx = torch.arange(B, device=device).view(B, 1, 1).expand(B, T, C)\n c_idx = torch.arange(C, device=device).view(1, 1, C).expand(B, T, C)\n indices_BTCx3 = torch.stack([\n b_idx.reshape(-1),\n clamped_t_idx.reshape(-1),\n c_idx.reshape(-1)\n ], dim=1).long()\n return t_idx_original, indices_BTCx3"}}