{"repo_name": "csm", "file_name": "/csm/generator.py", "inference_info": {"prefix_code": "from dataclasses import dataclass\nfrom typing import List, Tuple\n\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom models import Model\nfrom moshi.models import loaders\nfrom tokenizers.processors import TemplateProcessing\nfrom transformers import AutoTokenizer\nfrom watermarking import CSM_1B_GH_WATERMARK, load_watermarker, watermark\n\n\n@dataclass\nclass Segment:\n speaker: int\n text: str\n # (num_samples,), sample_rate = 24_000\n audio: torch.Tensor\n\n\ndef load_llama3_tokenizer():\n \"\"\"\n https://github.com/huggingface/transformers/issues/22794#issuecomment-2092623992\n \"\"\"\n tokenizer_name = \"meta-llama/Llama-3.2-1B\"\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n bos = tokenizer.bos_token\n eos = tokenizer.eos_token\n tokenizer._tokenizer.post_processor = TemplateProcessing(\n single=f\"{bos}:0 $A:0 {eos}:0\",\n pair=f\"{bos}:0 $A:0 {eos}:0 {bos}:1 $B:1 {eos}:1\",\n special_tokens=[(f\"{bos}\", tokenizer.bos_token_id), (f\"{eos}\", tokenizer.eos_token_id)],\n )\n\n return tokenizer\n\n\nclass Generator:\n ", "suffix_code": "\n\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n\n frame_tokens = []\n frame_masks = []\n\n # (K, T)\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n # add EOS frame\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Returns:\n (seq_len, 33), (seq_len, 33)\n \"\"\"\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n\n return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n\n 
gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n\n samples = []\n curr_tokens = prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break # eos\n\n samples.append(sample)\n\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n\n # This applies an imperceptible watermark to identify audio as AI-generated.\n # Watermarking ensures transparency, dissuades misuse, and enables traceability.\n # Please be a responsible AI citizen and keep the watermarking in place.\n # If using CSM 1B in another application, use your own private key and keep it secret.\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n\n return audio\n\n\ndef load_csm_1b(device: str = \"cuda\") -> Generator:\n model = Model.from_pretrained(\"sesame/csm-1b\")\n model.to(device=device, dtype=torch.bfloat16)\n\n generator = Generator(model)\n return generator", "middle_code": "def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n self._text_tokenizer = load_llama3_tokenizer()\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n self._watermarker = load_watermarker(device=device)\n self.sample_rate = mimi.sample_rate\n self.device = device", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/csm/watermarking.py", "import argparse\n\nimport silentcipher\nimport torch\nimport torchaudio\n\n# This watermark key is public, it is not secure.\n# If using CSM 1B in another application, use a new private key and keep it secret.\nCSM_1B_GH_WATERMARK = [212, 211, 146, 56, 201]\n\n\ndef cli_check_audio() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--audio_path\", type=str, required=True)\n args = parser.parse_args()\n\n check_audio_from_file(args.audio_path)\n\n\ndef load_watermarker(device: str = \"cuda\") -> silentcipher.server.Model:\n model = silentcipher.get_model(\n model_type=\"44.1k\",\n device=device,\n )\n return model\n\n\n@torch.inference_mode()\ndef watermark(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) 
-> tuple[torch.Tensor, int]:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n encoded, _ = watermarker.encode_wav(audio_array_44khz, 44100, watermark_key, calc_sdr=False, message_sdr=36)\n\n output_sample_rate = min(44100, sample_rate)\n encoded = torchaudio.functional.resample(encoded, orig_freq=44100, new_freq=output_sample_rate)\n return encoded, output_sample_rate\n\n\n@torch.inference_mode()\ndef verify(\n watermarker: silentcipher.server.Model,\n watermarked_audio: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n watermarked_audio_44khz = torchaudio.functional.resample(watermarked_audio, orig_freq=sample_rate, new_freq=44100)\n result = watermarker.decode_wav(watermarked_audio_44khz, 44100, phase_shift_decoding=True)\n\n is_watermarked = result[\"status\"]\n if is_watermarked:\n is_csm_watermarked = result[\"messages\"][0] == watermark_key\n else:\n is_csm_watermarked = False\n\n return is_watermarked and is_csm_watermarked\n\n\ndef check_audio_from_file(audio_path: str) -> None:\n watermarker = load_watermarker(device=\"cuda\")\n\n audio_array, sample_rate = load_audio(audio_path)\n is_watermarked = verify(watermarker, audio_array, sample_rate, CSM_1B_GH_WATERMARK)\n\n outcome = \"Watermarked\" if is_watermarked else \"Not watermarked\"\n print(f\"{outcome}: {audio_path}\")\n\n\ndef load_audio(audio_path: str) -> tuple[torch.Tensor, int]:\n audio_array, sample_rate = torchaudio.load(audio_path)\n audio_array = audio_array.mean(dim=0)\n return audio_array, int(sample_rate)\n\n\nif __name__ == \"__main__\":\n cli_check_audio()\n"], ["/csm/models.py", "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nimport torchtune\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torchtune.models import llama3_2\n\n\ndef llama3_2_1B() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=16,\n num_heads=32,\n num_kv_heads=8,\n embed_dim=2048,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\ndef llama3_2_100M() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=4,\n num_heads=8,\n num_kv_heads=2,\n embed_dim=1024,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\nFLAVORS = {\n \"llama-1B\": llama3_2_1B,\n \"llama-100M\": llama3_2_100M,\n}\n\n\ndef _prepare_transformer(model):\n embed_dim = model.tok_embeddings.embedding_dim\n model.tok_embeddings = nn.Identity()\n model.output = nn.Identity()\n return model, embed_dim\n\n\ndef _create_causal_mask(seq_len: int, device: torch.device):\n return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=device))\n\n\ndef _index_causal_mask(mask: torch.Tensor, input_pos: torch.Tensor):\n \"\"\"\n Args:\n mask: (max_seq_len, max_seq_len)\n input_pos: (batch_size, seq_len)\n\n Returns:\n (batch_size, seq_len, max_seq_len)\n \"\"\"\n r = mask[input_pos, :]\n return r\n\n\ndef _multinomial_sample_one_no_sync(probs): # Does multinomial sampling without a cuda synchronization\n q = torch.empty_like(probs).exponential_(1)\n return torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int)\n\n\ndef sample_topk(logits: torch.Tensor, topk: int, temperature: float):\n logits = logits / temperature\n\n filter_value: float = -float(\"Inf\")\n 
indices_to_remove = logits < torch.topk(logits, topk)[0][..., -1, None]\n scores_processed = logits.masked_fill(indices_to_remove, filter_value)\n scores_processed = torch.nn.functional.log_softmax(scores_processed, dim=-1)\n probs = torch.nn.functional.softmax(scores_processed, dim=-1)\n\n sample_token = _multinomial_sample_one_no_sync(probs)\n return sample_token\n\n\n@dataclass\nclass ModelArgs:\n backbone_flavor: str\n decoder_flavor: str\n text_vocab_size: int\n audio_vocab_size: int\n audio_num_codebooks: int\n\n\nclass Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n \"\"\"Setup KV caches and return a causal mask.\"\"\"\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n\n with device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n\n def generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n tokens: (batch_size, seq_len, audio_num_codebooks+1)\n tokens_mask: (batch_size, seq_len, audio_num_codebooks+1)\n input_pos: (batch_size, seq_len) positions for each token\n mask: (batch_size, seq_len, max_seq_len\n\n Returns:\n (batch_size, audio_num_codebooks) sampled tokens\n \"\"\"\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, mask=curr_backbone_mask).to(dtype=dtype)\n\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n\n # Decoder caches must be reset every frame.\n self.decoder.reset_caches()\n for i in range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), 
input_pos=curr_pos, mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n\n return curr_sample\n\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n\n def _embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n\n return torch.cat([audio_embeds, text_embeds], dim=-2)\n"], ["/csm/run_csm.py", "import os\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom generator import load_csm_1b, Segment\nfrom dataclasses import dataclass\n\n# Disable Triton compilation\nos.environ[\"NO_TORCH_COMPILE\"] = \"1\"\n\n# Default prompts are available at https://hf.co/sesame/csm-1b\nprompt_filepath_conversational_a = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_a.wav\"\n)\nprompt_filepath_conversational_b = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_b.wav\"\n)\n\nSPEAKER_PROMPTS = {\n \"conversational_a\": {\n \"text\": (\n \"like revising for an exam I'd have to try and like keep up the momentum because I'd \"\n \"start really early I'd be like okay I'm gonna start revising now and then like \"\n \"you're revising for ages and then I just like start losing steam I didn't do that \"\n \"for the exam we had recently to be fair that was a more of a last minute scenario \"\n \"but like yeah I'm trying to like yeah I noticed this yesterday that like Mondays I \"\n \"sort of start the day with this not like a panic but like a\"\n ),\n \"audio\": prompt_filepath_conversational_a\n },\n \"conversational_b\": {\n \"text\": (\n \"like a super Mario level. Like it's very like high detail. And like, once you get \"\n \"into the park, it just like, everything looks like a computer game and they have all \"\n \"these, like, you know, if, if there's like a, you know, like in a Mario game, they \"\n \"will have like a question block. And if you like, you know, punch it, a coin will \"\n \"come out. 
So like everyone, when they come into the park, they get like this little \"\n \"bracelet and then you can go punching question blocks around.\"\n ),\n \"audio\": prompt_filepath_conversational_b\n }\n}\n\ndef load_prompt_audio(audio_path: str, target_sample_rate: int) -> torch.Tensor:\n audio_tensor, sample_rate = torchaudio.load(audio_path)\n audio_tensor = audio_tensor.squeeze(0)\n # Resample is lazy so we can always call it\n audio_tensor = torchaudio.functional.resample(\n audio_tensor, orig_freq=sample_rate, new_freq=target_sample_rate\n )\n return audio_tensor\n\ndef prepare_prompt(text: str, speaker: int, audio_path: str, sample_rate: int) -> Segment:\n audio_tensor = load_prompt_audio(audio_path, sample_rate)\n return Segment(text=text, speaker=speaker, audio=audio_tensor)\n\ndef main():\n # Select the best available device, skipping MPS due to float64 limitations\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n\n # Load model\n generator = load_csm_1b(device)\n\n # Prepare prompts\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n\n prompt_b = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n\n # Generate conversation\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n\n # Generate each utterance\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n\n # Concatenate all generations\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")\n\nif __name__ == \"__main__\":\n main() "]], "task_instance_info": {"created_time": "2025-08-20 15:24:02", "created_task_model": "DeepSeek-R1", "code_description": "This code segment functions to: Initialize core components for multimodal processing by configuring language and audio tokenizers, preparing model runtime environments, and integrating data transformation utilities with resource protection mechanisms."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\n def __init__(self, model: Model):\n self._text_tokenizer = load_llama3_tokenizer()\n self._audio_tokenizer = model.audio_tokenizer\n self._model = model\n self._watermarker = load_watermarker()\n self.device = model.device\n self.sample_rate = 24000\n[TASK_END]\n```", "inference_time": "2025-08-20 15-24-23"}, "editdistance_info": {"edit_distance": 27.2059, "calculate_time": "2025-08-20 15:24:23", "true_code_clean": "def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n 
self._text_tokenizer = load_llama3_tokenizer()\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n self._watermarker = load_watermarker(device=device)\n self.sample_rate = mimi.sample_rate\n self.device = device", "predict_code_clean": "def __init__(self, model: Model):\n self._text_tokenizer = load_llama3_tokenizer()\n self._audio_tokenizer = model.audio_tokenizer\n self._model = model\n self._watermarker = load_watermarker()\n self.device = model.device\n self.sample_rate = 24000"}}
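The record above scores the prediction with an `editdistance_info` block comparing `true_code_clean` against `predict_code_clean` (27.2059 here). The record does not state which metric the pipeline uses, so the following is only a sketch, assuming a character-level Levenshtein distance normalized to the longer string and scaled to 0-100; both helper names are hypothetical, not the pipeline's.

```python
# Hypothetical reconstruction of the "edit_distance" field: character-level
# Levenshtein distance as a percentage of the longer string. The actual
# metric used by the evaluation pipeline is not specified in the record.
def levenshtein(a: str, b: str) -> int:
    # Classic two-row dynamic-programming edit distance over characters.
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        curr = [i]
        for j, cb in enumerate(b, start=1):
            curr.append(min(
                prev[j] + 1,               # deletion
                curr[j - 1] + 1,           # insertion
                prev[j - 1] + (ca != cb),  # substitution
            ))
        prev = curr
    return prev[-1]

def normalized_edit_distance(true_code: str, predict_code: str) -> float:
    # Scale to 0-100 so a value like 27.2059 reads as "percent edited".
    denom = max(len(true_code), len(predict_code)) or 1
    return 100.0 * levenshtein(true_code, predict_code) / denom
```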
{"repo_name": "csm", "file_name": "/csm/watermarking.py", "inference_info": {"prefix_code": "import argparse\n\nimport silentcipher\nimport torch\nimport torchaudio\n\n# This watermark key is public, it is not secure.\n# If using CSM 1B in another application, use a new private key and keep it secret.\nCSM_1B_GH_WATERMARK = [212, 211, 146, 56, 201]\n\n\ndef cli_check_audio() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--audio_path\", type=str, required=True)\n args = parser.parse_args()\n\n check_audio_from_file(args.audio_path)\n\n\ndef load_watermarker(device: str = \"cuda\") -> silentcipher.server.Model:\n model = silentcipher.get_model(\n model_type=\"44.1k\",\n device=device,\n )\n return model\n\n\n@torch.inference_mode()\ndef watermark(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> tuple[torch.Tensor, int]:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n encoded, _ = watermarker.encode_wav(audio_array_44khz, 44100, watermark_key, calc_sdr=False, message_sdr=36)\n\n output_sample_rate = min(44100, sample_rate)\n encoded = torchaudio.functional.resample(encoded, orig_freq=44100, new_freq=output_sample_rate)\n return encoded, output_sample_rate\n\n\n@torch.inference_mode()\n", "suffix_code": "\n\n\ndef check_audio_from_file(audio_path: str) -> None:\n watermarker = load_watermarker(device=\"cuda\")\n\n audio_array, sample_rate = load_audio(audio_path)\n is_watermarked = verify(watermarker, audio_array, sample_rate, CSM_1B_GH_WATERMARK)\n\n outcome = \"Watermarked\" if is_watermarked else \"Not watermarked\"\n print(f\"{outcome}: {audio_path}\")\n\n\ndef load_audio(audio_path: str) -> tuple[torch.Tensor, int]:\n audio_array, sample_rate = torchaudio.load(audio_path)\n audio_array = audio_array.mean(dim=0)\n return audio_array, int(sample_rate)\n\n\nif __name__ == \"__main__\":\n cli_check_audio()\n", "middle_code": "def verify(\n watermarker: silentcipher.server.Model,\n watermarked_audio: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n watermarked_audio_44khz = torchaudio.functional.resample(watermarked_audio, orig_freq=sample_rate, new_freq=44100)\n result = watermarker.decode_wav(watermarked_audio_44khz, 44100, phase_shift_decoding=True)\n is_watermarked = result[\"status\"]\n if is_watermarked:\n is_csm_watermarked = result[\"messages\"][0] == watermark_key\n else:\n is_csm_watermarked = False\n return is_watermarked and is_csm_watermarked", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/csm/generator.py", "from dataclasses import dataclass\nfrom typing import List, Tuple\n\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom models import Model\nfrom moshi.models import loaders\nfrom tokenizers.processors import TemplateProcessing\nfrom transformers import AutoTokenizer\nfrom watermarking import CSM_1B_GH_WATERMARK, load_watermarker, watermark\n\n\n@dataclass\nclass Segment:\n speaker: int\n text: str\n # (num_samples,), sample_rate = 24_000\n audio: torch.Tensor\n\n\ndef load_llama3_tokenizer():\n \"\"\"\n https://github.com/huggingface/transformers/issues/22794#issuecomment-2092623992\n \"\"\"\n tokenizer_name = \"meta-llama/Llama-3.2-1B\"\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n bos = tokenizer.bos_token\n eos = tokenizer.eos_token\n 
tokenizer._tokenizer.post_processor = TemplateProcessing(\n single=f\"{bos}:0 $A:0 {eos}:0\",\n pair=f\"{bos}:0 $A:0 {eos}:0 {bos}:1 $B:1 {eos}:1\",\n special_tokens=[(f\"{bos}\", tokenizer.bos_token_id), (f\"{eos}\", tokenizer.eos_token_id)],\n )\n\n return tokenizer\n\n\nclass Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n\n self._text_tokenizer = load_llama3_tokenizer()\n\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n\n self._watermarker = load_watermarker(device=device)\n\n self.sample_rate = mimi.sample_rate\n self.device = device\n\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n\n frame_tokens = []\n frame_masks = []\n\n # (K, T)\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n # add EOS frame\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Returns:\n (seq_len, 33), (seq_len, 33)\n \"\"\"\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n\n return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n\n samples = []\n curr_tokens = prompt_tokens.unsqueeze(0)\n curr_tokens_mask = 
prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break # eos\n\n samples.append(sample)\n\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n\n # This applies an imperceptible watermark to identify audio as AI-generated.\n # Watermarking ensures transparency, dissuades misuse, and enables traceability.\n # Please be a responsible AI citizen and keep the watermarking in place.\n # If using CSM 1B in another application, use your own private key and keep it secret.\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n\n return audio\n\n\ndef load_csm_1b(device: str = \"cuda\") -> Generator:\n model = Model.from_pretrained(\"sesame/csm-1b\")\n model.to(device=device, dtype=torch.bfloat16)\n\n generator = Generator(model)\n return generator"], ["/csm/run_csm.py", "import os\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom generator import load_csm_1b, Segment\nfrom dataclasses import dataclass\n\n# Disable Triton compilation\nos.environ[\"NO_TORCH_COMPILE\"] = \"1\"\n\n# Default prompts are available at https://hf.co/sesame/csm-1b\nprompt_filepath_conversational_a = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_a.wav\"\n)\nprompt_filepath_conversational_b = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_b.wav\"\n)\n\nSPEAKER_PROMPTS = {\n \"conversational_a\": {\n \"text\": (\n \"like revising for an exam I'd have to try and like keep up the momentum because I'd \"\n \"start really early I'd be like okay I'm gonna start revising now and then like \"\n \"you're revising for ages and then I just like start losing steam I didn't do that \"\n \"for the exam we had recently to be fair that was a more of a last minute scenario \"\n \"but like yeah I'm trying to like yeah I noticed this yesterday that like Mondays I \"\n \"sort of start the day with this not like a panic but like a\"\n ),\n \"audio\": prompt_filepath_conversational_a\n },\n \"conversational_b\": {\n \"text\": (\n \"like a super Mario level. Like it's very like high detail. And like, once you get \"\n \"into the park, it just like, everything looks like a computer game and they have all \"\n \"these, like, you know, if, if there's like a, you know, like in a Mario game, they \"\n \"will have like a question block. And if you like, you know, punch it, a coin will \"\n \"come out. 
So like everyone, when they come into the park, they get like this little \"\n \"bracelet and then you can go punching question blocks around.\"\n ),\n \"audio\": prompt_filepath_conversational_b\n }\n}\n\ndef load_prompt_audio(audio_path: str, target_sample_rate: int) -> torch.Tensor:\n audio_tensor, sample_rate = torchaudio.load(audio_path)\n audio_tensor = audio_tensor.squeeze(0)\n # Resample is lazy so we can always call it\n audio_tensor = torchaudio.functional.resample(\n audio_tensor, orig_freq=sample_rate, new_freq=target_sample_rate\n )\n return audio_tensor\n\ndef prepare_prompt(text: str, speaker: int, audio_path: str, sample_rate: int) -> Segment:\n audio_tensor = load_prompt_audio(audio_path, sample_rate)\n return Segment(text=text, speaker=speaker, audio=audio_tensor)\n\ndef main():\n # Select the best available device, skipping MPS due to float64 limitations\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n\n # Load model\n generator = load_csm_1b(device)\n\n # Prepare prompts\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n\n prompt_b = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n\n # Generate conversation\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n\n # Generate each utterance\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n\n # Concatenate all generations\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")\n\nif __name__ == \"__main__\":\n main() "], ["/csm/models.py", "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nimport torchtune\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torchtune.models import llama3_2\n\n\ndef llama3_2_1B() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=16,\n num_heads=32,\n num_kv_heads=8,\n embed_dim=2048,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\ndef llama3_2_100M() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=4,\n num_heads=8,\n num_kv_heads=2,\n embed_dim=1024,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\nFLAVORS = {\n \"llama-1B\": llama3_2_1B,\n \"llama-100M\": llama3_2_100M,\n}\n\n\ndef _prepare_transformer(model):\n embed_dim = 
model.tok_embeddings.embedding_dim\n model.tok_embeddings = nn.Identity()\n model.output = nn.Identity()\n return model, embed_dim\n\n\ndef _create_causal_mask(seq_len: int, device: torch.device):\n return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=device))\n\n\ndef _index_causal_mask(mask: torch.Tensor, input_pos: torch.Tensor):\n \"\"\"\n Args:\n mask: (max_seq_len, max_seq_len)\n input_pos: (batch_size, seq_len)\n\n Returns:\n (batch_size, seq_len, max_seq_len)\n \"\"\"\n r = mask[input_pos, :]\n return r\n\n\ndef _multinomial_sample_one_no_sync(probs): # Does multinomial sampling without a cuda synchronization\n q = torch.empty_like(probs).exponential_(1)\n return torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int)\n\n\ndef sample_topk(logits: torch.Tensor, topk: int, temperature: float):\n logits = logits / temperature\n\n filter_value: float = -float(\"Inf\")\n indices_to_remove = logits < torch.topk(logits, topk)[0][..., -1, None]\n scores_processed = logits.masked_fill(indices_to_remove, filter_value)\n scores_processed = torch.nn.functional.log_softmax(scores_processed, dim=-1)\n probs = torch.nn.functional.softmax(scores_processed, dim=-1)\n\n sample_token = _multinomial_sample_one_no_sync(probs)\n return sample_token\n\n\n@dataclass\nclass ModelArgs:\n backbone_flavor: str\n decoder_flavor: str\n text_vocab_size: int\n audio_vocab_size: int\n audio_num_codebooks: int\n\n\nclass Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n \"\"\"Setup KV caches and return a causal mask.\"\"\"\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n\n with device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n\n def generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n tokens: (batch_size, seq_len, audio_num_codebooks+1)\n tokens_mask: (batch_size, seq_len, audio_num_codebooks+1)\n input_pos: (batch_size, seq_len) positions for each token\n mask: (batch_size, seq_len, max_seq_len\n\n Returns:\n (batch_size, audio_num_codebooks) sampled tokens\n \"\"\"\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n 
curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, mask=curr_backbone_mask).to(dtype=dtype)\n\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n\n # Decoder caches must be reset every frame.\n self.decoder.reset_caches()\n for i in range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), input_pos=curr_pos, mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n\n return curr_sample\n\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n\n def _embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n\n return torch.cat([audio_embeds, text_embeds], dim=-2)\n"]], "task_instance_info": {"created_time": "2025-08-20 15:24:02", "created_task_model": "DeepSeek-R1", "code_description": "This code segment functions to: Validate the presence of a specific digital watermark in audio data by analyzing embedded patterns and comparing them against a provided encryption key."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\n@torch.inference_mode()\ndef verify(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n message, _ = watermarker.decode_wav(audio_array_44khz, 44100)\n return message == watermark_key\n[TASK_END]\n```", "inference_time": "2025-08-20 15-24-26"}, "editdistance_info": {"edit_distance": 49.6855, "calculate_time": "2025-08-20 15:24:26", "true_code_clean": "def verify(\n watermarker: silentcipher.server.Model,\n watermarked_audio: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n watermarked_audio_44khz = torchaudio.functional.resample(watermarked_audio, orig_freq=sample_rate, new_freq=44100)\n result = watermarker.decode_wav(watermarked_audio_44khz, 44100, phase_shift_decoding=True)\n is_watermarked = result[\"status\"]\n if is_watermarked:\n is_csm_watermarked = result[\"messages\"][0] == watermark_key\n else:\n is_csm_watermarked = False\n return is_watermarked and is_csm_watermarked", 
"predict_code_clean": "@torch.inference_mode()\ndef verify(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n message, _ = watermarker.decode_wav(audio_array_44khz, 44100)\n return message == watermark_key"}}
{"repo_name": "csm", "file_name": "/csm/generator.py", "inference_info": {"prefix_code": "from dataclasses import dataclass\nfrom typing import List, Tuple\n\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom models import Model\nfrom moshi.models import loaders\nfrom tokenizers.processors import TemplateProcessing\nfrom transformers import AutoTokenizer\nfrom watermarking import CSM_1B_GH_WATERMARK, load_watermarker, watermark\n\n\n@dataclass\nclass Segment:\n speaker: int\n text: str\n # (num_samples,), sample_rate = 24_000\n audio: torch.Tensor\n\n\ndef load_llama3_tokenizer():\n \"\"\"\n https://github.com/huggingface/transformers/issues/22794#issuecomment-2092623992\n \"\"\"\n tokenizer_name = \"meta-llama/Llama-3.2-1B\"\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n bos = tokenizer.bos_token\n eos = tokenizer.eos_token\n tokenizer._tokenizer.post_processor = TemplateProcessing(\n single=f\"{bos}:0 $A:0 {eos}:0\",\n pair=f\"{bos}:0 $A:0 {eos}:0 {bos}:1 $B:1 {eos}:1\",\n special_tokens=[(f\"{bos}\", tokenizer.bos_token_id), (f\"{eos}\", tokenizer.eos_token_id)],\n )\n\n return tokenizer\n\n\nclass Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n\n self._text_tokenizer = load_llama3_tokenizer()\n\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n\n self._watermarker = load_watermarker(device=device)\n\n self.sample_rate = mimi.sample_rate\n self.device = device\n\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n ", "suffix_code": "\n\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Returns:\n (seq_len, 33), (seq_len, 33)\n \"\"\"\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n\n return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n\n samples = []\n curr_tokens = 
prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break # eos\n\n samples.append(sample)\n\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n\n # This applies an imperceptible watermark to identify audio as AI-generated.\n # Watermarking ensures transparency, dissuades misuse, and enables traceability.\n # Please be a responsible AI citizen and keep the watermarking in place.\n # If using CSM 1B in another application, use your own private key and keep it secret.\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n\n return audio\n\n\ndef load_csm_1b(device: str = \"cuda\") -> Generator:\n model = Model.from_pretrained(\"sesame/csm-1b\")\n model.to(device=device, dtype=torch.bfloat16)\n\n generator = Generator(model)\n return generator", "middle_code": "def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n frame_tokens = []\n frame_masks = []\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/csm/watermarking.py", "import argparse\n\nimport silentcipher\nimport torch\nimport torchaudio\n\n# This watermark key is public, it is not secure.\n# If using CSM 1B in another application, use a new private key and keep it secret.\nCSM_1B_GH_WATERMARK = [212, 211, 146, 56, 201]\n\n\ndef cli_check_audio() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--audio_path\", type=str, required=True)\n args = parser.parse_args()\n\n check_audio_from_file(args.audio_path)\n\n\ndef load_watermarker(device: str = \"cuda\") -> silentcipher.server.Model:\n model = silentcipher.get_model(\n model_type=\"44.1k\",\n device=device,\n )\n return model\n\n\n@torch.inference_mode()\ndef watermark(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> tuple[torch.Tensor, 
int]:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n encoded, _ = watermarker.encode_wav(audio_array_44khz, 44100, watermark_key, calc_sdr=False, message_sdr=36)\n\n output_sample_rate = min(44100, sample_rate)\n encoded = torchaudio.functional.resample(encoded, orig_freq=44100, new_freq=output_sample_rate)\n return encoded, output_sample_rate\n\n\n@torch.inference_mode()\ndef verify(\n watermarker: silentcipher.server.Model,\n watermarked_audio: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n watermarked_audio_44khz = torchaudio.functional.resample(watermarked_audio, orig_freq=sample_rate, new_freq=44100)\n result = watermarker.decode_wav(watermarked_audio_44khz, 44100, phase_shift_decoding=True)\n\n is_watermarked = result[\"status\"]\n if is_watermarked:\n is_csm_watermarked = result[\"messages\"][0] == watermark_key\n else:\n is_csm_watermarked = False\n\n return is_watermarked and is_csm_watermarked\n\n\ndef check_audio_from_file(audio_path: str) -> None:\n watermarker = load_watermarker(device=\"cuda\")\n\n audio_array, sample_rate = load_audio(audio_path)\n is_watermarked = verify(watermarker, audio_array, sample_rate, CSM_1B_GH_WATERMARK)\n\n outcome = \"Watermarked\" if is_watermarked else \"Not watermarked\"\n print(f\"{outcome}: {audio_path}\")\n\n\ndef load_audio(audio_path: str) -> tuple[torch.Tensor, int]:\n audio_array, sample_rate = torchaudio.load(audio_path)\n audio_array = audio_array.mean(dim=0)\n return audio_array, int(sample_rate)\n\n\nif __name__ == \"__main__\":\n cli_check_audio()\n"], ["/csm/models.py", "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nimport torchtune\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torchtune.models import llama3_2\n\n\ndef llama3_2_1B() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=16,\n num_heads=32,\n num_kv_heads=8,\n embed_dim=2048,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\ndef llama3_2_100M() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=4,\n num_heads=8,\n num_kv_heads=2,\n embed_dim=1024,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\nFLAVORS = {\n \"llama-1B\": llama3_2_1B,\n \"llama-100M\": llama3_2_100M,\n}\n\n\ndef _prepare_transformer(model):\n embed_dim = model.tok_embeddings.embedding_dim\n model.tok_embeddings = nn.Identity()\n model.output = nn.Identity()\n return model, embed_dim\n\n\ndef _create_causal_mask(seq_len: int, device: torch.device):\n return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=device))\n\n\ndef _index_causal_mask(mask: torch.Tensor, input_pos: torch.Tensor):\n \"\"\"\n Args:\n mask: (max_seq_len, max_seq_len)\n input_pos: (batch_size, seq_len)\n\n Returns:\n (batch_size, seq_len, max_seq_len)\n \"\"\"\n r = mask[input_pos, :]\n return r\n\n\ndef _multinomial_sample_one_no_sync(probs): # Does multinomial sampling without a cuda synchronization\n q = torch.empty_like(probs).exponential_(1)\n return torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int)\n\n\ndef sample_topk(logits: torch.Tensor, topk: int, temperature: float):\n logits = logits / temperature\n\n filter_value: float = -float(\"Inf\")\n indices_to_remove = logits < 
torch.topk(logits, topk)[0][..., -1, None]\n scores_processed = logits.masked_fill(indices_to_remove, filter_value)\n scores_processed = torch.nn.functional.log_softmax(scores_processed, dim=-1)\n probs = torch.nn.functional.softmax(scores_processed, dim=-1)\n\n sample_token = _multinomial_sample_one_no_sync(probs)\n return sample_token\n\n\n@dataclass\nclass ModelArgs:\n backbone_flavor: str\n decoder_flavor: str\n text_vocab_size: int\n audio_vocab_size: int\n audio_num_codebooks: int\n\n\nclass Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n \"\"\"Setup KV caches and return a causal mask.\"\"\"\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n\n with device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n\n def generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n tokens: (batch_size, seq_len, audio_num_codebooks+1)\n tokens_mask: (batch_size, seq_len, audio_num_codebooks+1)\n input_pos: (batch_size, seq_len) positions for each token\n mask: (batch_size, seq_len, max_seq_len\n\n Returns:\n (batch_size, audio_num_codebooks) sampled tokens\n \"\"\"\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, mask=curr_backbone_mask).to(dtype=dtype)\n\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n\n # Decoder caches must be reset every frame.\n self.decoder.reset_caches()\n for i in range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), input_pos=curr_pos, 
mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n\n return curr_sample\n\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n\n def _embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n\n return torch.cat([audio_embeds, text_embeds], dim=-2)\n"], ["/csm/run_csm.py", "import os\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom generator import load_csm_1b, Segment\nfrom dataclasses import dataclass\n\n# Disable Triton compilation\nos.environ[\"NO_TORCH_COMPILE\"] = \"1\"\n\n# Default prompts are available at https://hf.co/sesame/csm-1b\nprompt_filepath_conversational_a = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_a.wav\"\n)\nprompt_filepath_conversational_b = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_b.wav\"\n)\n\nSPEAKER_PROMPTS = {\n \"conversational_a\": {\n \"text\": (\n \"like revising for an exam I'd have to try and like keep up the momentum because I'd \"\n \"start really early I'd be like okay I'm gonna start revising now and then like \"\n \"you're revising for ages and then I just like start losing steam I didn't do that \"\n \"for the exam we had recently to be fair that was a more of a last minute scenario \"\n \"but like yeah I'm trying to like yeah I noticed this yesterday that like Mondays I \"\n \"sort of start the day with this not like a panic but like a\"\n ),\n \"audio\": prompt_filepath_conversational_a\n },\n \"conversational_b\": {\n \"text\": (\n \"like a super Mario level. Like it's very like high detail. And like, once you get \"\n \"into the park, it just like, everything looks like a computer game and they have all \"\n \"these, like, you know, if, if there's like a, you know, like in a Mario game, they \"\n \"will have like a question block. And if you like, you know, punch it, a coin will \"\n \"come out. 
So like everyone, when they come into the park, they get like this little \"\n \"bracelet and then you can go punching question blocks around.\"\n ),\n \"audio\": prompt_filepath_conversational_b\n }\n}\n\ndef load_prompt_audio(audio_path: str, target_sample_rate: int) -> torch.Tensor:\n audio_tensor, sample_rate = torchaudio.load(audio_path)\n audio_tensor = audio_tensor.squeeze(0)\n # Resample is lazy so we can always call it\n audio_tensor = torchaudio.functional.resample(\n audio_tensor, orig_freq=sample_rate, new_freq=target_sample_rate\n )\n return audio_tensor\n\ndef prepare_prompt(text: str, speaker: int, audio_path: str, sample_rate: int) -> Segment:\n audio_tensor = load_prompt_audio(audio_path, sample_rate)\n return Segment(text=text, speaker=speaker, audio=audio_tensor)\n\ndef main():\n # Select the best available device, skipping MPS due to float64 limitations\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n\n # Load model\n generator = load_csm_1b(device)\n\n # Prepare prompts\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n\n prompt_b = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n\n # Generate conversation\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n\n # Generate each utterance\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n\n # Concatenate all generations\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")\n\nif __name__ == \"__main__\":\n main() "]], "task_instance_info": {"created_time": "2025-08-20 15:24:02", "created_task_model": "DeepSeek-R1", "function_skeleton": "def _tokenize_audio(self, audio: \n pass", "code_description": "This code segment functions to: Process raw audio input by converting it into a structured token sequence with end markers and prepare frame-level masks for model compatibility during subsequent processing steps."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0)).squeeze(0) # (seq_len, 32)\n seq_len = audio_tokens.shape[0]\n \n audio_frame = torch.zeros(seq_len, 33, dtype=torch.long)\n audio_frame[:, :32] = audio_tokens\n \n audio_frame_mask = torch.zeros(seq_len, 33, dtype=torch.bool)\n audio_frame_mask[:, :32] = True\n \n return audio_frame.to(self.device), 
audio_frame_mask.to(self.device)\n[TASK_END]\n```", "inference_time": "2025-08-20 15-24-29"}, "editdistance_info": {"edit_distance": 33.79, "calculate_time": "2025-08-20 15:24:29", "true_code_clean": "def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n frame_tokens = []\n frame_masks = []\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)", "predict_code_clean": "def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0)).squeeze(0) \n seq_len = audio_tokens.shape[0]\n audio_frame = torch.zeros(seq_len, 33, dtype=torch.long)\n audio_frame[:, :32] = audio_tokens\n audio_frame_mask = torch.zeros(seq_len, 33, dtype=torch.bool)\n audio_frame_mask[:, :32] = True\n return audio_frame.to(self.device), audio_frame_mask.to(self.device)"}}
{"repo_name": "csm", "file_name": "/csm/run_csm.py", "inference_info": {"prefix_code": "import os\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom generator import load_csm_1b, Segment\nfrom dataclasses import dataclass\n\n# Disable Triton compilation\nos.environ[\"NO_TORCH_COMPILE\"] = \"1\"\n\n# Default prompts are available at https://hf.co/sesame/csm-1b\nprompt_filepath_conversational_a = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_a.wav\"\n)\nprompt_filepath_conversational_b = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_b.wav\"\n)\n\nSPEAKER_PROMPTS = {\n \"conversational_a\": {\n \"text\": (\n \"like revising for an exam I'd have to try and like keep up the momentum because I'd \"\n \"start really early I'd be like okay I'm gonna start revising now and then like \"\n \"you're revising for ages and then I just like start losing steam I didn't do that \"\n \"for the exam we had recently to be fair that was a more of a last minute scenario \"\n \"but like yeah I'm trying to like yeah I noticed this yesterday that like Mondays I \"\n \"sort of start the day with this not like a panic but like a\"\n ),\n \"audio\": prompt_filepath_conversational_a\n },\n \"conversational_b\": {\n \"text\": (\n \"like a super Mario level. Like it's very like high detail. And like, once you get \"\n \"into the park, it just like, everything looks like a computer game and they have all \"\n \"these, like, you know, if, if there's like a, you know, like in a Mario game, they \"\n \"will have like a question block. And if you like, you know, punch it, a coin will \"\n \"come out. So like everyone, when they come into the park, they get like this little \"\n \"bracelet and then you can go punching question blocks around.\"\n ),\n \"audio\": prompt_filepath_conversational_b\n }\n}\n\ndef load_prompt_audio(audio_path: str, target_sample_rate: int) -> torch.Tensor:\n audio_tensor, sample_rate = torchaudio.load(audio_path)\n audio_tensor = audio_tensor.squeeze(0)\n # Resample is lazy so we can always call it\n audio_tensor = torchaudio.functional.resample(\n audio_tensor, orig_freq=sample_rate, new_freq=target_sample_rate\n )\n return audio_tensor\n\ndef prepare_prompt(text: str, speaker: int, audio_path: str, sample_rate: int) -> Segment:\n audio_tensor = load_prompt_audio(audio_path, sample_rate)\n return Segment(text=text, speaker=speaker, audio=audio_tensor)\n\n", "suffix_code": "\n\nif __name__ == \"__main__\":\n main() ", "middle_code": "def main():\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n generator = load_csm_1b(device)\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n prompt_b = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! 
This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/csm/generator.py", "from dataclasses import dataclass\nfrom typing import List, Tuple\n\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom models import Model\nfrom moshi.models import loaders\nfrom tokenizers.processors import TemplateProcessing\nfrom transformers import AutoTokenizer\nfrom watermarking import CSM_1B_GH_WATERMARK, load_watermarker, watermark\n\n\n@dataclass\nclass Segment:\n speaker: int\n text: str\n # (num_samples,), sample_rate = 24_000\n audio: torch.Tensor\n\n\ndef load_llama3_tokenizer():\n \"\"\"\n https://github.com/huggingface/transformers/issues/22794#issuecomment-2092623992\n \"\"\"\n tokenizer_name = \"meta-llama/Llama-3.2-1B\"\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n bos = tokenizer.bos_token\n eos = tokenizer.eos_token\n tokenizer._tokenizer.post_processor = TemplateProcessing(\n single=f\"{bos}:0 $A:0 {eos}:0\",\n pair=f\"{bos}:0 $A:0 {eos}:0 {bos}:1 $B:1 {eos}:1\",\n special_tokens=[(f\"{bos}\", tokenizer.bos_token_id), (f\"{eos}\", tokenizer.eos_token_id)],\n )\n\n return tokenizer\n\n\nclass Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n\n self._text_tokenizer = load_llama3_tokenizer()\n\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n\n self._watermarker = load_watermarker(device=device)\n\n self.sample_rate = mimi.sample_rate\n self.device = device\n\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n\n frame_tokens = []\n frame_masks = []\n\n # (K, T)\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n # add EOS frame\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n\n audio_frame = 
torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Returns:\n (seq_len, 33), (seq_len, 33)\n \"\"\"\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n\n return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n\n samples = []\n curr_tokens = prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break # eos\n\n samples.append(sample)\n\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n\n # This applies an imperceptible watermark to identify audio as AI-generated.\n # Watermarking ensures transparency, dissuades misuse, and enables traceability.\n # Please be a responsible AI citizen and keep the watermarking in place.\n # If using CSM 1B in another application, use your own private key and keep it secret.\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n\n return audio\n\n\ndef load_csm_1b(device: str = \"cuda\") -> Generator:\n model = Model.from_pretrained(\"sesame/csm-1b\")\n model.to(device=device, dtype=torch.bfloat16)\n\n generator = Generator(model)\n return generator"], ["/csm/watermarking.py", "import argparse\n\nimport silentcipher\nimport torch\nimport torchaudio\n\n# This watermark key is public, it is not secure.\n# If using CSM 1B in another 
application, use a new private key and keep it secret.\nCSM_1B_GH_WATERMARK = [212, 211, 146, 56, 201]\n\n\ndef cli_check_audio() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--audio_path\", type=str, required=True)\n args = parser.parse_args()\n\n check_audio_from_file(args.audio_path)\n\n\ndef load_watermarker(device: str = \"cuda\") -> silentcipher.server.Model:\n model = silentcipher.get_model(\n model_type=\"44.1k\",\n device=device,\n )\n return model\n\n\n@torch.inference_mode()\ndef watermark(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> tuple[torch.Tensor, int]:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n encoded, _ = watermarker.encode_wav(audio_array_44khz, 44100, watermark_key, calc_sdr=False, message_sdr=36)\n\n output_sample_rate = min(44100, sample_rate)\n encoded = torchaudio.functional.resample(encoded, orig_freq=44100, new_freq=output_sample_rate)\n return encoded, output_sample_rate\n\n\n@torch.inference_mode()\ndef verify(\n watermarker: silentcipher.server.Model,\n watermarked_audio: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n watermarked_audio_44khz = torchaudio.functional.resample(watermarked_audio, orig_freq=sample_rate, new_freq=44100)\n result = watermarker.decode_wav(watermarked_audio_44khz, 44100, phase_shift_decoding=True)\n\n is_watermarked = result[\"status\"]\n if is_watermarked:\n is_csm_watermarked = result[\"messages\"][0] == watermark_key\n else:\n is_csm_watermarked = False\n\n return is_watermarked and is_csm_watermarked\n\n\ndef check_audio_from_file(audio_path: str) -> None:\n watermarker = load_watermarker(device=\"cuda\")\n\n audio_array, sample_rate = load_audio(audio_path)\n is_watermarked = verify(watermarker, audio_array, sample_rate, CSM_1B_GH_WATERMARK)\n\n outcome = \"Watermarked\" if is_watermarked else \"Not watermarked\"\n print(f\"{outcome}: {audio_path}\")\n\n\ndef load_audio(audio_path: str) -> tuple[torch.Tensor, int]:\n audio_array, sample_rate = torchaudio.load(audio_path)\n audio_array = audio_array.mean(dim=0)\n return audio_array, int(sample_rate)\n\n\nif __name__ == \"__main__\":\n cli_check_audio()\n"], ["/csm/models.py", "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nimport torchtune\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torchtune.models import llama3_2\n\n\ndef llama3_2_1B() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=16,\n num_heads=32,\n num_kv_heads=8,\n embed_dim=2048,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\ndef llama3_2_100M() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=4,\n num_heads=8,\n num_kv_heads=2,\n embed_dim=1024,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\nFLAVORS = {\n \"llama-1B\": llama3_2_1B,\n \"llama-100M\": llama3_2_100M,\n}\n\n\ndef _prepare_transformer(model):\n embed_dim = model.tok_embeddings.embedding_dim\n model.tok_embeddings = nn.Identity()\n model.output = nn.Identity()\n return model, embed_dim\n\n\ndef _create_causal_mask(seq_len: int, device: torch.device):\n return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, 
device=device))\n\n\ndef _index_causal_mask(mask: torch.Tensor, input_pos: torch.Tensor):\n \"\"\"\n Args:\n mask: (max_seq_len, max_seq_len)\n input_pos: (batch_size, seq_len)\n\n Returns:\n (batch_size, seq_len, max_seq_len)\n \"\"\"\n r = mask[input_pos, :]\n return r\n\n\ndef _multinomial_sample_one_no_sync(probs): # Does multinomial sampling without a cuda synchronization\n q = torch.empty_like(probs).exponential_(1)\n return torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int)\n\n\ndef sample_topk(logits: torch.Tensor, topk: int, temperature: float):\n logits = logits / temperature\n\n filter_value: float = -float(\"Inf\")\n indices_to_remove = logits < torch.topk(logits, topk)[0][..., -1, None]\n scores_processed = logits.masked_fill(indices_to_remove, filter_value)\n scores_processed = torch.nn.functional.log_softmax(scores_processed, dim=-1)\n probs = torch.nn.functional.softmax(scores_processed, dim=-1)\n\n sample_token = _multinomial_sample_one_no_sync(probs)\n return sample_token\n\n\n@dataclass\nclass ModelArgs:\n backbone_flavor: str\n decoder_flavor: str\n text_vocab_size: int\n audio_vocab_size: int\n audio_num_codebooks: int\n\n\nclass Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n \"\"\"Setup KV caches and return a causal mask.\"\"\"\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n\n with device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n\n def generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n tokens: (batch_size, seq_len, audio_num_codebooks+1)\n tokens_mask: (batch_size, seq_len, audio_num_codebooks+1)\n input_pos: (batch_size, seq_len) positions for each token\n mask: (batch_size, seq_len, max_seq_len\n\n Returns:\n (batch_size, audio_num_codebooks) sampled tokens\n \"\"\"\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, 
mask=curr_backbone_mask).to(dtype=dtype)\n\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n\n # Decoder caches must be reset every frame.\n self.decoder.reset_caches()\n for i in range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), input_pos=curr_pos, mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n\n return curr_sample\n\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n\n def _embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n\n return torch.cat([audio_embeds, text_embeds], dim=-2)\n"]], "task_instance_info": {"created_time": "2025-08-20 15:24:02", "created_task_model": "DeepSeek-R1", "function_skeleton": "def main():\n pass", "code_description": "This code segment functions to: Orchestrate the generation of synthetic conversational audio by sequentially processing dialogue turns between two speakers, utilizing contextual prompts and accumulating outputs into a cohesive audio file."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\ndef main():\n SAMPLE_RATE = 16000\n \n # Prepare both speaker prompts from predefined configurations\n prompt_a = prepare_prompt(\n text=SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n speaker=0,\n audio_path=SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n sample_rate=SAMPLE_RATE\n )\n prompt_b = prepare_prompt(\n text=SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n speaker=1,\n audio_path=SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n sample_rate=SAMPLE_RATE\n )\n \n # Load model and generate conversation response\n model = load_csm_1b()\n generated_audio = model.generate([prompt_a, prompt_b], max_len=600)\n \n # Save generated audio\n torchaudio.save(\"response.wav\", generated_audio.unsqueeze(0), sample_rate=SAMPLE_RATE)\n[TASK_END]\n```", "inference_time": "2025-08-20 15-24-31"}, "editdistance_info": {"edit_distance": 23.1156, "calculate_time": "2025-08-20 15:24:31", "true_code_clean": "def main():\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n generator = load_csm_1b(device)\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n prompt_b = 
prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")", "predict_code_clean": "def main():\n SAMPLE_RATE = 16000\n prompt_a = prepare_prompt(\n text=SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n speaker=0,\n audio_path=SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n sample_rate=SAMPLE_RATE\n )\n prompt_b = prepare_prompt(\n text=SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n speaker=1,\n audio_path=SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n sample_rate=SAMPLE_RATE\n )\n model = load_csm_1b()\n generated_audio = model.generate([prompt_a, prompt_b], max_len=600)\n torchaudio.save(\"response.wav\", generated_audio.unsqueeze(0), sample_rate=SAMPLE_RATE)"}}
{"repo_name": "csm", "file_name": "/csm/run_csm.py", "inference_info": {"prefix_code": "import os\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom generator import load_csm_1b, Segment\nfrom dataclasses import dataclass\n\n# Disable Triton compilation\nos.environ[\"NO_TORCH_COMPILE\"] = \"1\"\n\n# Default prompts are available at https://hf.co/sesame/csm-1b\nprompt_filepath_conversational_a = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_a.wav\"\n)\nprompt_filepath_conversational_b = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_b.wav\"\n)\n\nSPEAKER_PROMPTS = {\n \"conversational_a\": {\n \"text\": (\n \"like revising for an exam I'd have to try and like keep up the momentum because I'd \"\n \"start really early I'd be like okay I'm gonna start revising now and then like \"\n \"you're revising for ages and then I just like start losing steam I didn't do that \"\n \"for the exam we had recently to be fair that was a more of a last minute scenario \"\n \"but like yeah I'm trying to like yeah I noticed this yesterday that like Mondays I \"\n \"sort of start the day with this not like a panic but like a\"\n ),\n \"audio\": prompt_filepath_conversational_a\n },\n \"conversational_b\": {\n \"text\": (\n \"like a super Mario level. Like it's very like high detail. And like, once you get \"\n \"into the park, it just like, everything looks like a computer game and they have all \"\n \"these, like, you know, if, if there's like a, you know, like in a Mario game, they \"\n \"will have like a question block. And if you like, you know, punch it, a coin will \"\n \"come out. So like everyone, when they come into the park, they get like this little \"\n \"bracelet and then you can go punching question blocks around.\"\n ),\n \"audio\": prompt_filepath_conversational_b\n }\n}\n\ndef load_prompt_audio(audio_path: str, target_sample_rate: int) -> torch.Tensor:\n audio_tensor, sample_rate = torchaudio.load(audio_path)\n audio_tensor = audio_tensor.squeeze(0)\n # Resample is lazy so we can always call it\n audio_tensor = torchaudio.functional.resample(\n audio_tensor, orig_freq=sample_rate, new_freq=target_sample_rate\n )\n return audio_tensor\n\ndef prepare_prompt(text: str, speaker: int, audio_path: str, sample_rate: int) -> Segment:\n audio_tensor = load_prompt_audio(audio_path, sample_rate)\n return Segment(text=text, speaker=speaker, audio=audio_tensor)\n\n", "suffix_code": "\n\nif __name__ == \"__main__\":\n main() ", "middle_code": "def main():\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n generator = load_csm_1b(device)\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n prompt_b = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! 
This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/csm/generator.py", "from dataclasses import dataclass\nfrom typing import List, Tuple\n\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom models import Model\nfrom moshi.models import loaders\nfrom tokenizers.processors import TemplateProcessing\nfrom transformers import AutoTokenizer\nfrom watermarking import CSM_1B_GH_WATERMARK, load_watermarker, watermark\n\n\n@dataclass\nclass Segment:\n speaker: int\n text: str\n # (num_samples,), sample_rate = 24_000\n audio: torch.Tensor\n\n\ndef load_llama3_tokenizer():\n \"\"\"\n https://github.com/huggingface/transformers/issues/22794#issuecomment-2092623992\n \"\"\"\n tokenizer_name = \"meta-llama/Llama-3.2-1B\"\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n bos = tokenizer.bos_token\n eos = tokenizer.eos_token\n tokenizer._tokenizer.post_processor = TemplateProcessing(\n single=f\"{bos}:0 $A:0 {eos}:0\",\n pair=f\"{bos}:0 $A:0 {eos}:0 {bos}:1 $B:1 {eos}:1\",\n special_tokens=[(f\"{bos}\", tokenizer.bos_token_id), (f\"{eos}\", tokenizer.eos_token_id)],\n )\n\n return tokenizer\n\n\nclass Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n\n self._text_tokenizer = load_llama3_tokenizer()\n\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n\n self._watermarker = load_watermarker(device=device)\n\n self.sample_rate = mimi.sample_rate\n self.device = device\n\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n\n frame_tokens = []\n frame_masks = []\n\n # (K, T)\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n # add EOS frame\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n\n audio_frame = 
torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Returns:\n (seq_len, 33), (seq_len, 33)\n \"\"\"\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n\n return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n\n samples = []\n curr_tokens = prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break # eos\n\n samples.append(sample)\n\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n\n # This applies an imperceptible watermark to identify audio as AI-generated.\n # Watermarking ensures transparency, dissuades misuse, and enables traceability.\n # Please be a responsible AI citizen and keep the watermarking in place.\n # If using CSM 1B in another application, use your own private key and keep it secret.\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n\n return audio\n\n\ndef load_csm_1b(device: str = \"cuda\") -> Generator:\n model = Model.from_pretrained(\"sesame/csm-1b\")\n model.to(device=device, dtype=torch.bfloat16)\n\n generator = Generator(model)\n return generator"], ["/csm/watermarking.py", "import argparse\n\nimport silentcipher\nimport torch\nimport torchaudio\n\n# This watermark key is public, it is not secure.\n# If using CSM 1B in another 
application, use a new private key and keep it secret.\nCSM_1B_GH_WATERMARK = [212, 211, 146, 56, 201]\n\n\ndef cli_check_audio() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--audio_path\", type=str, required=True)\n args = parser.parse_args()\n\n check_audio_from_file(args.audio_path)\n\n\ndef load_watermarker(device: str = \"cuda\") -> silentcipher.server.Model:\n model = silentcipher.get_model(\n model_type=\"44.1k\",\n device=device,\n )\n return model\n\n\n@torch.inference_mode()\ndef watermark(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> tuple[torch.Tensor, int]:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n encoded, _ = watermarker.encode_wav(audio_array_44khz, 44100, watermark_key, calc_sdr=False, message_sdr=36)\n\n output_sample_rate = min(44100, sample_rate)\n encoded = torchaudio.functional.resample(encoded, orig_freq=44100, new_freq=output_sample_rate)\n return encoded, output_sample_rate\n\n\n@torch.inference_mode()\ndef verify(\n watermarker: silentcipher.server.Model,\n watermarked_audio: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n watermarked_audio_44khz = torchaudio.functional.resample(watermarked_audio, orig_freq=sample_rate, new_freq=44100)\n result = watermarker.decode_wav(watermarked_audio_44khz, 44100, phase_shift_decoding=True)\n\n is_watermarked = result[\"status\"]\n if is_watermarked:\n is_csm_watermarked = result[\"messages\"][0] == watermark_key\n else:\n is_csm_watermarked = False\n\n return is_watermarked and is_csm_watermarked\n\n\ndef check_audio_from_file(audio_path: str) -> None:\n watermarker = load_watermarker(device=\"cuda\")\n\n audio_array, sample_rate = load_audio(audio_path)\n is_watermarked = verify(watermarker, audio_array, sample_rate, CSM_1B_GH_WATERMARK)\n\n outcome = \"Watermarked\" if is_watermarked else \"Not watermarked\"\n print(f\"{outcome}: {audio_path}\")\n\n\ndef load_audio(audio_path: str) -> tuple[torch.Tensor, int]:\n audio_array, sample_rate = torchaudio.load(audio_path)\n audio_array = audio_array.mean(dim=0)\n return audio_array, int(sample_rate)\n\n\nif __name__ == \"__main__\":\n cli_check_audio()\n"], ["/csm/models.py", "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nimport torchtune\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torchtune.models import llama3_2\n\n\ndef llama3_2_1B() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=16,\n num_heads=32,\n num_kv_heads=8,\n embed_dim=2048,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\ndef llama3_2_100M() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=4,\n num_heads=8,\n num_kv_heads=2,\n embed_dim=1024,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\nFLAVORS = {\n \"llama-1B\": llama3_2_1B,\n \"llama-100M\": llama3_2_100M,\n}\n\n\ndef _prepare_transformer(model):\n embed_dim = model.tok_embeddings.embedding_dim\n model.tok_embeddings = nn.Identity()\n model.output = nn.Identity()\n return model, embed_dim\n\n\ndef _create_causal_mask(seq_len: int, device: torch.device):\n return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, 
device=device))\n\n\ndef _index_causal_mask(mask: torch.Tensor, input_pos: torch.Tensor):\n \"\"\"\n Args:\n mask: (max_seq_len, max_seq_len)\n input_pos: (batch_size, seq_len)\n\n Returns:\n (batch_size, seq_len, max_seq_len)\n \"\"\"\n r = mask[input_pos, :]\n return r\n\n\ndef _multinomial_sample_one_no_sync(probs): # Does multinomial sampling without a cuda synchronization\n q = torch.empty_like(probs).exponential_(1)\n return torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int)\n\n\ndef sample_topk(logits: torch.Tensor, topk: int, temperature: float):\n logits = logits / temperature\n\n filter_value: float = -float(\"Inf\")\n indices_to_remove = logits < torch.topk(logits, topk)[0][..., -1, None]\n scores_processed = logits.masked_fill(indices_to_remove, filter_value)\n scores_processed = torch.nn.functional.log_softmax(scores_processed, dim=-1)\n probs = torch.nn.functional.softmax(scores_processed, dim=-1)\n\n sample_token = _multinomial_sample_one_no_sync(probs)\n return sample_token\n\n\n@dataclass\nclass ModelArgs:\n backbone_flavor: str\n decoder_flavor: str\n text_vocab_size: int\n audio_vocab_size: int\n audio_num_codebooks: int\n\n\nclass Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n \"\"\"Setup KV caches and return a causal mask.\"\"\"\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n\n with device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n\n def generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n tokens: (batch_size, seq_len, audio_num_codebooks+1)\n tokens_mask: (batch_size, seq_len, audio_num_codebooks+1)\n input_pos: (batch_size, seq_len) positions for each token\n mask: (batch_size, seq_len, max_seq_len\n\n Returns:\n (batch_size, audio_num_codebooks) sampled tokens\n \"\"\"\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, 
mask=curr_backbone_mask).to(dtype=dtype)\n\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n\n # Decoder caches must be reset every frame.\n self.decoder.reset_caches()\n for i in range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), input_pos=curr_pos, mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n\n return curr_sample\n\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n\n def _embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n\n return torch.cat([audio_embeds, text_embeds], dim=-2)\n"]], "task_instance_info": {"created_time": "2025-08-20 15:24:02", "created_task_model": "DeepSeek-R1", "function_skeleton": "def main():\n pass", "code_description": "This code segment functions to: Orchestrate the end-to-end process of speech synthesis for a multi-turn conversation by initializing computational resources, processing predefined speech samples, and iteratively synthesizing audio responses while maintaining contextual continuity between dialogue turns."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\ndef main():\n # Initialize the conversational model\n model = load_csm_1b()\n \n # Prepare speaker prompts using the predefined configurations\n sample_rate = model.config.sample_rate\n \n conv_a = prepare_prompt(\n text=SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n speaker=0,\n audio_path=SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n sample_rate=sample_rate\n )\n \n conv_b = prepare_prompt(\n text=SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n speaker=1,\n audio_path=SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n sample_rate=sample_rate\n )\n \n # Generate conversation continuation\n generated_audio = model.generate(\n text=\"So what do you think about the new features?\",\n speaker=0,\n segments=[conv_a, conv_b]\n )\n \n # Save generated audio\n torchaudio.save(\"generated_response.wav\", generated_audio.unsqueeze(0), sample_rate)\n[TASK_END]\n```", "inference_time": "2025-08-20 15-24-35"}, "editdistance_info": {"edit_distance": 22.6131, "calculate_time": "2025-08-20 15:24:35", "true_code_clean": "def main():\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n generator = 
load_csm_1b(device)\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n prompt_b = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")", "predict_code_clean": "def main():\n model = load_csm_1b()\n sample_rate = model.config.sample_rate\n conv_a = prepare_prompt(\n text=SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n speaker=0,\n audio_path=SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n sample_rate=sample_rate\n )\n conv_b = prepare_prompt(\n text=SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n speaker=1,\n audio_path=SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n sample_rate=sample_rate\n )\n generated_audio = model.generate(\n text=\"So what do you think about the new features?\",\n speaker=0,\n segments=[conv_a, conv_b]\n )\n torchaudio.save(\"generated_response.wav\", generated_audio.unsqueeze(0), sample_rate)"}}